Nov 28 13:29:08 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 28 13:29:08 crc restorecon[4748]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:29:08 crc restorecon[4748]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 28 13:29:09 crc restorecon[4748]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 13:29:09 crc 
restorecon[4748]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 28 13:29:09 crc 
restorecon[4748]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 
13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 28 13:29:09 crc 
restorecon[4748]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 
13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 
13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc 
restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 13:29:09 crc restorecon[4748]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 13:29:09 crc restorecon[4748]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 28 13:29:09 crc restorecon[4748]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Nov 28 13:29:10 crc kubenswrapper[4857]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 28 13:29:10 crc kubenswrapper[4857]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Nov 28 13:29:10 crc kubenswrapper[4857]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 28 13:29:10 crc kubenswrapper[4857]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
Nov 28 13:29:10 crc kubenswrapper[4857]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI. Nov 28 13:29:10 crc kubenswrapper[4857]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.046722 4857 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime" Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051014 4857 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051042 4857 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051050 4857 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051055 4857 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051061 4857 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051065 4857 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051072 4857 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051077 4857 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051082 4857 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051087 4857 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051092 4857 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051100 4857 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051108 4857 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051114 4857 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051121 4857 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051128 4857 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051134 4857 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051145 4857 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051154 4857 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051161 4857 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051167 4857 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051173 4857 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051180 4857 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051185 4857 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051190 4857 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051195 4857 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051200 4857 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051205 4857 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051211 4857 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051218 4857 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051223 4857 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051228 4857 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051233 4857 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051239 4857 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051244 4857 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051249 4857 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051254 4857 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051259 4857 feature_gate.go:330] unrecognized feature 
gate: ManagedBootImages Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051264 4857 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051269 4857 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051274 4857 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051279 4857 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051285 4857 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051290 4857 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051295 4857 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051300 4857 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051307 4857 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051312 4857 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051317 4857 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051323 4857 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051328 4857 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051333 4857 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051337 4857 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051342 4857 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051347 4857 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051354 4857 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051359 4857 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
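None of the names in these W-lines exist in this kubelet's own feature-gate registry; they are OpenShift cluster-level gates that the rendered configuration passes down wholesale, and this (OpenShift-built) kubelet warns and keeps going rather than failing startup, as the continuing log shows. Expressed as the featureGates stanza of the same config file, the mix would look roughly like this; the boolean values for the unrecognized names are illustrative, since the warnings do not record them:

    featureGates:
      CloudDualStackNodeIPs: true   # known, already GA -> feature_gate.go:353 notice
      KMSv1: true                   # known but deprecated -> feature_gate.go:351 notice
      GatewayAPI: true              # OpenShift-only name -> "unrecognized feature gate"
      NewOLM: true                  # likewise warned about and skipped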
Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051366 4857 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051372 4857 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051378 4857 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051385 4857 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051391 4857 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051397 4857 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051403 4857 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051408 4857 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051413 4857 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051418 4857 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051423 4857 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051427 4857 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051432 4857 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.051437 4857 feature_gate.go:330] unrecognized feature gate: Example Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.051804 4857 flags.go:64] FLAG: --address="0.0.0.0" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.051822 4857 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.051833 4857 flags.go:64] FLAG: --anonymous-auth="true" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.051840 4857 flags.go:64] FLAG: --application-metrics-count-limit="100" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.051849 4857 flags.go:64] FLAG: --authentication-token-webhook="false" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.051855 4857 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.051863 4857 flags.go:64] FLAG: --authorization-mode="AlwaysAllow" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.051880 4857 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.051885 4857 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.051891 4857 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.051898 4857 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.051905 4857 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.051911 4857 flags.go:64] FLAG: --cgroup-driver="cgroupfs" Nov 28 13:29:10 crc 
kubenswrapper[4857]: I1128 13:29:10.051917 4857 flags.go:64] FLAG: --cgroup-root="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.051923 4857 flags.go:64] FLAG: --cgroups-per-qos="true" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.051928 4857 flags.go:64] FLAG: --client-ca-file="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.051933 4857 flags.go:64] FLAG: --cloud-config="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.051939 4857 flags.go:64] FLAG: --cloud-provider="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.051969 4857 flags.go:64] FLAG: --cluster-dns="[]" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.051977 4857 flags.go:64] FLAG: --cluster-domain="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.051985 4857 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.051992 4857 flags.go:64] FLAG: --config-dir="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.051999 4857 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052005 4857 flags.go:64] FLAG: --container-log-max-files="5" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052012 4857 flags.go:64] FLAG: --container-log-max-size="10Mi" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052018 4857 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052023 4857 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052030 4857 flags.go:64] FLAG: --containerd-namespace="k8s.io" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052036 4857 flags.go:64] FLAG: --contention-profiling="false" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052041 4857 flags.go:64] FLAG: --cpu-cfs-quota="true" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052047 4857 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052053 4857 flags.go:64] FLAG: --cpu-manager-policy="none" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052059 4857 flags.go:64] FLAG: --cpu-manager-policy-options="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052066 4857 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052071 4857 flags.go:64] FLAG: --enable-controller-attach-detach="true" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052077 4857 flags.go:64] FLAG: --enable-debugging-handlers="true" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052083 4857 flags.go:64] FLAG: --enable-load-reader="false" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052089 4857 flags.go:64] FLAG: --enable-server="true" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052094 4857 flags.go:64] FLAG: --enforce-node-allocatable="[pods]" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052101 4857 flags.go:64] FLAG: --event-burst="100" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052107 4857 flags.go:64] FLAG: --event-qps="50" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052113 4857 flags.go:64] FLAG: --event-storage-age-limit="default=0" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052121 4857 flags.go:64] FLAG: --event-storage-event-limit="default=0" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 
13:29:10.052126 4857 flags.go:64] FLAG: --eviction-hard="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052133 4857 flags.go:64] FLAG: --eviction-max-pod-grace-period="0" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052139 4857 flags.go:64] FLAG: --eviction-minimum-reclaim="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052145 4857 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052151 4857 flags.go:64] FLAG: --eviction-soft="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052157 4857 flags.go:64] FLAG: --eviction-soft-grace-period="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052162 4857 flags.go:64] FLAG: --exit-on-lock-contention="false" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052168 4857 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052173 4857 flags.go:64] FLAG: --experimental-mounter-path="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052178 4857 flags.go:64] FLAG: --fail-cgroupv1="false" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052184 4857 flags.go:64] FLAG: --fail-swap-on="true" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052190 4857 flags.go:64] FLAG: --feature-gates="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052196 4857 flags.go:64] FLAG: --file-check-frequency="20s" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052202 4857 flags.go:64] FLAG: --global-housekeeping-interval="1m0s" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052208 4857 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052214 4857 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052219 4857 flags.go:64] FLAG: --healthz-port="10248" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052225 4857 flags.go:64] FLAG: --help="false" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052232 4857 flags.go:64] FLAG: --hostname-override="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052238 4857 flags.go:64] FLAG: --housekeeping-interval="10s" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052246 4857 flags.go:64] FLAG: --http-check-frequency="20s" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052252 4857 flags.go:64] FLAG: --image-credential-provider-bin-dir="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052258 4857 flags.go:64] FLAG: --image-credential-provider-config="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052263 4857 flags.go:64] FLAG: --image-gc-high-threshold="85" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052269 4857 flags.go:64] FLAG: --image-gc-low-threshold="80" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052275 4857 flags.go:64] FLAG: --image-service-endpoint="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052281 4857 flags.go:64] FLAG: --kernel-memcg-notification="false" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052286 4857 flags.go:64] FLAG: --kube-api-burst="100" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052292 4857 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052300 4857 flags.go:64] FLAG: --kube-api-qps="50" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052307 
4857 flags.go:64] FLAG: --kube-reserved="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052315 4857 flags.go:64] FLAG: --kube-reserved-cgroup="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052321 4857 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052328 4857 flags.go:64] FLAG: --kubelet-cgroups="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052335 4857 flags.go:64] FLAG: --local-storage-capacity-isolation="true" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052341 4857 flags.go:64] FLAG: --lock-file="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052346 4857 flags.go:64] FLAG: --log-cadvisor-usage="false" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052352 4857 flags.go:64] FLAG: --log-flush-frequency="5s" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052357 4857 flags.go:64] FLAG: --log-json-info-buffer-size="0" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052366 4857 flags.go:64] FLAG: --log-json-split-stream="false" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052373 4857 flags.go:64] FLAG: --log-text-info-buffer-size="0" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052379 4857 flags.go:64] FLAG: --log-text-split-stream="false" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052385 4857 flags.go:64] FLAG: --logging-format="text" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052397 4857 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052406 4857 flags.go:64] FLAG: --make-iptables-util-chains="true" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052412 4857 flags.go:64] FLAG: --manifest-url="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052418 4857 flags.go:64] FLAG: --manifest-url-header="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052425 4857 flags.go:64] FLAG: --max-housekeeping-interval="15s" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052432 4857 flags.go:64] FLAG: --max-open-files="1000000" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052440 4857 flags.go:64] FLAG: --max-pods="110" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052446 4857 flags.go:64] FLAG: --maximum-dead-containers="-1" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052452 4857 flags.go:64] FLAG: --maximum-dead-containers-per-container="1" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052458 4857 flags.go:64] FLAG: --memory-manager-policy="None" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052464 4857 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052469 4857 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052475 4857 flags.go:64] FLAG: --node-ip="192.168.126.11" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052481 4857 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052494 4857 flags.go:64] FLAG: --node-status-max-images="50" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052500 4857 flags.go:64] FLAG: --node-status-update-frequency="10s" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052505 4857 flags.go:64] FLAG: --oom-score-adj="-999" 
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052511 4857 flags.go:64] FLAG: --pod-cidr="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052516 4857 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052525 4857 flags.go:64] FLAG: --pod-manifest-path="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052531 4857 flags.go:64] FLAG: --pod-max-pids="-1" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052536 4857 flags.go:64] FLAG: --pods-per-core="0" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052543 4857 flags.go:64] FLAG: --port="10250" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052549 4857 flags.go:64] FLAG: --protect-kernel-defaults="false" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052554 4857 flags.go:64] FLAG: --provider-id="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052559 4857 flags.go:64] FLAG: --qos-reserved="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052565 4857 flags.go:64] FLAG: --read-only-port="10255" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052571 4857 flags.go:64] FLAG: --register-node="true" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052576 4857 flags.go:64] FLAG: --register-schedulable="true" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052581 4857 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052590 4857 flags.go:64] FLAG: --registry-burst="10" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052595 4857 flags.go:64] FLAG: --registry-qps="5" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052603 4857 flags.go:64] FLAG: --reserved-cpus="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052609 4857 flags.go:64] FLAG: --reserved-memory="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052621 4857 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052627 4857 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052633 4857 flags.go:64] FLAG: --rotate-certificates="false" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052638 4857 flags.go:64] FLAG: --rotate-server-certificates="false" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052644 4857 flags.go:64] FLAG: --runonce="false" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052649 4857 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052655 4857 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052661 4857 flags.go:64] FLAG: --seccomp-default="false" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052666 4857 flags.go:64] FLAG: --serialize-image-pulls="true" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052672 4857 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052678 4857 flags.go:64] FLAG: --storage-driver-db="cadvisor" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052684 4857 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052689 4857 flags.go:64] FLAG: 
--storage-driver-password="root" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052694 4857 flags.go:64] FLAG: --storage-driver-secure="false" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052700 4857 flags.go:64] FLAG: --storage-driver-table="stats" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052706 4857 flags.go:64] FLAG: --storage-driver-user="root" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052711 4857 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052717 4857 flags.go:64] FLAG: --sync-frequency="1m0s" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052722 4857 flags.go:64] FLAG: --system-cgroups="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052728 4857 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052736 4857 flags.go:64] FLAG: --system-reserved-cgroup="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052742 4857 flags.go:64] FLAG: --tls-cert-file="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052747 4857 flags.go:64] FLAG: --tls-cipher-suites="[]" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052753 4857 flags.go:64] FLAG: --tls-min-version="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052759 4857 flags.go:64] FLAG: --tls-private-key-file="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052764 4857 flags.go:64] FLAG: --topology-manager-policy="none" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052770 4857 flags.go:64] FLAG: --topology-manager-policy-options="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052775 4857 flags.go:64] FLAG: --topology-manager-scope="container" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052781 4857 flags.go:64] FLAG: --v="2" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052788 4857 flags.go:64] FLAG: --version="false" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052797 4857 flags.go:64] FLAG: --vmodule="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052803 4857 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.052810 4857 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.052964 4857 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.052972 4857 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.052978 4857 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.052984 4857 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.052990 4857 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
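The flags.go:64 dump above records the command line as parsed, before the config file is merged, so defaults still show through: --cgroup-driver reports cgroupfs here even though the kubelet later adopts systemd from CRI-O (see the server.go:1437 line further down). One way to read the merged result instead is the kubelet's /configz debug endpoint (hypothetical invocation; node name crc as in the log):

    # assumption: cluster-admin credentials and jq available; kubectl works in place of oc
    oc get --raw "/api/v1/nodes/crc/proxy/configz" | jq '.kubeletconfig.cgroupDriver'
    "systemd"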
Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.052997 4857 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.053002 4857 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.053007 4857 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.053012 4857 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.053016 4857 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.053021 4857 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.053026 4857 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.053031 4857 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.053035 4857 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.053040 4857 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.053045 4857 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.053050 4857 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.053055 4857 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.053062 4857 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.053067 4857 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.053071 4857 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.053076 4857 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.053081 4857 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.053085 4857 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.053090 4857 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.053095 4857 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.053099 4857 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.053104 4857 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.053109 4857 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.053116 4857 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.053121 4857 feature_gate.go:330] unrecognized feature gate: 
AutomatedEtcdBackup Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.053125 4857 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.053130 4857 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.053135 4857 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.053140 4857 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.053144 4857 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.053149 4857 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.053154 4857 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.053245 4857 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.054426 4857 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.054446 4857 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.054454 4857 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.054461 4857 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.054469 4857 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.054476 4857 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.054482 4857 feature_gate.go:330] unrecognized feature gate: Example Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.054489 4857 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.054495 4857 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.054501 4857 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.054507 4857 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.054514 4857 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.054528 4857 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.054535 4857 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.054542 4857 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.054548 4857 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.054553 4857 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.054559 4857 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.054565 4857 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.054571 4857 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.054576 4857 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.054582 4857 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.054590 4857 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.054595 4857 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.054605 4857 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.054610 4857 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.054616 4857 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.054621 4857 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.054626 4857 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.054631 4857 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.054636 4857 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.054641 4857 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.054651 4857 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.064752 4857 server.go:491] "Kubelet version" kubeletVersion="v1.31.5" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.064800 4857 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.064895 4857 feature_gate.go:330] unrecognized feature gate: 
NutanixMultiSubnets Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.064905 4857 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.064912 4857 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.064919 4857 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.064925 4857 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.064931 4857 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.064937 4857 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.064960 4857 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.064968 4857 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.064974 4857 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.064980 4857 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.064986 4857 feature_gate.go:330] unrecognized feature gate: Example Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.064993 4857 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065001 4857 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065008 4857 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065014 4857 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065021 4857 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065027 4857 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065032 4857 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065038 4857 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065044 4857 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065049 4857 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065055 4857 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065061 4857 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065067 4857 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065072 4857 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065078 
4857 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065084 4857 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065090 4857 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065096 4857 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065102 4857 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065110 4857 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065117 4857 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065127 4857 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065133 4857 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065139 4857 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065145 4857 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065151 4857 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065157 4857 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065163 4857 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065168 4857 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065174 4857 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065181 4857 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065190 4857 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065196 4857 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065202 4857 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065208 4857 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065215 4857 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065222 4857 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065227 4857 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065234 4857 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065239 4857 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065245 4857 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065251 4857 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065257 4857 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065264 4857 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065271 4857 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065277 4857 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065283 4857 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065289 4857 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065294 4857 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065301 4857 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065310 4857 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065316 4857 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065323 4857 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065329 4857 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065336 4857 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065341 4857 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065347 4857 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065352 4857 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065358 4857 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.065368 4857 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065545 4857 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065557 4857 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
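The repeated parses all converge on the same effective map (the feature_gate.go:386 I-lines above and below): only CloudDualStackNodeIPs, DisableKubeletCloudCredentialProviders, KMSv1 and ValidatingAdmissionPolicy are true, every other tracked gate is false, and the unrecognized names are absent because they were dropped. Since Go prints maps with sorted keys, identical dumps compare as identical strings, which makes the repetition easy to verify from a saved copy of this log (hypothetical file name):

    # assumption: this log saved as kubelet.log; prints each distinct map once, count first
    grep -o 'feature gates: {map\[[^]]*\]}' kubelet.log | sort | uniq -c

In this excerpt that yields a single distinct line with a count of 3.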
Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065565 4857 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065572 4857 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065578 4857 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065584 4857 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065590 4857 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065596 4857 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065602 4857 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065608 4857 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065614 4857 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065620 4857 feature_gate.go:330] unrecognized feature gate: Example Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065626 4857 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065631 4857 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065637 4857 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065643 4857 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065649 4857 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065654 4857 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065660 4857 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065676 4857 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065682 4857 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065688 4857 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065695 4857 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065701 4857 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065707 4857 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065713 4857 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065719 4857 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065724 4857 feature_gate.go:330] unrecognized feature 
gate: BuildCSIVolumes Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065730 4857 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065737 4857 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065743 4857 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065748 4857 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065754 4857 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065759 4857 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065765 4857 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065771 4857 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065777 4857 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065783 4857 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065788 4857 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065795 4857 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065801 4857 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065806 4857 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065811 4857 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065817 4857 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065822 4857 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065827 4857 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065833 4857 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065842 4857 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065849 4857 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065855 4857 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065860 4857 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065866 4857 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065873 4857 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065879 4857 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065885 4857 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065892 4857 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065899 4857 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065906 4857 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065912 4857 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065919 4857 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065927 4857 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065933 4857 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065938 4857 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065960 4857 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065966 4857 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065972 4857 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065978 4857 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065983 4857 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065988 4857 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065994 4857 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.065999 4857 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.066008 4857 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.066589 4857 server.go:940] "Client rotation is on, will bootstrap in background" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.070571 4857 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.070678 4857 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem". 
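With client rotation on, the kubelet keeps using the pair at /var/lib/kubelet/pki/kubelet-client-current.pem and only falls back to the bootstrap kubeconfig (/etc/kubernetes/kubeconfig) when that certificate is missing or expired; here the existing kubeconfig is still valid, so no bootstrap happens. The logged expiry can be cross-checked on the node (hypothetical invocation; the date should match the certificate_manager line just below):

    # assumption: run as root on the node
    openssl x509 -in /var/lib/kubelet/pki/kubelet-client-current.pem -noout -enddate
    notAfter=Feb 24 05:52:08 2026 GMT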
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.071389 4857 server.go:997] "Starting client certificate rotation"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.071416 4857 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.071695 4857 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2026-01-15 17:43:11.452223955 +0000 UTC
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.071797 4857 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 1156h14m1.380430919s for next certificate rotation
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.077233 4857 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.078802 4857 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.087367 4857 log.go:25] "Validated CRI v1 runtime API"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.109810 4857 log.go:25] "Validated CRI v1 image API"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.112499 4857 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.117976 4857 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-28-13-24-43-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.118026 4857 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.147621 4857 manager.go:217] Machine: {Timestamp:2025-11-28 13:29:10.145171456 +0000 UTC m=+0.269112973 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654132736 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:fb89c177-76d4-4664-9e2a-ee1d63d6009b BootID:2cd46941-ebc0-4d64-8ed5-520d3d122aa4 Filesystems:[{Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827068416 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730829824 Type:vfs Inodes:819200 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:02:77:4b Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:02:77:4b Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:54:9a:2a Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:d6:48:d4 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:e2:84:dc Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:c9:b5:ea Speed:-1 Mtu:1496} {Name:ens7.23 MacAddress:52:54:00:b8:40:10 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:b6:45:27:56:4d:92 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:ea:70:64:77:af:b3 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654132736 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.148184 4857 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.148485 4857 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.149160 4857 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.149564 4857 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.149657 4857 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.150283 4857 topology_manager.go:138] "Creating topology manager with none policy"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.150326 4857 container_manager_linux.go:303] "Creating device plugin manager"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.150749 4857 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.150844 4857 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.151439 4857 state_mem.go:36] "Initialized new in-memory state store"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.151604 4857 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.153198 4857 kubelet.go:418] "Attempting to sync node with API server"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.153250 4857 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.153348 4857 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.153378 4857 kubelet.go:324] "Adding apiserver pod source"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.153399 4857 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.156163 4857 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.156848 4857 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.156871 4857 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.222:6443: connect: connection refused
Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.156877 4857 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.222:6443: connect: connection refused
Nov 28 13:29:10 crc kubenswrapper[4857]: E1128 13:29:10.157072 4857 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.222:6443: connect: connection refused" logger="UnhandledError"
Nov 28 13:29:10 crc kubenswrapper[4857]: E1128 13:29:10.157111 4857 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.222:6443: connect: connection refused" logger="UnhandledError"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.158247 4857 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.159231 4857 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.159289 4857 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.159313 4857 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.159333 4857 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.159362 4857 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.159377 4857 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.159392 4857 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.159416 4857 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.159434 4857 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.159454 4857 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.159477 4857 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.159491 4857 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.160136 4857 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.161009 4857 server.go:1280] "Started kubelet"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.161181 4857 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.161294 4857 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.222:6443: connect: connection refused
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.161338 4857 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.162559 4857 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Nov 28 13:29:10 crc systemd[1]: Started Kubernetes Kubelet.
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.164649 4857 server.go:460] "Adding debug handlers to kubelet server"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.166098 4857 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.166172 4857 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.166283 4857 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-10 21:57:43.68713516 +0000 UTC
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.166350 4857 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 296h28m33.520789671s for next certificate rotation
Nov 28 13:29:10 crc kubenswrapper[4857]: E1128 13:29:10.167635 4857 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.167777 4857 volume_manager.go:287] "The desired_state_of_world populator starts"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.167793 4857 volume_manager.go:289] "Starting Kubelet Volume Manager"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.167983 4857 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Nov 28 13:29:10 crc kubenswrapper[4857]: E1128 13:29:10.165661 4857 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.222:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187c2ebe9894252c default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 13:29:10.16090142 +0000 UTC m=+0.284842897,LastTimestamp:2025-11-28 13:29:10.16090142 +0000 UTC m=+0.284842897,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.169834 4857 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.222:6443: connect: connection refused
Nov 28 13:29:10 crc kubenswrapper[4857]: E1128 13:29:10.170109 4857 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.222:6443: connect: connection refused" logger="UnhandledError"
Nov 28 13:29:10 crc kubenswrapper[4857]: E1128 13:29:10.170681 4857 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.222:6443: connect: connection refused" interval="200ms"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.173790 4857 factory.go:153] Registering CRI-O factory
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.173842 4857 factory.go:221] Registration of the crio container factory successfully
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.174018 4857 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.174059 4857 factory.go:55] Registering systemd factory
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.174082 4857 factory.go:221] Registration of the systemd container factory successfully
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.174140 4857 factory.go:103] Registering Raw factory
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.174177 4857 manager.go:1196] Started watching for new ooms in manager
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.177125 4857 manager.go:319] Starting recovery of all containers
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.181403 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.181451 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.181466 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.181477 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.181489 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.181498 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.181510 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.181520 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.181533 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.181543 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.181558 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.181568 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.181577 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.181589 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.181599 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.181607 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.181617 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.181627 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.181635 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.181645 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.181655 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.181664 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.181676 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.181686 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.181697 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.182021 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.182034 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.182044 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.182053 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.182062 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.182072 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.182081 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.182114 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.182123 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.182133 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.182142 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.182838 4857 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.182871 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.182884 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.182895 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.182906 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.182919 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.182931 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.182960 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.182972 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.182984 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.182996 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183007 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183021 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183035 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183049 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183062 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183074 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183089 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183182 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183196 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183210 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183223 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183233 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183244 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183257 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183271 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183286 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183299 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183312 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183324 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183337 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183348 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183360 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183374 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183388 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183403 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183419 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183453 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183468 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183480 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183491 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183502 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183515 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183527 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183537 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183549 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183562 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183573 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183584 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183594 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183604 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183619 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183631 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183642 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183653 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183664 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183731 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183742 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183757 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183767 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183777 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183787 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183798 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183807 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183818 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183827 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183838 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183848 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183858 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183872 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183884 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183895 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183906 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183917 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183932 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183957 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183967 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183978 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.183991 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.184002 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.184012 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.184022 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.184033 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.184046 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.184055 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.184067 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.184077 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.184087 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.184097 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.184107 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.184119 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.184130 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.184139 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.184148 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.184159 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.184168 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.184178 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.184187 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.184197 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.184232 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.184243 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.184252 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.184581 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.184594 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext=""
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.184608 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f"
volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.184619 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.185195 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.185265 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.185293 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.185321 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.185344 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.185367 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.185389 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.185410 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.185444 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.185467 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" 
volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.185486 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.185519 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.185538 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.185557 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.185587 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.185606 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.185629 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.185649 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.185669 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.185694 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.185715 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" 
volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.185740 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.185762 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.185781 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.185803 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.185823 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.185843 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.185863 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.185885 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.185905 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.185929 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.185978 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" 
volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.186000 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.186019 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.186039 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.186059 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.186081 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.186100 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.186120 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.186140 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.186160 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.186182 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.186203 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" 
volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.186224 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.186245 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.186264 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.186285 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.186304 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.186326 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.186346 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.186383 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.186402 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.186424 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.186443 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" 
volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.186463 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.186482 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.186502 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.186522 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.186541 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.186562 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.186583 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.186603 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.186623 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.186642 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.186663 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" 
volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.186682 4857 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.186703 4857 reconstruct.go:97] "Volume reconstruction finished" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.186718 4857 reconciler.go:26] "Reconciler: start to sync state" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.193110 4857 manager.go:324] Recovery completed Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.208457 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.210393 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.210575 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.210678 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.211422 4857 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.211515 4857 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.211611 4857 state_mem.go:36] "Initialized new in-memory state store" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.221031 4857 policy_none.go:49] "None policy: Start" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.221940 4857 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.222025 4857 state_mem.go:35] "Initializing new in-memory state store" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.225388 4857 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.227367 4857 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.227454 4857 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.227516 4857 kubelet.go:2335] "Starting kubelet main sync loop" Nov 28 13:29:10 crc kubenswrapper[4857]: E1128 13:29:10.227622 4857 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.230514 4857 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.222:6443: connect: connection refused Nov 28 13:29:10 crc kubenswrapper[4857]: E1128 13:29:10.230642 4857 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.222:6443: connect: connection refused" logger="UnhandledError" Nov 28 13:29:10 crc kubenswrapper[4857]: E1128 13:29:10.268097 4857 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.278181 4857 manager.go:334] "Starting Device Plugin manager" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.278237 4857 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.278250 4857 server.go:79] "Starting device plugin registration server" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.278693 4857 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.278711 4857 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.278893 4857 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.279108 4857 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.279137 4857 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 28 13:29:10 crc kubenswrapper[4857]: E1128 13:29:10.289788 4857 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.328097 4857 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.328277 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.329620 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.329661 4857 
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.329661 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.329675 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.330038 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.330328 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.330421 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.330742 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.330776 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.330788 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.330914 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.331214 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.331289 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.331971 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.332005 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.332005 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.332019 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.332029 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.332042 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.332168 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.332331 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.332353 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.332367 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.332353 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.332422 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.333200 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.333237 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.333249 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.333643 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.333672 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.333683 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.333786 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.334301 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.334381 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.335140 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.335193 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.335204 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.335382 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.335405 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.336118 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.336149 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.336160 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.337136 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.337175 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.337188 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:29:10 crc kubenswrapper[4857]: E1128 13:29:10.372240 4857 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.222:6443: connect: connection refused" interval="400ms"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.379428 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.381562 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.381626 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.381641 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.381679 4857 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: E1128 13:29:10.382296 4857 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.222:6443: connect: connection refused" node="crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.389873 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.389942 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.389994 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.390023 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.390195 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.390274 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.390305 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.390336 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.390360 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.390382 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.390405 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.390431 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.390454 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.390482 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.390519 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.491500 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.491618 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.491670 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.491724 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.491770 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.491819 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.491869 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.491915 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.491921 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.492003 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.492089 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.492021 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.492027 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.492019 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.492089 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.491909 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.492172 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.492181 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.492320 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.492416 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.492430 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.492463 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.492538 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.492569 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.492591 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.492604 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.492621 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.492679 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.492686 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.492790 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.583156 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.585888 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.585942 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.585990 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.586028 4857 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: E1128 13:29:10.586803 4857 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.222:6443: connect: connection refused" node="crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.658176 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.679840 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.682319 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-dd007b844e7853c20ab36146f30ab4ba5f0f84d6001e8ab9a393d515e2912b71 WatchSource:0}: Error finding container dd007b844e7853c20ab36146f30ab4ba5f0f84d6001e8ab9a393d515e2912b71: Status 404 returned error can't find the container with id dd007b844e7853c20ab36146f30ab4ba5f0f84d6001e8ab9a393d515e2912b71
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.692562 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.712190 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.717543 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 13:29:10 crc kubenswrapper[4857]: E1128 13:29:10.773339 4857 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.222:6443: connect: connection refused" interval="800ms"
Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.842144 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-2462df515f406f61b17cfd9dc49e4047bd716baeed3a56caa32f91fec9f30f22 WatchSource:0}: Error finding container 2462df515f406f61b17cfd9dc49e4047bd716baeed3a56caa32f91fec9f30f22: Status 404 returned error can't find the container with id 2462df515f406f61b17cfd9dc49e4047bd716baeed3a56caa32f91fec9f30f22
Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.858717 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-1425d73bcc37532b5483769f29add06a056f65d857305fb427a8b6826eefa77d WatchSource:0}: Error finding container 1425d73bcc37532b5483769f29add06a056f65d857305fb427a8b6826eefa77d: Status 404 returned error can't find the container with id 1425d73bcc37532b5483769f29add06a056f65d857305fb427a8b6826eefa77d
Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.859676 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-eceb4198e000011444b7fe38d15fe122f7138bf364d985d90538db4c0001007d WatchSource:0}: Error finding container eceb4198e000011444b7fe38d15fe122f7138bf364d985d90538db4c0001007d: Status 404 returned error can't find the container with id eceb4198e000011444b7fe38d15fe122f7138bf364d985d90538db4c0001007d
Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.861029 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-a9ae7a4c9cee33d064605a019665539c22bfff0e68927795875e6e19c1010fcd WatchSource:0}: Error finding container a9ae7a4c9cee33d064605a019665539c22bfff0e68927795875e6e19c1010fcd: Status 404 returned
error can't find the container with id a9ae7a4c9cee33d064605a019665539c22bfff0e68927795875e6e19c1010fcd Nov 28 13:29:10 crc kubenswrapper[4857]: W1128 13:29:10.974460 4857 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.222:6443: connect: connection refused Nov 28 13:29:10 crc kubenswrapper[4857]: E1128 13:29:10.974583 4857 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.222:6443: connect: connection refused" logger="UnhandledError" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.987268 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.988658 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.988705 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.988719 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:10 crc kubenswrapper[4857]: I1128 13:29:10.988761 4857 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 13:29:10 crc kubenswrapper[4857]: E1128 13:29:10.989221 4857 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.222:6443: connect: connection refused" node="crc" Nov 28 13:29:11 crc kubenswrapper[4857]: I1128 13:29:11.162891 4857 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.222:6443: connect: connection refused Nov 28 13:29:11 crc kubenswrapper[4857]: I1128 13:29:11.235764 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"9f6bc7875fb476c21ff62d60c4829056eaa350b20beff5473fc880ae20ede265"} Nov 28 13:29:11 crc kubenswrapper[4857]: I1128 13:29:11.236225 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"dd007b844e7853c20ab36146f30ab4ba5f0f84d6001e8ab9a393d515e2912b71"} Nov 28 13:29:11 crc kubenswrapper[4857]: I1128 13:29:11.236315 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:29:11 crc kubenswrapper[4857]: I1128 13:29:11.238706 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:11 crc kubenswrapper[4857]: I1128 13:29:11.238738 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:11 crc kubenswrapper[4857]: I1128 13:29:11.238750 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 28 13:29:11 crc kubenswrapper[4857]: I1128 13:29:11.239967 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"e5c76797e337ee42b458f3cc8108b46bd86b18d98a1a07ed60df3778203feb39"} Nov 28 13:29:11 crc kubenswrapper[4857]: I1128 13:29:11.240016 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"eceb4198e000011444b7fe38d15fe122f7138bf364d985d90538db4c0001007d"} Nov 28 13:29:11 crc kubenswrapper[4857]: I1128 13:29:11.240120 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:29:11 crc kubenswrapper[4857]: I1128 13:29:11.242634 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:11 crc kubenswrapper[4857]: I1128 13:29:11.242662 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:11 crc kubenswrapper[4857]: I1128 13:29:11.242673 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:11 crc kubenswrapper[4857]: I1128 13:29:11.244070 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0"} Nov 28 13:29:11 crc kubenswrapper[4857]: I1128 13:29:11.244105 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"a9ae7a4c9cee33d064605a019665539c22bfff0e68927795875e6e19c1010fcd"} Nov 28 13:29:11 crc kubenswrapper[4857]: I1128 13:29:11.246319 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba"} Nov 28 13:29:11 crc kubenswrapper[4857]: I1128 13:29:11.246345 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"1425d73bcc37532b5483769f29add06a056f65d857305fb427a8b6826eefa77d"} Nov 28 13:29:11 crc kubenswrapper[4857]: I1128 13:29:11.246451 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:29:11 crc kubenswrapper[4857]: I1128 13:29:11.247473 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:11 crc kubenswrapper[4857]: I1128 13:29:11.247503 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:11 crc kubenswrapper[4857]: I1128 13:29:11.247514 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:11 crc kubenswrapper[4857]: I1128 13:29:11.249571 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" 
event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"d848aac34e5933419aad61d2b4b49046a7704d4fee23e53070d2946647d70e79"} Nov 28 13:29:11 crc kubenswrapper[4857]: I1128 13:29:11.249601 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"2462df515f406f61b17cfd9dc49e4047bd716baeed3a56caa32f91fec9f30f22"} Nov 28 13:29:11 crc kubenswrapper[4857]: I1128 13:29:11.249707 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:29:11 crc kubenswrapper[4857]: I1128 13:29:11.250405 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:11 crc kubenswrapper[4857]: I1128 13:29:11.250428 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:11 crc kubenswrapper[4857]: I1128 13:29:11.250440 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:11 crc kubenswrapper[4857]: W1128 13:29:11.303256 4857 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.222:6443: connect: connection refused Nov 28 13:29:11 crc kubenswrapper[4857]: E1128 13:29:11.303380 4857 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.222:6443: connect: connection refused" logger="UnhandledError" Nov 28 13:29:11 crc kubenswrapper[4857]: W1128 13:29:11.362518 4857 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.222:6443: connect: connection refused Nov 28 13:29:11 crc kubenswrapper[4857]: E1128 13:29:11.362670 4857 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.222:6443: connect: connection refused" logger="UnhandledError" Nov 28 13:29:11 crc kubenswrapper[4857]: E1128 13:29:11.574799 4857 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.222:6443: connect: connection refused" interval="1.6s" Nov 28 13:29:11 crc kubenswrapper[4857]: W1128 13:29:11.607874 4857 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.222:6443: connect: connection refused Nov 28 13:29:11 crc kubenswrapper[4857]: E1128 13:29:11.607991 4857 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get 
\"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.222:6443: connect: connection refused" logger="UnhandledError" Nov 28 13:29:11 crc kubenswrapper[4857]: I1128 13:29:11.790140 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:29:11 crc kubenswrapper[4857]: I1128 13:29:11.791533 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:11 crc kubenswrapper[4857]: I1128 13:29:11.791581 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:11 crc kubenswrapper[4857]: I1128 13:29:11.791598 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:11 crc kubenswrapper[4857]: I1128 13:29:11.791662 4857 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 13:29:11 crc kubenswrapper[4857]: E1128 13:29:11.792376 4857 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.222:6443: connect: connection refused" node="crc" Nov 28 13:29:12 crc kubenswrapper[4857]: I1128 13:29:12.162293 4857 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.222:6443: connect: connection refused Nov 28 13:29:12 crc kubenswrapper[4857]: I1128 13:29:12.255088 4857 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="e5c76797e337ee42b458f3cc8108b46bd86b18d98a1a07ed60df3778203feb39" exitCode=0 Nov 28 13:29:12 crc kubenswrapper[4857]: I1128 13:29:12.255219 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"e5c76797e337ee42b458f3cc8108b46bd86b18d98a1a07ed60df3778203feb39"} Nov 28 13:29:12 crc kubenswrapper[4857]: I1128 13:29:12.255411 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:29:12 crc kubenswrapper[4857]: I1128 13:29:12.257216 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:12 crc kubenswrapper[4857]: I1128 13:29:12.257261 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:12 crc kubenswrapper[4857]: I1128 13:29:12.257278 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:12 crc kubenswrapper[4857]: I1128 13:29:12.261600 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed"} Nov 28 13:29:12 crc kubenswrapper[4857]: I1128 13:29:12.261637 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75"} Nov 28 13:29:12 crc kubenswrapper[4857]: I1128 
13:29:12.261656 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92"} Nov 28 13:29:12 crc kubenswrapper[4857]: I1128 13:29:12.264177 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:29:12 crc kubenswrapper[4857]: I1128 13:29:12.267071 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:12 crc kubenswrapper[4857]: I1128 13:29:12.267302 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:12 crc kubenswrapper[4857]: I1128 13:29:12.267398 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:12 crc kubenswrapper[4857]: I1128 13:29:12.270760 4857 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba" exitCode=0 Nov 28 13:29:12 crc kubenswrapper[4857]: I1128 13:29:12.270883 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba"} Nov 28 13:29:12 crc kubenswrapper[4857]: I1128 13:29:12.271078 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:29:12 crc kubenswrapper[4857]: I1128 13:29:12.271940 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:12 crc kubenswrapper[4857]: I1128 13:29:12.271994 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:12 crc kubenswrapper[4857]: I1128 13:29:12.272009 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:12 crc kubenswrapper[4857]: I1128 13:29:12.275006 4857 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="d848aac34e5933419aad61d2b4b49046a7704d4fee23e53070d2946647d70e79" exitCode=0 Nov 28 13:29:12 crc kubenswrapper[4857]: I1128 13:29:12.275097 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"d848aac34e5933419aad61d2b4b49046a7704d4fee23e53070d2946647d70e79"} Nov 28 13:29:12 crc kubenswrapper[4857]: I1128 13:29:12.275249 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:29:12 crc kubenswrapper[4857]: I1128 13:29:12.275570 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:29:12 crc kubenswrapper[4857]: I1128 13:29:12.276029 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:12 crc kubenswrapper[4857]: I1128 13:29:12.276064 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:12 crc kubenswrapper[4857]: I1128 13:29:12.276075 4857 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:12 crc kubenswrapper[4857]: I1128 13:29:12.276616 4857 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="9f6bc7875fb476c21ff62d60c4829056eaa350b20beff5473fc880ae20ede265" exitCode=0 Nov 28 13:29:12 crc kubenswrapper[4857]: I1128 13:29:12.276734 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"9f6bc7875fb476c21ff62d60c4829056eaa350b20beff5473fc880ae20ede265"} Nov 28 13:29:12 crc kubenswrapper[4857]: I1128 13:29:12.276901 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:29:12 crc kubenswrapper[4857]: I1128 13:29:12.276963 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:12 crc kubenswrapper[4857]: I1128 13:29:12.277567 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:12 crc kubenswrapper[4857]: I1128 13:29:12.277643 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:12 crc kubenswrapper[4857]: I1128 13:29:12.278110 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:12 crc kubenswrapper[4857]: I1128 13:29:12.278136 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:12 crc kubenswrapper[4857]: I1128 13:29:12.278144 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:13 crc kubenswrapper[4857]: W1128 13:29:13.010421 4857 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.222:6443: connect: connection refused Nov 28 13:29:13 crc kubenswrapper[4857]: E1128 13:29:13.010540 4857 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.222:6443: connect: connection refused" logger="UnhandledError" Nov 28 13:29:13 crc kubenswrapper[4857]: I1128 13:29:13.282590 4857 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="20ab98aa21c8412fcdc34158485cb7690fd8e8b4467bb1ab3086701997e2677c" exitCode=0 Nov 28 13:29:13 crc kubenswrapper[4857]: I1128 13:29:13.283007 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"20ab98aa21c8412fcdc34158485cb7690fd8e8b4467bb1ab3086701997e2677c"} Nov 28 13:29:13 crc kubenswrapper[4857]: I1128 13:29:13.284307 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:29:13 crc kubenswrapper[4857]: I1128 13:29:13.287880 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:13 crc kubenswrapper[4857]: I1128 13:29:13.287963 4857 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:13 crc kubenswrapper[4857]: I1128 13:29:13.287988 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:13 crc kubenswrapper[4857]: I1128 13:29:13.294966 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"2a4ebf7d9e3c843109e00e0275ae58a7c1ac629b408d5a60afb11e2a77adee6e"} Nov 28 13:29:13 crc kubenswrapper[4857]: I1128 13:29:13.295078 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:29:13 crc kubenswrapper[4857]: I1128 13:29:13.295964 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:13 crc kubenswrapper[4857]: I1128 13:29:13.295991 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:13 crc kubenswrapper[4857]: I1128 13:29:13.296004 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:13 crc kubenswrapper[4857]: I1128 13:29:13.300047 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"8caf792b0a8768a68663568cc8a1cdca8c4f3394f30fe098841c6e37b9cf9980"} Nov 28 13:29:13 crc kubenswrapper[4857]: I1128 13:29:13.300075 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"dc226ca8af9c501cbd610bcd82596f359f573fc294ce0830056d1d59bd066b0e"} Nov 28 13:29:13 crc kubenswrapper[4857]: I1128 13:29:13.300090 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"93cf74b7715a705e22a03a506d90d151a961e80a83a4aeda3d4e361cfc2a178d"} Nov 28 13:29:13 crc kubenswrapper[4857]: I1128 13:29:13.300177 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:29:13 crc kubenswrapper[4857]: I1128 13:29:13.300821 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:13 crc kubenswrapper[4857]: I1128 13:29:13.300852 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:13 crc kubenswrapper[4857]: I1128 13:29:13.300864 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:13 crc kubenswrapper[4857]: I1128 13:29:13.302459 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:29:13 crc kubenswrapper[4857]: I1128 13:29:13.302454 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2"} Nov 28 13:29:13 crc kubenswrapper[4857]: I1128 13:29:13.302567 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71"} Nov 28 13:29:13 crc kubenswrapper[4857]: I1128 13:29:13.302592 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31"} Nov 28 13:29:13 crc kubenswrapper[4857]: I1128 13:29:13.303070 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:13 crc kubenswrapper[4857]: I1128 13:29:13.303110 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:13 crc kubenswrapper[4857]: I1128 13:29:13.303120 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:13 crc kubenswrapper[4857]: I1128 13:29:13.393375 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:29:13 crc kubenswrapper[4857]: I1128 13:29:13.394880 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:13 crc kubenswrapper[4857]: I1128 13:29:13.394928 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:13 crc kubenswrapper[4857]: I1128 13:29:13.394959 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:13 crc kubenswrapper[4857]: I1128 13:29:13.395038 4857 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 28 13:29:14 crc kubenswrapper[4857]: I1128 13:29:14.309201 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1"} Nov 28 13:29:14 crc kubenswrapper[4857]: I1128 13:29:14.309273 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46"} Nov 28 13:29:14 crc kubenswrapper[4857]: I1128 13:29:14.309282 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:29:14 crc kubenswrapper[4857]: I1128 13:29:14.310781 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:14 crc kubenswrapper[4857]: I1128 13:29:14.310828 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:14 crc kubenswrapper[4857]: I1128 13:29:14.310845 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:14 crc kubenswrapper[4857]: I1128 13:29:14.312424 4857 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="2a37878479145d6662bbf57bf6f0b3c23cbb3b5dfe077ba90c5d0fa30a5cda08" exitCode=0 Nov 28 13:29:14 crc kubenswrapper[4857]: I1128 13:29:14.312455 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" 
event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"2a37878479145d6662bbf57bf6f0b3c23cbb3b5dfe077ba90c5d0fa30a5cda08"} Nov 28 13:29:14 crc kubenswrapper[4857]: I1128 13:29:14.312749 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:29:14 crc kubenswrapper[4857]: I1128 13:29:14.313975 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:14 crc kubenswrapper[4857]: I1128 13:29:14.314023 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:14 crc kubenswrapper[4857]: I1128 13:29:14.314037 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:15 crc kubenswrapper[4857]: I1128 13:29:15.317821 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"10f327fe4acd76bf71128672e66dc01e2eee9224de01bb356b0eb7cec633c37a"} Nov 28 13:29:15 crc kubenswrapper[4857]: I1128 13:29:15.317885 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"0f4678ddc3433a145f1fdb1d0bde8226d5710e6fcd98cd49910a8a51005bc079"} Nov 28 13:29:15 crc kubenswrapper[4857]: I1128 13:29:15.317908 4857 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 13:29:15 crc kubenswrapper[4857]: I1128 13:29:15.317999 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:29:15 crc kubenswrapper[4857]: I1128 13:29:15.318877 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:15 crc kubenswrapper[4857]: I1128 13:29:15.318923 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:15 crc kubenswrapper[4857]: I1128 13:29:15.318936 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:15 crc kubenswrapper[4857]: I1128 13:29:15.713176 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 13:29:15 crc kubenswrapper[4857]: I1128 13:29:15.713358 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:29:15 crc kubenswrapper[4857]: I1128 13:29:15.714466 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:15 crc kubenswrapper[4857]: I1128 13:29:15.714502 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:15 crc kubenswrapper[4857]: I1128 13:29:15.714519 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:15 crc kubenswrapper[4857]: I1128 13:29:15.980112 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:29:16 crc kubenswrapper[4857]: I1128 13:29:16.324551 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" 
event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"6890e5083d46a2142758e2627a32e0737cff6a0e5b037b5388505be00bd442f5"} Nov 28 13:29:16 crc kubenswrapper[4857]: I1128 13:29:16.324592 4857 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 13:29:16 crc kubenswrapper[4857]: I1128 13:29:16.324604 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"c2b9c451fad3c8f775f6cd12019fa0734b08180a5f9db6629239e42ee7dc2aab"} Nov 28 13:29:16 crc kubenswrapper[4857]: I1128 13:29:16.324616 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"79786b3c521c60ad312c8360152cc00e4651827dd5b344f027c162852fda7683"} Nov 28 13:29:16 crc kubenswrapper[4857]: I1128 13:29:16.324638 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:29:16 crc kubenswrapper[4857]: I1128 13:29:16.324714 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:29:16 crc kubenswrapper[4857]: I1128 13:29:16.325672 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:16 crc kubenswrapper[4857]: I1128 13:29:16.325706 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:16 crc kubenswrapper[4857]: I1128 13:29:16.325715 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:16 crc kubenswrapper[4857]: I1128 13:29:16.326017 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:16 crc kubenswrapper[4857]: I1128 13:29:16.326046 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:16 crc kubenswrapper[4857]: I1128 13:29:16.326058 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:16 crc kubenswrapper[4857]: I1128 13:29:16.791304 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Nov 28 13:29:17 crc kubenswrapper[4857]: I1128 13:29:17.099231 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:29:17 crc kubenswrapper[4857]: I1128 13:29:17.327533 4857 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 13:29:17 crc kubenswrapper[4857]: I1128 13:29:17.327617 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:29:17 crc kubenswrapper[4857]: I1128 13:29:17.327619 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:29:17 crc kubenswrapper[4857]: I1128 13:29:17.329351 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:17 crc kubenswrapper[4857]: I1128 13:29:17.329385 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:17 crc kubenswrapper[4857]: I1128 13:29:17.329406 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:29:17 crc kubenswrapper[4857]: I1128 13:29:17.329419 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:17 crc kubenswrapper[4857]: I1128 13:29:17.329439 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:17 crc kubenswrapper[4857]: I1128 13:29:17.329424 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:17 crc kubenswrapper[4857]: I1128 13:29:17.970603 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 13:29:17 crc kubenswrapper[4857]: I1128 13:29:17.970932 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:29:17 crc kubenswrapper[4857]: I1128 13:29:17.972796 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:17 crc kubenswrapper[4857]: I1128 13:29:17.972844 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:17 crc kubenswrapper[4857]: I1128 13:29:17.972889 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:17 crc kubenswrapper[4857]: I1128 13:29:17.978809 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 13:29:18 crc kubenswrapper[4857]: I1128 13:29:18.329752 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:29:18 crc kubenswrapper[4857]: I1128 13:29:18.329995 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:29:18 crc kubenswrapper[4857]: I1128 13:29:18.332116 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:18 crc kubenswrapper[4857]: I1128 13:29:18.332318 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:18 crc kubenswrapper[4857]: I1128 13:29:18.332421 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:18 crc kubenswrapper[4857]: I1128 13:29:18.332513 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:18 crc kubenswrapper[4857]: I1128 13:29:18.332431 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:18 crc kubenswrapper[4857]: I1128 13:29:18.332604 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:19 crc kubenswrapper[4857]: I1128 13:29:19.169189 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 13:29:19 crc kubenswrapper[4857]: I1128 13:29:19.332425 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:29:19 crc kubenswrapper[4857]: I1128 13:29:19.334580 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:19 
crc kubenswrapper[4857]: I1128 13:29:19.334626 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:19 crc kubenswrapper[4857]: I1128 13:29:19.334640 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:19 crc kubenswrapper[4857]: I1128 13:29:19.850369 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:29:19 crc kubenswrapper[4857]: I1128 13:29:19.850582 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:29:19 crc kubenswrapper[4857]: I1128 13:29:19.852102 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:19 crc kubenswrapper[4857]: I1128 13:29:19.852158 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:19 crc kubenswrapper[4857]: I1128 13:29:19.852173 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:20 crc kubenswrapper[4857]: E1128 13:29:20.290036 4857 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 28 13:29:21 crc kubenswrapper[4857]: I1128 13:29:21.469666 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 13:29:21 crc kubenswrapper[4857]: I1128 13:29:21.470026 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:29:21 crc kubenswrapper[4857]: I1128 13:29:21.472171 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:21 crc kubenswrapper[4857]: I1128 13:29:21.472335 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:21 crc kubenswrapper[4857]: I1128 13:29:21.472421 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:21 crc kubenswrapper[4857]: I1128 13:29:21.476340 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 13:29:22 crc kubenswrapper[4857]: I1128 13:29:22.340486 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:29:22 crc kubenswrapper[4857]: I1128 13:29:22.341780 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:22 crc kubenswrapper[4857]: I1128 13:29:22.341907 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:22 crc kubenswrapper[4857]: I1128 13:29:22.342104 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:22 crc kubenswrapper[4857]: I1128 13:29:22.992883 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 13:29:22 crc kubenswrapper[4857]: I1128 13:29:22.993258 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 28 13:29:22 crc 
kubenswrapper[4857]: I1128 13:29:22.995080 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:22 crc kubenswrapper[4857]: I1128 13:29:22.995124 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:22 crc kubenswrapper[4857]: I1128 13:29:22.995139 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:23 crc kubenswrapper[4857]: W1128 13:29:23.103333 4857 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout Nov 28 13:29:23 crc kubenswrapper[4857]: I1128 13:29:23.103501 4857 trace.go:236] Trace[737456088]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Nov-2025 13:29:13.101) (total time: 10001ms): Nov 28 13:29:23 crc kubenswrapper[4857]: Trace[737456088]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (13:29:23.103) Nov 28 13:29:23 crc kubenswrapper[4857]: Trace[737456088]: [10.001927701s] [10.001927701s] END Nov 28 13:29:23 crc kubenswrapper[4857]: E1128 13:29:23.103546 4857 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError" Nov 28 13:29:23 crc kubenswrapper[4857]: I1128 13:29:23.163099 4857 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Nov 28 13:29:23 crc kubenswrapper[4857]: E1128 13:29:23.176315 4857 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" interval="3.2s" Nov 28 13:29:23 crc kubenswrapper[4857]: I1128 13:29:23.179921 4857 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Nov 28 13:29:23 crc kubenswrapper[4857]: I1128 13:29:23.180547 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Nov 28 13:29:23 crc kubenswrapper[4857]: E1128 13:29:23.396762 4857 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": net/http: TLS handshake timeout" node="crc" Nov 28 13:29:23 crc kubenswrapper[4857]: W1128 13:29:23.868853 4857 reflector.go:561] 
k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout
Nov 28 13:29:23 crc kubenswrapper[4857]: I1128 13:29:23.869080 4857 trace.go:236] Trace[1729852940]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Nov-2025 13:29:13.866) (total time: 10002ms):
Nov 28 13:29:23 crc kubenswrapper[4857]: Trace[1729852940]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (13:29:23.868)
Nov 28 13:29:23 crc kubenswrapper[4857]: Trace[1729852940]: [10.002106146s] [10.002106146s] END
Nov 28 13:29:23 crc kubenswrapper[4857]: E1128 13:29:23.869131 4857 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError"
Nov 28 13:29:24 crc kubenswrapper[4857]: W1128 13:29:24.466301 4857 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout
Nov 28 13:29:24 crc kubenswrapper[4857]: I1128 13:29:24.467368 4857 trace.go:236] Trace[1699609908]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Nov-2025 13:29:14.464) (total time: 10002ms):
Nov 28 13:29:24 crc kubenswrapper[4857]: Trace[1699609908]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (13:29:24.466)
Nov 28 13:29:24 crc kubenswrapper[4857]: Trace[1699609908]: [10.002874367s] [10.002874367s] END
Nov 28 13:29:24 crc kubenswrapper[4857]: E1128 13:29:24.467414 4857 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError"
Nov 28 13:29:24 crc kubenswrapper[4857]: I1128 13:29:24.469710 4857 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 28 13:29:24 crc kubenswrapper[4857]: I1128 13:29:24.469879 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Nov 28 13:29:25 crc kubenswrapper[4857]: I1128 13:29:25.032684 4857 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Nov 28 13:29:25 crc kubenswrapper[4857]: I1128 13:29:25.032993 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Nov 28 13:29:25 crc kubenswrapper[4857]: I1128 13:29:25.047748 4857 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Nov 28 13:29:25 crc kubenswrapper[4857]: I1128 13:29:25.047820 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Nov 28 13:29:25 crc kubenswrapper[4857]: I1128 13:29:25.354135 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc"
Nov 28 13:29:25 crc kubenswrapper[4857]: I1128 13:29:25.354488 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 13:29:25 crc kubenswrapper[4857]: I1128 13:29:25.356153 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:29:25 crc kubenswrapper[4857]: I1128 13:29:25.356215 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:29:25 crc kubenswrapper[4857]: I1128 13:29:25.356228 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:29:25 crc kubenswrapper[4857]: I1128 13:29:25.407465 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc"
Nov 28 13:29:25 crc kubenswrapper[4857]: I1128 13:29:25.988394 4857 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Nov 28 13:29:25 crc kubenswrapper[4857]: [+]log ok
Nov 28 13:29:25 crc kubenswrapper[4857]: [+]etcd ok
Nov 28 13:29:25 crc kubenswrapper[4857]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Nov 28 13:29:25 crc kubenswrapper[4857]: [+]poststarthook/openshift.io-api-request-count-filter ok
Nov 28 13:29:25 crc kubenswrapper[4857]: [+]poststarthook/openshift.io-startkubeinformers ok
Nov 28 13:29:25 crc kubenswrapper[4857]: [+]poststarthook/openshift.io-openshift-apiserver-reachable ok
Nov 28 13:29:25 crc kubenswrapper[4857]: [+]poststarthook/openshift.io-oauth-apiserver-reachable ok
Nov 28 13:29:25 crc kubenswrapper[4857]: [+]poststarthook/start-apiserver-admission-initializer ok
Nov 28 13:29:25 crc kubenswrapper[4857]: [+]poststarthook/generic-apiserver-start-informers ok
Nov 28 13:29:25 crc kubenswrapper[4857]: [+]poststarthook/priority-and-fairness-config-consumer ok
Nov 28 13:29:25 crc kubenswrapper[4857]: [+]poststarthook/priority-and-fairness-filter ok
Nov 28 13:29:25 crc kubenswrapper[4857]: [+]poststarthook/storage-object-count-tracker-hook ok
Nov 28 13:29:25 crc kubenswrapper[4857]: [+]poststarthook/start-apiextensions-informers ok
Nov 28 13:29:25 crc kubenswrapper[4857]: [+]poststarthook/start-apiextensions-controllers ok
Nov 28 13:29:25 crc kubenswrapper[4857]: [+]poststarthook/crd-informer-synced ok
Nov 28 13:29:25 crc kubenswrapper[4857]: [+]poststarthook/start-system-namespaces-controller ok
Nov 28 13:29:25 crc kubenswrapper[4857]: [+]poststarthook/start-cluster-authentication-info-controller ok
Nov 28 13:29:25 crc kubenswrapper[4857]: [+]poststarthook/start-kube-apiserver-identity-lease-controller ok
Nov 28 13:29:25 crc kubenswrapper[4857]: [+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok
Nov 28 13:29:25 crc kubenswrapper[4857]: [+]poststarthook/start-legacy-token-tracking-controller ok
Nov 28 13:29:25 crc kubenswrapper[4857]: [+]poststarthook/start-service-ip-repair-controllers ok
Nov 28 13:29:25 crc kubenswrapper[4857]: [-]poststarthook/rbac/bootstrap-roles failed: reason withheld
Nov 28 13:29:25 crc kubenswrapper[4857]: [-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld
Nov 28 13:29:25 crc kubenswrapper[4857]: [+]poststarthook/priority-and-fairness-config-producer ok
Nov 28 13:29:25 crc kubenswrapper[4857]: [+]poststarthook/bootstrap-controller ok
Nov 28 13:29:25 crc kubenswrapper[4857]: [+]poststarthook/aggregator-reload-proxy-client-cert ok
Nov 28 13:29:25 crc kubenswrapper[4857]: [+]poststarthook/start-kube-aggregator-informers ok
Nov 28 13:29:25 crc kubenswrapper[4857]: [+]poststarthook/apiservice-status-local-available-controller ok
Nov 28 13:29:25 crc kubenswrapper[4857]: [+]poststarthook/apiservice-status-remote-available-controller ok
Nov 28 13:29:25 crc kubenswrapper[4857]: [+]poststarthook/apiservice-registration-controller ok
Nov 28 13:29:25 crc kubenswrapper[4857]: [+]poststarthook/apiservice-wait-for-first-sync ok
Nov 28 13:29:25 crc kubenswrapper[4857]: [+]poststarthook/apiservice-discovery-controller ok
Nov 28 13:29:25 crc kubenswrapper[4857]: [+]poststarthook/kube-apiserver-autoregistration ok
Nov 28 13:29:25 crc kubenswrapper[4857]: [+]autoregister-completion ok
Nov 28 13:29:25 crc kubenswrapper[4857]: [+]poststarthook/apiservice-openapi-controller ok
Nov 28 13:29:25 crc kubenswrapper[4857]: [+]poststarthook/apiservice-openapiv3-controller ok
Nov 28 13:29:25 crc kubenswrapper[4857]: livez check failed
Nov 28 13:29:25 crc kubenswrapper[4857]: I1128 13:29:25.988499 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 28 13:29:26 crc kubenswrapper[4857]: I1128 13:29:26.352164 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 13:29:26 crc kubenswrapper[4857]: I1128 13:29:26.353510 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:29:26 crc kubenswrapper[4857]: I1128 13:29:26.353578 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:29:26 crc kubenswrapper[4857]: I1128 13:29:26.353593 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:29:26 crc kubenswrapper[4857]: I1128 13:29:26.369583 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc"
Nov 28 13:29:26 crc kubenswrapper[4857]: I1128 13:29:26.597259 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 13:29:26 crc kubenswrapper[4857]: I1128 13:29:26.599278 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:29:26 crc kubenswrapper[4857]: I1128 13:29:26.599330 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:29:26 crc kubenswrapper[4857]: I1128 13:29:26.599343 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:29:26 crc kubenswrapper[4857]: I1128 13:29:26.599376 4857 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 28 13:29:26 crc kubenswrapper[4857]: E1128 13:29:26.605822 4857 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc"
Nov 28 13:29:26 crc kubenswrapper[4857]: I1128 13:29:26.914538 4857 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Nov 28 13:29:27 crc kubenswrapper[4857]: I1128 13:29:27.355337 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 13:29:27 crc kubenswrapper[4857]: I1128 13:29:27.357027 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:29:27 crc kubenswrapper[4857]: I1128 13:29:27.357068 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:29:27 crc kubenswrapper[4857]: I1128 13:29:27.357078 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:29:27 crc kubenswrapper[4857]: I1128 13:29:27.547777 4857 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Nov 28 13:29:28 crc kubenswrapper[4857]: I1128 13:29:28.164671 4857 apiserver.go:52] "Watching apiserver"
Nov 28 13:29:28 crc kubenswrapper[4857]: I1128 13:29:28.168432 4857 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Nov 28 13:29:28 crc kubenswrapper[4857]: I1128 13:29:28.168632 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h"]
Nov 28 13:29:28 crc kubenswrapper[4857]: I1128 13:29:28.169010 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 28 13:29:28 crc kubenswrapper[4857]: I1128 13:29:28.169079 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 13:29:28 crc kubenswrapper[4857]: E1128 13:29:28.169209 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 13:29:28 crc kubenswrapper[4857]: I1128 13:29:28.169228 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 13:29:28 crc kubenswrapper[4857]: I1128 13:29:28.169283 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 28 13:29:28 crc kubenswrapper[4857]: I1128 13:29:28.169350 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb"
Nov 28 13:29:28 crc kubenswrapper[4857]: E1128 13:29:28.169457 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 13:29:28 crc kubenswrapper[4857]: I1128 13:29:28.169568 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 13:29:28 crc kubenswrapper[4857]: E1128 13:29:28.169735 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 13:29:28 crc kubenswrapper[4857]: I1128 13:29:28.171669 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Nov 28 13:29:28 crc kubenswrapper[4857]: I1128 13:29:28.172249 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Nov 28 13:29:28 crc kubenswrapper[4857]: I1128 13:29:28.172522 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Nov 28 13:29:28 crc kubenswrapper[4857]: I1128 13:29:28.172706 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Nov 28 13:29:28 crc kubenswrapper[4857]: I1128 13:29:28.172735 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Nov 28 13:29:28 crc kubenswrapper[4857]: I1128 13:29:28.172866 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Nov 28 13:29:28 crc kubenswrapper[4857]: I1128 13:29:28.172972 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Nov 28 13:29:28 crc kubenswrapper[4857]: I1128 13:29:28.174027 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Nov 28 13:29:28 crc kubenswrapper[4857]: I1128 13:29:28.174618 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Nov 28 13:29:28 crc kubenswrapper[4857]: I1128 13:29:28.207800 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 28 13:29:28 crc kubenswrapper[4857]: I1128 13:29:28.221754 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 28 13:29:28 crc kubenswrapper[4857]: I1128 13:29:28.248252 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 28 13:29:28 crc kubenswrapper[4857]: I1128 13:29:28.267557 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 28 13:29:28 crc kubenswrapper[4857]: I1128 13:29:28.268964 4857 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
Nov 28 13:29:28 crc kubenswrapper[4857]: I1128 13:29:28.278631 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 28 13:29:28 crc kubenswrapper[4857]: I1128 13:29:28.290428 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 28 13:29:28 crc kubenswrapper[4857]: I1128 13:29:28.304272 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 28 13:29:28 crc kubenswrapper[4857]: I1128 13:29:28.315423 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 28 13:29:28 crc kubenswrapper[4857]: I1128 13:29:28.328915 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 28 13:29:28 crc kubenswrapper[4857]: I1128 13:29:28.407541 4857 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.045658 4857 trace.go:236] Trace[190655516]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Nov-2025 13:29:16.799) (total time: 13245ms):
Nov 28 13:29:30 crc kubenswrapper[4857]: Trace[190655516]: ---"Objects listed" error: 13245ms (13:29:30.045)
Nov 28 13:29:30 crc kubenswrapper[4857]: Trace[190655516]: [13.245965783s] [13.245965783s] END
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.045700 4857 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.048244 4857 reconstruct.go:205] "DevicePaths of reconstructed volumes updated"
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.076323 4857 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:50478->192.168.126.11:17697: read: connection reset by peer" start-of-body=
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.076385 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:50478->192.168.126.11:17697: read: connection reset by peer"
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.149488 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.149570 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.149619 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.149649 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.149718 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.149747 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.149786 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.149807 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.149828 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.149866 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.149889 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.149909 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.149924 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.149965 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150019 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150038 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150062 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150128 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150151 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150168 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150184 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150203 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150219 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150217 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150233 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150251 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150269 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150257 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150286 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150304 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150318 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150334 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150350 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150367 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150382 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150399 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150415 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150432 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150450 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150467 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150484 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150500 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150518 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150537 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150555 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150573 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150594 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150612 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150627 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150645 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150659 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150677 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150698 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150716 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150733 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150749 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150765 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150783 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150799 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150821 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150837 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150853 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150869 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150887 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150903 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150918 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150935 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150966 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150984 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151000 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151017 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151034 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151052 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151068 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151085 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151103 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151122 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151138 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151152 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151170 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151187 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151206 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151223 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151242 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151262 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151278 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151296 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName:
\"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151322 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151346 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151363 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151381 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151430 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151449 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151466 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151484 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151501 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151521 4857 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151541 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151560 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151577 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151594 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151609 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151629 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151645 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151660 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151677 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151694 
4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151713 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151727 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151751 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151773 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151789 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151806 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151820 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151835 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151853 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151869 4857 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151886 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151903 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151918 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151934 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151966 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151984 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152002 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152047 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152067 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 
13:29:30.152085 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152102 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152120 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152136 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152152 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152168 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152184 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152201 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152218 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152235 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152254 4857 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152274 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152290 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152308 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152327 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152348 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152365 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152382 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152440 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152462 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: 
\"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152480 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152500 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152517 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152536 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152553 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152570 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152586 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152603 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152620 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152638 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: 
\"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152655 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152673 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152692 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152734 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152753 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152771 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152790 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152811 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152831 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152849 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod 
\"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152866 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152882 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152898 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.153499 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.153524 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.153550 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.153570 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.153592 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.153614 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.153634 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.153655 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.153677 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.153700 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.153722 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.153740 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.153759 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.153780 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.153798 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.153822 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 
13:29:30.153839 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.153858 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.153877 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.153894 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.153911 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.153929 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.153962 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.153978 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.153998 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.154015 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 28 13:29:30 crc 
kubenswrapper[4857]: I1128 13:29:30.154032 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.154048 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.154066 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.154083 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.154120 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.154144 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.154163 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.154189 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.154220 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.154237 4857 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.154258 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.154277 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.154295 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.154312 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.154337 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.154356 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.154377 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.154397 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: 
\"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.154470 4857 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.154482 4857 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.154495 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.154508 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150494 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150643 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150688 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.150773 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151180 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151266 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151301 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151480 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151494 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151624 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.155539 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151883 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.151890 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152036 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152150 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152308 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152615 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.152901 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.153116 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.153127 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.153215 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). 
InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.153371 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.153348 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.153404 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.153776 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.153844 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.153903 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.153999 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.154414 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.154427 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.154536 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.154764 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.154833 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.155897 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.155905 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.155996 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). 
InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.156065 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.156239 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.156290 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.156422 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.156482 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.156583 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.156624 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.156734 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). 
InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.156968 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.157027 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.157121 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.157412 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.157473 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.157757 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.157981 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.158264 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.158384 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.158463 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.158553 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.158578 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.160077 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.160443 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.160501 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). 
InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.160835 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.161209 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.161348 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.161470 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.161653 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.161917 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.161919 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.162178 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.162221 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.162391 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.162555 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.162632 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.162833 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.162972 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.163012 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). 
InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.162937 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.163072 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.163129 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.163219 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.163384 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.163600 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.163700 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.163915 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.164061 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.164114 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: E1128 13:29:30.164176 4857 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 13:29:30 crc kubenswrapper[4857]: E1128 13:29:30.164256 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 13:29:30.664232152 +0000 UTC m=+20.788173819 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.164423 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: E1128 13:29:30.164492 4857 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.164529 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: E1128 13:29:30.164539 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 13:29:30.66452874 +0000 UTC m=+20.788470367 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.164705 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.164844 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.164909 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.165479 4857 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.166023 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.166360 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.167138 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.167194 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.167326 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.167362 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.167625 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.167705 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.167821 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.167846 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.168010 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.168261 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: E1128 13:29:30.168396 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:29:30.66837737 +0000 UTC m=+20.792318807 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.168416 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.168604 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.168733 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.168813 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.169023 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.169107 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.169502 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.169773 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.169971 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.169988 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.170072 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.170251 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.170341 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). 
InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.170438 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.170485 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.170610 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.170803 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.170873 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.170980 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.171112 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.171320 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.171375 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.171555 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.171742 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.172185 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.172242 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.172498 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.172737 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). 
InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.172747 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.172763 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.172789 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.173050 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.173258 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.173692 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.173129 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.174193 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.174225 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.174467 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.174577 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.174696 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.175059 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.175452 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.175576 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.177681 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.177927 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.178298 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: E1128 13:29:30.178425 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 13:29:30 crc kubenswrapper[4857]: E1128 13:29:30.178527 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 13:29:30 crc kubenswrapper[4857]: E1128 13:29:30.178607 4857 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.178690 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: E1128 13:29:30.178804 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 13:29:30.67878626 +0000 UTC m=+20.802727697 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.178653 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.179101 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.179188 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.180004 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.180374 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.180471 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.180517 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.180795 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.181078 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.181171 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.181290 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.181625 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.181898 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.182061 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.182077 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.182236 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.182271 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: E1128 13:29:30.182589 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 13:29:30 crc kubenswrapper[4857]: E1128 13:29:30.182615 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 13:29:30 crc kubenswrapper[4857]: E1128 13:29:30.182628 4857 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:29:30 crc kubenswrapper[4857]: E1128 13:29:30.182677 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 13:29:30.68266043 +0000 UTC m=+20.806601867 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.182353 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.182340 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.182924 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.183176 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.183527 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.183928 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.184273 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.185477 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.186730 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.187181 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.187455 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.187466 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.187560 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.188360 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.188370 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.188672 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.188837 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.189186 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.189497 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.189845 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.191890 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.191929 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.192024 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.194192 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.194386 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.196590 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.196960 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.196968 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.198108 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.198189 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.199721 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.228657 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.228725 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.228794 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:29:30 crc kubenswrapper[4857]: E1128 13:29:30.228932 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:29:30 crc kubenswrapper[4857]: E1128 13:29:30.229029 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:29:30 crc kubenswrapper[4857]: E1128 13:29:30.229082 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.230753 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.232074 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.232547 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.233269 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.234562 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.235188 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.236281 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.237045 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.237664 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.239154 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.239885 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.240297 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.241332 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.241940 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.243235 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.243865 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.244631 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.245701 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.246360 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.247452 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 28 13:29:30 crc 
kubenswrapper[4857]: I1128 13:29:30.247912 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.248590 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.249234 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.250097 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.250558 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.251707 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.252159 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.253177 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.253833 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.254750 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.255836 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.255871 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.255967 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.255984 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.255997 4857 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256009 4857 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256021 4857 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256039 4857 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256052 4857 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256064 4857 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256075 4857 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256091 4857 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256103 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256113 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256124 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256135 4857 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath 
\"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256145 4857 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256020 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256157 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256073 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256265 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256280 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256293 4857 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256304 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256314 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256324 4857 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256336 4857 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256346 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc 
kubenswrapper[4857]: I1128 13:29:30.256358 4857 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256368 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256377 4857 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256387 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256396 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256406 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256415 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256424 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256434 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256443 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256454 4857 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256463 4857 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256474 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") 
on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256482 4857 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256490 4857 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256499 4857 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256507 4857 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256516 4857 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256524 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256534 4857 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256545 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256554 4857 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256563 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256573 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256582 4857 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256597 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: 
I1128 13:29:30.256607 4857 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256533 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256618 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256629 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256638 4857 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256647 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256655 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256665 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256674 4857 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256682 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256691 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256701 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256710 4857 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" 
Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256721 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256730 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256740 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256748 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256756 4857 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256766 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256775 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256784 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256791 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256800 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256808 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256816 4857 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256824 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: 
\"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256833 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256841 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256849 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256859 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256868 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256876 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256885 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256894 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256903 4857 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256911 4857 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256921 4857 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256929 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256937 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" 
(UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.256970 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257118 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257132 4857 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257142 4857 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257155 4857 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257165 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257175 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257184 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257192 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257201 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257209 4857 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257219 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257229 4857 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") 
on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257239 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257250 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257260 4857 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257268 4857 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257277 4857 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257286 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257295 4857 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257303 4857 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257311 4857 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257324 4857 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257335 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257336 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257346 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Nov 28 
13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257360 4857 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257371 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257381 4857 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257393 4857 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257404 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257416 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257433 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257445 4857 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257457 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257467 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257476 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257485 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257496 4857 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" 
DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257507 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257519 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257560 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257572 4857 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257582 4857 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257593 4857 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257603 4857 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257614 4857 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257626 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257637 4857 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257648 4857 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257659 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257671 4857 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 
13:29:30.257683 4857 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257694 4857 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257708 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257771 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257789 4857 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257801 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257814 4857 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257826 4857 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257839 4857 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257851 4857 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257867 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257878 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257891 4857 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257904 4857 
reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257915 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257928 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257941 4857 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257974 4857 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.257987 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.258001 4857 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.258013 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.258025 4857 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.258037 4857 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.258049 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.258063 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.258073 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc 
kubenswrapper[4857]: I1128 13:29:30.258082 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.258090 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.258099 4857 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.258108 4857 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.258118 4857 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.258127 4857 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.258162 4857 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.258171 4857 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.258180 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.258190 4857 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.258199 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.258207 4857 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.258216 4857 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 
13:29:30.258224 4857 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.258233 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.258240 4857 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.258249 4857 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.258257 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.258267 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.258279 4857 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.258287 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.258296 4857 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.258304 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.258313 4857 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.258322 4857 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.258447 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.258915 4857 status_manager.go:875] "Failed to update 
status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.259180 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.260820 4857 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.260977 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.264102 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.264609 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" 
path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.266105 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.268515 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.269345 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.271380 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.272410 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.273880 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.274107 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.275387 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.277458 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.279424 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.280886 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.281539 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.282291 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.283310 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.284385 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.286271 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.286526 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.286878 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.288418 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.289760 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.290606 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.291854 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.292142 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.292787 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.298374 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 28 13:29:30 crc kubenswrapper[4857]: W1128 13:29:30.301540 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-f40e56c3f9abb7a0399606f8ef0c0b0408a895dbe98c6ac58b407aeafbb9891d WatchSource:0}: Error finding container f40e56c3f9abb7a0399606f8ef0c0b0408a895dbe98c6ac58b407aeafbb9891d: Status 404 returned error can't find the container with id f40e56c3f9abb7a0399606f8ef0c0b0408a895dbe98c6ac58b407aeafbb9891d Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.302753 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.365401 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.764016 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.764090 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.764114 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.764141 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.764161 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:29:30 crc kubenswrapper[4857]: E1128 13:29:30.764216 4857 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:29:31.764173181 +0000 UTC m=+21.888114658 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:29:30 crc kubenswrapper[4857]: E1128 13:29:30.764265 4857 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 13:29:30 crc kubenswrapper[4857]: E1128 13:29:30.764316 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 13:29:31.764302864 +0000 UTC m=+21.888244301 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 13:29:30 crc kubenswrapper[4857]: E1128 13:29:30.764389 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 13:29:30 crc kubenswrapper[4857]: E1128 13:29:30.764402 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 13:29:30 crc kubenswrapper[4857]: E1128 13:29:30.764402 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 13:29:30 crc kubenswrapper[4857]: E1128 13:29:30.764413 4857 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:29:30 crc kubenswrapper[4857]: E1128 13:29:30.764426 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 13:29:30 crc kubenswrapper[4857]: E1128 13:29:30.764438 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 13:29:31.764432257 +0000 UTC m=+21.888373694 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:29:30 crc kubenswrapper[4857]: E1128 13:29:30.764444 4857 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:29:30 crc kubenswrapper[4857]: E1128 13:29:30.764470 4857 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 13:29:30 crc kubenswrapper[4857]: E1128 13:29:30.764494 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 13:29:31.764480569 +0000 UTC m=+21.888422016 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:29:30 crc kubenswrapper[4857]: E1128 13:29:30.764513 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 13:29:31.764504899 +0000 UTC m=+21.888446346 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.988351 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.989126 4857 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.989169 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Nov 28 13:29:30 crc kubenswrapper[4857]: I1128 13:29:30.994374 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.008661 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.022194 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.038903 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.057915 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.071929 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.088240 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.096385 4857 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1" exitCode=255 Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.096462 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1"} Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.098656 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"5c3cf8fed898b8c9646794f7d19c59523526cc3099e662b0cf0c89578eafa58e"} Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.100567 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"021cbaae85e15c04b7a740ec5a4fafb8bcfd38fdb996fd6ed29bc407567d1e77"} Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.102575 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"f40e56c3f9abb7a0399606f8ef0c0b0408a895dbe98c6ac58b407aeafbb9891d"} Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.118032 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.135766 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.140792 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.156584 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.169766 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.203390 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.219886 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.484285 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.494262 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.507278 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.517252 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.517534 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.544422 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.571370 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.588217 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resourc
e-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.610703 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:31Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.627186 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:31Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.638512 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:31Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.651398 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:31Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.670460 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:31Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.688065 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:31Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.723434 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resourc
e-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:31Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.758520 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:31Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.772510 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.772578 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.772611 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.772639 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.772667 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:29:31 crc kubenswrapper[4857]: E1128 
13:29:31.772698 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:29:33.772671015 +0000 UTC m=+23.896612462 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:29:31 crc kubenswrapper[4857]: E1128 13:29:31.772818 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 13:29:31 crc kubenswrapper[4857]: E1128 13:29:31.772814 4857 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 13:29:31 crc kubenswrapper[4857]: E1128 13:29:31.772840 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 13:29:31 crc kubenswrapper[4857]: E1128 13:29:31.772879 4857 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:29:31 crc kubenswrapper[4857]: E1128 13:29:31.772916 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 13:29:31 crc kubenswrapper[4857]: E1128 13:29:31.772982 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 13:29:31 crc kubenswrapper[4857]: E1128 13:29:31.773002 4857 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:29:31 crc kubenswrapper[4857]: E1128 13:29:31.772927 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 13:29:33.772910072 +0000 UTC m=+23.896851529 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:29:31 crc kubenswrapper[4857]: E1128 13:29:31.773116 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 13:29:33.773090476 +0000 UTC m=+23.897031913 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 13:29:31 crc kubenswrapper[4857]: E1128 13:29:31.773130 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 13:29:33.773123547 +0000 UTC m=+23.897064984 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:29:31 crc kubenswrapper[4857]: E1128 13:29:31.772826 4857 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 13:29:31 crc kubenswrapper[4857]: E1128 13:29:31.773168 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 13:29:33.773161808 +0000 UTC m=+23.897103245 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.782201 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:31Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:31 crc kubenswrapper[4857]: I1128 13:29:31.804858 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:31Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.107932 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908"} Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.108551 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77"} Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.111613 4857 scope.go:117] "RemoveContainer" containerID="ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.112152 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431"} Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.124298 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the 
pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:32Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.140187 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:32Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.155904 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:32Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.175031 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:32Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.193388 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-dshsf"] Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.193828 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.194304 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-ggbvt"] Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.194682 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-ggbvt" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.194971 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-rb7tq"] Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.195440 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.205372 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.205720 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-qbndv"] Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.206055 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.206201 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.206062 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.206641 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.207180 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.207628 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.208071 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.208270 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.215077 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.215828 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-qbndv" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.216405 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.216367 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:32Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.216678 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.218120 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.218364 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.224408 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.227841 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:29:32 crc kubenswrapper[4857]: E1128 13:29:32.228231 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.227978 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:29:32 crc kubenswrapper[4857]: E1128 13:29:32.228486 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.227881 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:29:32 crc kubenswrapper[4857]: E1128 13:29:32.228690 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.243672 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\"
:{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:32Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.265706 4857 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc0
1d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:32Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.276120 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-host-var-lib-kubelet\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.276170 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d582p\" (UniqueName: \"kubernetes.io/projected/5d5445a4-417c-448a-a8a0-4a4f81828aff-kube-api-access-d582p\") pod \"machine-config-daemon-dshsf\" (UID: \"5d5445a4-417c-448a-a8a0-4a4f81828aff\") " pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.276297 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/787c64de-9ce4-41eb-a525-948c23e84595-cni-binary-copy\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.276354 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-host-var-lib-cni-bin\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.276413 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/5d5445a4-417c-448a-a8a0-4a4f81828aff-proxy-tls\") pod \"machine-config-daemon-dshsf\" (UID: \"5d5445a4-417c-448a-a8a0-4a4f81828aff\") " pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 13:29:32 crc kubenswrapper[4857]: 
I1128 13:29:32.276495 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/787c64de-9ce4-41eb-a525-948c23e84595-multus-daemon-config\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.276530 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/5d5445a4-417c-448a-a8a0-4a4f81828aff-rootfs\") pod \"machine-config-daemon-dshsf\" (UID: \"5d5445a4-417c-448a-a8a0-4a4f81828aff\") " pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.276556 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5zxxn\" (UniqueName: \"kubernetes.io/projected/3ca720c4-3756-47cc-b59d-3167a4141804-kube-api-access-5zxxn\") pod \"node-resolver-ggbvt\" (UID: \"3ca720c4-3756-47cc-b59d-3167a4141804\") " pod="openshift-dns/node-resolver-ggbvt" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.276583 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5d5445a4-417c-448a-a8a0-4a4f81828aff-mcd-auth-proxy-config\") pod \"machine-config-daemon-dshsf\" (UID: \"5d5445a4-417c-448a-a8a0-4a4f81828aff\") " pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.276629 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-multus-conf-dir\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.276659 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-system-cni-dir\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.276688 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-host-run-k8s-cni-cncf-io\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.276712 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-host-run-netns\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.276737 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rmswj\" (UniqueName: \"kubernetes.io/projected/787c64de-9ce4-41eb-a525-948c23e84595-kube-api-access-rmswj\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " 
pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.276788 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-cnibin\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.276848 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-os-release\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.276875 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-multus-socket-dir-parent\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.276901 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-hostroot\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.276980 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-multus-cni-dir\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.277021 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-host-var-lib-cni-multus\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.277116 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-host-run-multus-certs\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.277201 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-etc-kubernetes\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.277230 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/3ca720c4-3756-47cc-b59d-3167a4141804-hosts-file\") pod \"node-resolver-ggbvt\" (UID: \"3ca720c4-3756-47cc-b59d-3167a4141804\") " pod="openshift-dns/node-resolver-ggbvt" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 
13:29:32.286475 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:32Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.301088 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28
T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:32Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.317540 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:32Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.334853 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:32Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.348781 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:32Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.362769 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:32Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.377665 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5d5445a4-417c-448a-a8a0-4a4f81828aff-mcd-auth-proxy-config\") pod \"machine-config-daemon-dshsf\" (UID: \"5d5445a4-417c-448a-a8a0-4a4f81828aff\") " pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.377724 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-system-cni-dir\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.377753 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-host-run-k8s-cni-cncf-io\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.377790 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-qbndv\" (UID: \"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\") " pod="openshift-multus/multus-additional-cni-plugins-qbndv" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.377813 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rmswj\" (UniqueName: \"kubernetes.io/projected/787c64de-9ce4-41eb-a525-948c23e84595-kube-api-access-rmswj\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.377836 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-multus-socket-dir-parent\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.377879 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: 
\"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-hostroot\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.377907 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-multus-cni-dir\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.377929 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-host-var-lib-cni-multus\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.377971 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/3ca720c4-3756-47cc-b59d-3167a4141804-hosts-file\") pod \"node-resolver-ggbvt\" (UID: \"3ca720c4-3756-47cc-b59d-3167a4141804\") " pod="openshift-dns/node-resolver-ggbvt" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.378007 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-host-var-lib-kubelet\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.378033 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d582p\" (UniqueName: \"kubernetes.io/projected/5d5445a4-417c-448a-a8a0-4a4f81828aff-kube-api-access-d582p\") pod \"machine-config-daemon-dshsf\" (UID: \"5d5445a4-417c-448a-a8a0-4a4f81828aff\") " pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.378058 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-host-var-lib-cni-bin\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.378080 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/787c64de-9ce4-41eb-a525-948c23e84595-multus-daemon-config\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.378104 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b-cnibin\") pod \"multus-additional-cni-plugins-qbndv\" (UID: \"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\") " pod="openshift-multus/multus-additional-cni-plugins-qbndv" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.378132 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f6xvn\" (UniqueName: \"kubernetes.io/projected/1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b-kube-api-access-f6xvn\") pod 
\"multus-additional-cni-plugins-qbndv\" (UID: \"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\") " pod="openshift-multus/multus-additional-cni-plugins-qbndv" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.378158 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5zxxn\" (UniqueName: \"kubernetes.io/projected/3ca720c4-3756-47cc-b59d-3167a4141804-kube-api-access-5zxxn\") pod \"node-resolver-ggbvt\" (UID: \"3ca720c4-3756-47cc-b59d-3167a4141804\") " pod="openshift-dns/node-resolver-ggbvt" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.378181 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-multus-conf-dir\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.378205 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b-os-release\") pod \"multus-additional-cni-plugins-qbndv\" (UID: \"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\") " pod="openshift-multus/multus-additional-cni-plugins-qbndv" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.378231 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-cnibin\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.378254 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-host-run-netns\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.378301 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-os-release\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.378357 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-host-run-multus-certs\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.378377 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5d5445a4-417c-448a-a8a0-4a4f81828aff-mcd-auth-proxy-config\") pod \"machine-config-daemon-dshsf\" (UID: \"5d5445a4-417c-448a-a8a0-4a4f81828aff\") " pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.378384 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-etc-kubernetes\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" 
Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.378411 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b-system-cni-dir\") pod \"multus-additional-cni-plugins-qbndv\" (UID: \"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\") " pod="openshift-multus/multus-additional-cni-plugins-qbndv" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.378431 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-multus-conf-dir\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.378436 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/5d5445a4-417c-448a-a8a0-4a4f81828aff-proxy-tls\") pod \"machine-config-daemon-dshsf\" (UID: \"5d5445a4-417c-448a-a8a0-4a4f81828aff\") " pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.378459 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b-cni-binary-copy\") pod \"multus-additional-cni-plugins-qbndv\" (UID: \"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\") " pod="openshift-multus/multus-additional-cni-plugins-qbndv" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.378479 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-host-var-lib-cni-bin\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.378495 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/787c64de-9ce4-41eb-a525-948c23e84595-cni-binary-copy\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.378518 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/5d5445a4-417c-448a-a8a0-4a4f81828aff-rootfs\") pod \"machine-config-daemon-dshsf\" (UID: \"5d5445a4-417c-448a-a8a0-4a4f81828aff\") " pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.378543 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b-tuning-conf-dir\") pod \"multus-additional-cni-plugins-qbndv\" (UID: \"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\") " pod="openshift-multus/multus-additional-cni-plugins-qbndv" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.378641 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-cnibin\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc 
kubenswrapper[4857]: I1128 13:29:32.378681 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-host-run-netns\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.378888 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-system-cni-dir\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.378925 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/787c64de-9ce4-41eb-a525-948c23e84595-multus-daemon-config\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.378975 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-host-run-k8s-cni-cncf-io\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.379010 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-os-release\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.379194 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-multus-cni-dir\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.379245 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-multus-socket-dir-parent\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.379252 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-hostroot\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.379289 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/3ca720c4-3756-47cc-b59d-3167a4141804-hosts-file\") pod \"node-resolver-ggbvt\" (UID: \"3ca720c4-3756-47cc-b59d-3167a4141804\") " pod="openshift-dns/node-resolver-ggbvt" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.379326 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-host-var-lib-cni-multus\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") 
" pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.379365 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-host-var-lib-kubelet\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.379556 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/5d5445a4-417c-448a-a8a0-4a4f81828aff-rootfs\") pod \"machine-config-daemon-dshsf\" (UID: \"5d5445a4-417c-448a-a8a0-4a4f81828aff\") " pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.379700 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-etc-kubernetes\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.379704 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/787c64de-9ce4-41eb-a525-948c23e84595-host-run-multus-certs\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.380064 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/787c64de-9ce4-41eb-a525-948c23e84595-cni-binary-copy\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.384867 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/5d5445a4-417c-448a-a8a0-4a4f81828aff-proxy-tls\") pod \"machine-config-daemon-dshsf\" (UID: \"5d5445a4-417c-448a-a8a0-4a4f81828aff\") " pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.389269 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:32Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.398430 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rmswj\" (UniqueName: \"kubernetes.io/projected/787c64de-9ce4-41eb-a525-948c23e84595-kube-api-access-rmswj\") pod \"multus-rb7tq\" (UID: \"787c64de-9ce4-41eb-a525-948c23e84595\") " pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.401207 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5zxxn\" (UniqueName: \"kubernetes.io/projected/3ca720c4-3756-47cc-b59d-3167a4141804-kube-api-access-5zxxn\") pod \"node-resolver-ggbvt\" (UID: \"3ca720c4-3756-47cc-b59d-3167a4141804\") " pod="openshift-dns/node-resolver-ggbvt" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.405256 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:32Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.414439 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d582p\" (UniqueName: \"kubernetes.io/projected/5d5445a4-417c-448a-a8a0-4a4f81828aff-kube-api-access-d582p\") pod \"machine-config-daemon-dshsf\" (UID: \"5d5445a4-417c-448a-a8a0-4a4f81828aff\") " pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.420213 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:32Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.435753 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:32Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.451571 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:32Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.464628 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:32Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.478595 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:32Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.479047 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b-cnibin\") pod \"multus-additional-cni-plugins-qbndv\" (UID: \"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\") " pod="openshift-multus/multus-additional-cni-plugins-qbndv" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 
13:29:32.479074 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f6xvn\" (UniqueName: \"kubernetes.io/projected/1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b-kube-api-access-f6xvn\") pod \"multus-additional-cni-plugins-qbndv\" (UID: \"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\") " pod="openshift-multus/multus-additional-cni-plugins-qbndv" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.479092 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b-os-release\") pod \"multus-additional-cni-plugins-qbndv\" (UID: \"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\") " pod="openshift-multus/multus-additional-cni-plugins-qbndv" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.479134 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b-system-cni-dir\") pod \"multus-additional-cni-plugins-qbndv\" (UID: \"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\") " pod="openshift-multus/multus-additional-cni-plugins-qbndv" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.479138 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b-cnibin\") pod \"multus-additional-cni-plugins-qbndv\" (UID: \"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\") " pod="openshift-multus/multus-additional-cni-plugins-qbndv" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.479150 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b-cni-binary-copy\") pod \"multus-additional-cni-plugins-qbndv\" (UID: \"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\") " pod="openshift-multus/multus-additional-cni-plugins-qbndv" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.479190 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b-tuning-conf-dir\") pod \"multus-additional-cni-plugins-qbndv\" (UID: \"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\") " pod="openshift-multus/multus-additional-cni-plugins-qbndv" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.479210 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-qbndv\" (UID: \"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\") " pod="openshift-multus/multus-additional-cni-plugins-qbndv" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.479216 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b-system-cni-dir\") pod \"multus-additional-cni-plugins-qbndv\" (UID: \"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\") " pod="openshift-multus/multus-additional-cni-plugins-qbndv" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.479297 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b-os-release\") pod \"multus-additional-cni-plugins-qbndv\" (UID: 
\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\") " pod="openshift-multus/multus-additional-cni-plugins-qbndv" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.480084 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-qbndv\" (UID: \"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\") " pod="openshift-multus/multus-additional-cni-plugins-qbndv" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.480232 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b-cni-binary-copy\") pod \"multus-additional-cni-plugins-qbndv\" (UID: \"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\") " pod="openshift-multus/multus-additional-cni-plugins-qbndv" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.497369 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f6xvn\" (UniqueName: \"kubernetes.io/projected/1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b-kube-api-access-f6xvn\") pod \"multus-additional-cni-plugins-qbndv\" (UID: \"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\") " pod="openshift-multus/multus-additional-cni-plugins-qbndv" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.537528 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 13:29:32 crc kubenswrapper[4857]: W1128 13:29:32.548342 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5d5445a4_417c_448a_a8a0_4a4f81828aff.slice/crio-959c692d2bf1ad26e9652b30ccb70777aeeeed0a95f8a8f904b216b67778f9ba WatchSource:0}: Error finding container 959c692d2bf1ad26e9652b30ccb70777aeeeed0a95f8a8f904b216b67778f9ba: Status 404 returned error can't find the container with id 959c692d2bf1ad26e9652b30ccb70777aeeeed0a95f8a8f904b216b67778f9ba Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.548879 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-ggbvt" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.559626 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-rb7tq" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.604998 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-h8td2"] Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.606025 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: W1128 13:29:32.610547 4857 reflector.go:561] object-"openshift-ovn-kubernetes"/"ovnkube-script-lib": failed to list *v1.ConfigMap: configmaps "ovnkube-script-lib" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-ovn-kubernetes": no relationship found between node 'crc' and this object Nov 28 13:29:32 crc kubenswrapper[4857]: E1128 13:29:32.610723 4857 reflector.go:158] "Unhandled Error" err="object-\"openshift-ovn-kubernetes\"/\"ovnkube-script-lib\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"ovnkube-script-lib\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-ovn-kubernetes\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 13:29:32 crc kubenswrapper[4857]: W1128 13:29:32.610336 4857 reflector.go:561] object-"openshift-ovn-kubernetes"/"env-overrides": failed to list *v1.ConfigMap: configmaps "env-overrides" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-ovn-kubernetes": no relationship found between node 'crc' and this object Nov 28 13:29:32 crc kubenswrapper[4857]: E1128 13:29:32.611048 4857 reflector.go:158] "Unhandled Error" err="object-\"openshift-ovn-kubernetes\"/\"env-overrides\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"env-overrides\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-ovn-kubernetes\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.616071 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.616343 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.616651 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.617113 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.617781 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.631324 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-che
ck-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:32Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.648901 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:32Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.665502 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:32Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.676824 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b-tuning-conf-dir\") pod \"multus-additional-cni-plugins-qbndv\" (UID: \"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\") " pod="openshift-multus/multus-additional-cni-plugins-qbndv" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.679663 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:32Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.681100 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-run-ovn-kubernetes\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.681144 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-ovnkube-config\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.681168 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-systemd-units\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.681276 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: 
\"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-log-socket\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.681475 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-kubelet\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.681514 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-etc-openvswitch\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.681547 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-var-lib-openvswitch\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.681603 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-ovn-node-metrics-cert\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.681756 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-brfsw\" (UniqueName: \"kubernetes.io/projected/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-kube-api-access-brfsw\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.681848 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-run-openvswitch\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.681882 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-env-overrides\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.681915 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-ovnkube-script-lib\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.681988 4857 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-node-log\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.682024 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-cni-netd\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.682052 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.682088 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-run-netns\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.682120 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-cni-bin\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.682181 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-run-systemd\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.682237 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-slash\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.682392 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-run-ovn\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.698365 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:32Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.721247 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:32Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.738101 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:32Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.753245 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd7
91fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:32Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.769204 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:32Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.783056 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-run-ovn\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.783160 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-run-ovn-kubernetes\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.783190 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-ovnkube-config\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.783205 4857 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-run-ovn\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.783226 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-systemd-units\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.783252 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-log-socket\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.783277 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-kubelet\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.783323 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-kubelet\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.783335 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-log-socket\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.783247 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-run-ovn-kubernetes\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.783284 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-systemd-units\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.783388 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-etc-openvswitch\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.783427 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-var-lib-openvswitch\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.783477 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-etc-openvswitch\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.783567 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-var-lib-openvswitch\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.784110 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-ovnkube-config\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.784562 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-ovn-node-metrics-cert\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.784614 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-brfsw\" (UniqueName: \"kubernetes.io/projected/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-kube-api-access-brfsw\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.784694 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-run-openvswitch\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.784719 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-env-overrides\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.784744 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-ovnkube-script-lib\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.784772 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-node-log\") pod 
\"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.784795 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-cni-netd\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.784818 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.784856 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-run-netns\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.784877 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-cni-bin\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.784863 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-run-openvswitch\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.784922 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-run-systemd\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.784970 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-slash\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.784978 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-node-log\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.784967 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-h8td2\" (UID: 
\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.785036 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-run-systemd\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.785055 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-run-netns\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.785069 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-cni-bin\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.785056 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-cni-netd\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.785121 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-slash\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.785420 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:32Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.787726 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-ovn-node-metrics-cert\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.802596 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-brfsw\" (UniqueName: \"kubernetes.io/projected/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-kube-api-access-brfsw\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.811084 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:32Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.843737 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:32Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.867577 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-qbndv" Nov 28 13:29:32 crc kubenswrapper[4857]: I1128 13:29:32.883929 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:32Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:32 crc kubenswrapper[4857]: W1128 13:29:32.887224 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1dbaee30_9cc4_4ff6_b89f_afe2b1a1769b.slice/crio-006efed507262ca5d5e5e9e2781946c3a4bac2ed8ca15e20268ad2d5b7ab3eda WatchSource:0}: Error finding container 
006efed507262ca5d5e5e9e2781946c3a4bac2ed8ca15e20268ad2d5b7ab3eda: Status 404 returned error can't find the container with id 006efed507262ca5d5e5e9e2781946c3a4bac2ed8ca15e20268ad2d5b7ab3eda
Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.006015 4857 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.007932 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.007978 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.007989 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.008115 4857 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.022282 4857 kubelet_node_status.go:115] "Node was previously registered" node="crc"
Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.022603 4857 kubelet_node_status.go:79] "Successfully registered node" node="crc"
Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.023593 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.023628 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.023639 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.023657 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.023667 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:33Z","lastTransitionTime":"2025-11-28T13:29:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
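Every status-patch failure in this log traces back to one fact reported in the x509 error itself: the serving certificate behind the network-node-identity webhook at https://127.0.0.1:9743 expired on 2025-08-24T17:21:41Z, while the node clock reads 2025-11-28. Below is a minimal sketch, in Go, for confirming the certificate's validity window from the node itself; it assumes the endpoint named in the log is reachable locally, and the file name certcheck.go is hypothetical, not part of any cluster tooling.

// certcheck.go: a minimal sketch for inspecting the serving certificate
// behind the webhook failures recorded above. Assumes it runs on the node,
// so https://127.0.0.1:9743 (the endpoint from the log) is reachable.
package main

import (
	"crypto/tls"
	"fmt"
	"log"
	"time"
)

func main() {
	// InsecureSkipVerify is deliberate: verification already fails because
	// the certificate is expired, and the handshake here is only used to
	// read the certificate's validity window.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		log.Fatalf("dial webhook endpoint: %v", err)
	}
	defer conn.Close()

	now := time.Now().UTC()
	for _, cert := range conn.ConnectionState().PeerCertificates {
		// NotAfter is the field the kubelet's x509 error compares against
		// the current time ("current time ... is after ...").
		fmt.Printf("subject=%s notBefore=%s notAfter=%s expired=%t\n",
			cert.Subject,
			cert.NotBefore.UTC().Format(time.RFC3339),
			cert.NotAfter.UTC().Format(time.RFC3339),
			now.After(cert.NotAfter))
	}
}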
Nov 28 13:29:33 crc kubenswrapper[4857]: E1128 13:29:33.052449 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.056422 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.056482 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure"
Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.056497 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.056521 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.056540 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:33Z","lastTransitionTime":"2025-11-28T13:29:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:29:33 crc kubenswrapper[4857]: E1128 13:29:33.077130 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.081429 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.081470 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure"
Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.081485 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.081502 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.081514 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:33Z","lastTransitionTime":"2025-11-28T13:29:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:29:33 crc kubenswrapper[4857]: E1128 13:29:33.096447 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.102915 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.102980 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure"
Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.102991 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.103009 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.103021 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:33Z","lastTransitionTime":"2025-11-28T13:29:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.115966 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" event={"ID":"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b","Type":"ContainerStarted","Data":"006efed507262ca5d5e5e9e2781946c3a4bac2ed8ca15e20268ad2d5b7ab3eda"}
Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.119169 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerStarted","Data":"c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704"}
Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.119201 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerStarted","Data":"959c692d2bf1ad26e9652b30ccb70777aeeeed0a95f8a8f904b216b67778f9ba"}
Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.122523 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Nov 28 13:29:33 crc kubenswrapper[4857]: E1128 13:29:33.124828 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056
b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951
},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-28T13:29:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.126621 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd"} Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.127167 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.128014 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rb7tq" event={"ID":"787c64de-9ce4-41eb-a525-948c23e84595","Type":"ContainerStarted","Data":"d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395"} Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.128042 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rb7tq" event={"ID":"787c64de-9ce4-41eb-a525-948c23e84595","Type":"ContainerStarted","Data":"5e78273d7d9281dc7240f08c9dd6082819813a6574fc9586ea14be277ea77fda"} Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.130643 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-ggbvt" event={"ID":"3ca720c4-3756-47cc-b59d-3167a4141804","Type":"ContainerStarted","Data":"ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986"} Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.130670 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-ggbvt" event={"ID":"3ca720c4-3756-47cc-b59d-3167a4141804","Type":"ContainerStarted","Data":"ca33e7defb278e63d9f84017a1e7f737804de604162ccade7de1ff96c519b301"} Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.133011 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.133052 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.133063 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.133082 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.133095 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:33Z","lastTransitionTime":"2025-11-28T13:29:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.148233 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:33 crc kubenswrapper[4857]: E1128 13:29:33.148319 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:33 crc kubenswrapper[4857]: E1128 13:29:33.148501 4857 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.150357 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.150389 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.150399 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.150417 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.150426 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:33Z","lastTransitionTime":"2025-11-28T13:29:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.162896 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.183333 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.206819 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.222830 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.238183 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.252975 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.253034 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.253047 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.253071 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.253088 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:33Z","lastTransitionTime":"2025-11-28T13:29:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.254465 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.270381 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.288683 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.309144 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.323961 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.338294 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.355628 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.355677 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.355690 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.355710 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.355720 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:33Z","lastTransitionTime":"2025-11-28T13:29:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.356243 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.372897 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.386396 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.399843 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.416731 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.430342 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.446047 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.459117 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.459172 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.459186 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.459205 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.459220 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:33Z","lastTransitionTime":"2025-11-28T13:29:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.463394 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.477281 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.493861 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.513353 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.536318 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.546961 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.556188 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-env-overrides\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.556608 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.562172 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.562237 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.562250 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.562274 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.562287 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:33Z","lastTransitionTime":"2025-11-28T13:29:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.574965 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:33Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.667390 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.667913 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.667925 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.667977 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.667998 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:33Z","lastTransitionTime":"2025-11-28T13:29:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.770230 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.770271 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.770282 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.770299 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.770316 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:33Z","lastTransitionTime":"2025-11-28T13:29:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:33 crc kubenswrapper[4857]: E1128 13:29:33.785840 4857 configmap.go:193] Couldn't get configMap openshift-ovn-kubernetes/ovnkube-script-lib: failed to sync configmap cache: timed out waiting for the condition Nov 28 13:29:33 crc kubenswrapper[4857]: E1128 13:29:33.785977 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-ovnkube-script-lib podName:46c5e02c-be1a-45b7-86ef-cc8c484c4f71 nodeName:}" failed. No retries permitted until 2025-11-28 13:29:34.285935696 +0000 UTC m=+24.409877133 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "ovnkube-script-lib" (UniqueName: "kubernetes.io/configmap/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-ovnkube-script-lib") pod "ovnkube-node-h8td2" (UID: "46c5e02c-be1a-45b7-86ef-cc8c484c4f71") : failed to sync configmap cache: timed out waiting for the condition Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.798852 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.798990 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.799035 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.799060 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.799112 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:29:33 crc kubenswrapper[4857]: E1128 13:29:33.799199 4857 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 13:29:33 crc kubenswrapper[4857]: E1128 13:29:33.799253 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 13:29:37.799236621 +0000 UTC m=+27.923178058 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 13:29:33 crc kubenswrapper[4857]: E1128 13:29:33.799316 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:29:37.799308473 +0000 UTC m=+27.923249910 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:29:33 crc kubenswrapper[4857]: E1128 13:29:33.799423 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 13:29:33 crc kubenswrapper[4857]: E1128 13:29:33.799439 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 13:29:33 crc kubenswrapper[4857]: E1128 13:29:33.799455 4857 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:29:33 crc kubenswrapper[4857]: E1128 13:29:33.799484 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 13:29:37.799475878 +0000 UTC m=+27.923417315 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:29:33 crc kubenswrapper[4857]: E1128 13:29:33.799538 4857 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 13:29:33 crc kubenswrapper[4857]: E1128 13:29:33.799563 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 13:29:37.79955616 +0000 UTC m=+27.923497597 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 13:29:33 crc kubenswrapper[4857]: E1128 13:29:33.799618 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 13:29:33 crc kubenswrapper[4857]: E1128 13:29:33.799630 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 13:29:33 crc kubenswrapper[4857]: E1128 13:29:33.799640 4857 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:29:33 crc kubenswrapper[4857]: E1128 13:29:33.799664 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 13:29:37.799656652 +0000 UTC m=+27.923598089 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.873037 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.873075 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.873083 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.873098 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.873111 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:33Z","lastTransitionTime":"2025-11-28T13:29:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.923578 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.976266 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.976333 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.976343 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.976360 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:33 crc kubenswrapper[4857]: I1128 13:29:33.976372 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:33Z","lastTransitionTime":"2025-11-28T13:29:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.078924 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.079005 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.079016 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.079036 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.079051 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:34Z","lastTransitionTime":"2025-11-28T13:29:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.134607 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8"} Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.136962 4857 generic.go:334] "Generic (PLEG): container finished" podID="1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b" containerID="fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa" exitCode=0 Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.137075 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" event={"ID":"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b","Type":"ContainerDied","Data":"fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa"} Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.139047 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerStarted","Data":"b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45"} Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.151739 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: 
current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.171308 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.181811 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.181880 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.181894 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.181913 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.181927 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:34Z","lastTransitionTime":"2025-11-28T13:29:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI 
configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.183985 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 
2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.199229 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.212909 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z"
Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.228039 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.228074 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.228046 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 13:29:34 crc kubenswrapper[4857]: E1128 13:29:34.228179 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 13:29:34 crc kubenswrapper[4857]: E1128 13:29:34.228262 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 13:29:34 crc kubenswrapper[4857]: E1128 13:29:34.228336 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
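
[Annotation] The repeated "Error syncing pod" records above all trace back to one condition: the kubelet keeps the node NotReady until a CNI network config appears in the directory named in the message. A minimal Go sketch of that readiness probe, assuming the conf dir from the log message and the usual libcni extensions (.conf, .conflist, .json); this is an illustration of the check, not the kubelet's actual implementation:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Directory taken from the kubelet message above; adjust for other distros.
	confDir := "/etc/kubernetes/cni/net.d"
	entries, err := os.ReadDir(confDir)
	if err != nil {
		fmt.Println("cannot read CNI conf dir:", err)
		return
	}
	for _, e := range entries {
		// libcni-style loaders accept .conf, .conflist and .json network configs.
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			fmt.Println("found CNI config:", e.Name())
			return
		}
	}
	fmt.Println("no CNI configuration file found; node would stay NotReady")
}

Once the network plugin (here, the ovnkube-node pod being mounted below) writes its config into that directory, the runtime reports NetworkReady=true and these sync errors stop.
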
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.234436 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.255074 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.272402 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.283832 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.283861 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.283870 4857 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.283884 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.283893 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:34Z","lastTransitionTime":"2025-11-28T13:29:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.286648 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.304973 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-ovnkube-script-lib\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 
13:29:34.305716 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-ovnkube-script-lib\") pod \"ovnkube-node-h8td2\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") " pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.306909 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.326882 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.341908 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.358508 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.373084 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.387371 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.387405 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.387416 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.387435 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.387445 4857 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:34Z","lastTransitionTime":"2025-11-28T13:29:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.389523 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube
rnetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.404406 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.418121 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.427736 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.428726 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.441070 4857 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.459445 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.473722 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.490243 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.490304 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.490319 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.490337 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.490350 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:34Z","lastTransitionTime":"2025-11-28T13:29:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.492039 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.512568 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc 
kubenswrapper[4857]: I1128 13:29:34.534470 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-cs9jw"] Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.535272 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-cs9jw" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.537693 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.537739 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.538264 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.538334 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.544904 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.560874 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.573612 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.592407 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.594476 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.594517 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.594534 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.594559 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.594581 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:34Z","lastTransitionTime":"2025-11-28T13:29:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.608529 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: W1128 13:29:34.619442 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod46c5e02c_be1a_45b7_86ef_cc8c484c4f71.slice/crio-4ba54e9dd19b060f17879becff8f7a30408c8239ce144a6b05f961e92de8a9c1 WatchSource:0}: Error finding container 4ba54e9dd19b060f17879becff8f7a30408c8239ce144a6b05f961e92de8a9c1: Status 404 returned error can't find the container with id 4ba54e9dd19b060f17879becff8f7a30408c8239ce144a6b05f961e92de8a9c1 Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.630596 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.644082 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.662385 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c85
7df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay
.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.683545 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.697802 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.698159 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 
13:29:34.698171 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.698190 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.698203 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:34Z","lastTransitionTime":"2025-11-28T13:29:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.700106 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.708083 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/c15e860e-f7d7-4bdb-b09b-b6099204b5e4-serviceca\") pod \"node-ca-cs9jw\" (UID: \"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\") " pod="openshift-image-registry/node-ca-cs9jw" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.708148 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c15e860e-f7d7-4bdb-b09b-b6099204b5e4-host\") pod \"node-ca-cs9jw\" (UID: \"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\") " pod="openshift-image-registry/node-ca-cs9jw" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.708176 4857 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zchsr\" (UniqueName: \"kubernetes.io/projected/c15e860e-f7d7-4bdb-b09b-b6099204b5e4-kube-api-access-zchsr\") pod \"node-ca-cs9jw\" (UID: \"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\") " pod="openshift-image-registry/node-ca-cs9jw" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.716108 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.730782 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.744393 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cs9jw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zchsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cs9jw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.760222 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.776126 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.788539 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.800668 4857 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:34Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.801213 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.801254 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.801266 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.801281 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.801291 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:34Z","lastTransitionTime":"2025-11-28T13:29:34Z","reason":"KubeletNotReady","message":"container runtime network not 
ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.808579 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/c15e860e-f7d7-4bdb-b09b-b6099204b5e4-serviceca\") pod \"node-ca-cs9jw\" (UID: \"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\") " pod="openshift-image-registry/node-ca-cs9jw" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.808630 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c15e860e-f7d7-4bdb-b09b-b6099204b5e4-host\") pod \"node-ca-cs9jw\" (UID: \"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\") " pod="openshift-image-registry/node-ca-cs9jw" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.808657 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zchsr\" (UniqueName: \"kubernetes.io/projected/c15e860e-f7d7-4bdb-b09b-b6099204b5e4-kube-api-access-zchsr\") pod \"node-ca-cs9jw\" (UID: \"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\") " pod="openshift-image-registry/node-ca-cs9jw" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.808816 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/c15e860e-f7d7-4bdb-b09b-b6099204b5e4-host\") pod \"node-ca-cs9jw\" (UID: \"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\") " pod="openshift-image-registry/node-ca-cs9jw" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.809560 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/c15e860e-f7d7-4bdb-b09b-b6099204b5e4-serviceca\") pod \"node-ca-cs9jw\" (UID: \"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\") " pod="openshift-image-registry/node-ca-cs9jw" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.830833 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zchsr\" (UniqueName: \"kubernetes.io/projected/c15e860e-f7d7-4bdb-b09b-b6099204b5e4-kube-api-access-zchsr\") pod \"node-ca-cs9jw\" (UID: \"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\") " pod="openshift-image-registry/node-ca-cs9jw" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.856995 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-cs9jw" Nov 28 13:29:34 crc kubenswrapper[4857]: W1128 13:29:34.868279 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc15e860e_f7d7_4bdb_b09b_b6099204b5e4.slice/crio-c0accbf319b1ebb2fa9ee15512cd1a1948a9202d1e648f270f53ecb5a257c177 WatchSource:0}: Error finding container c0accbf319b1ebb2fa9ee15512cd1a1948a9202d1e648f270f53ecb5a257c177: Status 404 returned error can't find the container with id c0accbf319b1ebb2fa9ee15512cd1a1948a9202d1e648f270f53ecb5a257c177 Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.904717 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.904764 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.904776 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.904791 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:34 crc kubenswrapper[4857]: I1128 13:29:34.904804 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:34Z","lastTransitionTime":"2025-11-28T13:29:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.009279 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.009331 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.009340 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.009360 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.009371 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:35Z","lastTransitionTime":"2025-11-28T13:29:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.112969 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.113385 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.113395 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.113411 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.113425 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:35Z","lastTransitionTime":"2025-11-28T13:29:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.144793 4857 generic.go:334] "Generic (PLEG): container finished" podID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerID="7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb" exitCode=0 Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.144825 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" event={"ID":"46c5e02c-be1a-45b7-86ef-cc8c484c4f71","Type":"ContainerDied","Data":"7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb"} Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.144860 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" event={"ID":"46c5e02c-be1a-45b7-86ef-cc8c484c4f71","Type":"ContainerStarted","Data":"4ba54e9dd19b060f17879becff8f7a30408c8239ce144a6b05f961e92de8a9c1"} Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.150272 4857 generic.go:334] "Generic (PLEG): container finished" podID="1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b" containerID="f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173" exitCode=0 Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.150364 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" event={"ID":"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b","Type":"ContainerDied","Data":"f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173"} Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.152656 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-cs9jw" event={"ID":"c15e860e-f7d7-4bdb-b09b-b6099204b5e4","Type":"ContainerStarted","Data":"c0accbf319b1ebb2fa9ee15512cd1a1948a9202d1e648f270f53ecb5a257c177"} Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.162760 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.180874 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.194901 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.208696 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cs9jw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zchsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cs9jw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.216105 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.216134 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.216145 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.216161 4857 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.216172 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:35Z","lastTransitionTime":"2025-11-28T13:29:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.222987 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.238976 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.254090 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.266693 4857 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.281182 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.294989 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-28T13:29:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.310916 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.318764 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.318908 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.318919 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.318938 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.318965 4857 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:35Z","lastTransitionTime":"2025-11-28T13:29:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.324719 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.339848 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\
\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPa
th\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.363901 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:35Z 
is after 2025-08-24T17:21:41Z" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.401516 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.421582 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.421618 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.421628 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.421646 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.421657 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:35Z","lastTransitionTime":"2025-11-28T13:29:35Z","reason":"KubeletNotReady","message":"container runtime 
network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.443908 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.484191 
4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursive
ReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{
\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.525092 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.525169 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.525195 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.525229 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.525252 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:35Z","lastTransitionTime":"2025-11-28T13:29:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.527511 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b
48cda253aa89bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.566089 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cs9jw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zchsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cs9jw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.603038 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.628462 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.628560 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.628575 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.628600 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.628614 4857 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:35Z","lastTransitionTime":"2025-11-28T13:29:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.642883 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.687287 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.722531 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.734530 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.734575 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.734590 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.734609 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.734623 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:35Z","lastTransitionTime":"2025-11-28T13:29:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.765812 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.804288 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.838350 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.838413 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.838430 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.838454 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.838471 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:35Z","lastTransitionTime":"2025-11-28T13:29:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.845499 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.883821 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025
-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.923710 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.941225 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.941284 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.941301 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.941327 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:35 crc kubenswrapper[4857]: I1128 13:29:35.941344 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:35Z","lastTransitionTime":"2025-11-28T13:29:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.044389 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.044475 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.044487 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.044514 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.044529 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:36Z","lastTransitionTime":"2025-11-28T13:29:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.147284 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.147736 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.147746 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.147762 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.147774 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:36Z","lastTransitionTime":"2025-11-28T13:29:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.159494 4857 generic.go:334] "Generic (PLEG): container finished" podID="1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b" containerID="f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae" exitCode=0 Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.159579 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" event={"ID":"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b","Type":"ContainerDied","Data":"f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae"} Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.161501 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-cs9jw" event={"ID":"c15e860e-f7d7-4bdb-b09b-b6099204b5e4","Type":"ContainerStarted","Data":"6aa30f3141838a2780e6e300545fcb2d71a80ed4b146759f8dff91c976c3f36a"} Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.168100 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" event={"ID":"46c5e02c-be1a-45b7-86ef-cc8c484c4f71","Type":"ContainerStarted","Data":"60c0f808818d6f71bad9f7b52d7fe42823b6e8ad83d3395366cab0f90fb75ec2"} Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.168166 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" event={"ID":"46c5e02c-be1a-45b7-86ef-cc8c484c4f71","Type":"ContainerStarted","Data":"933498cbc402ee7f14aab38ef86c401dc46df3c21d986c2a97ff1871bb7b7758"} Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.168189 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" event={"ID":"46c5e02c-be1a-45b7-86ef-cc8c484c4f71","Type":"ContainerStarted","Data":"b553128fa5576f5ac2e327e5e679a8098348ee31f0632c34d2b725d92198f8b4"} Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.168202 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" event={"ID":"46c5e02c-be1a-45b7-86ef-cc8c484c4f71","Type":"ContainerStarted","Data":"d8587850a181bf875ab24005a4b044489046212ae89b15834ccb0f0b46d3518b"} Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.168215 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" event={"ID":"46c5e02c-be1a-45b7-86ef-cc8c484c4f71","Type":"ContainerStarted","Data":"a4bb280266a0eb3369080dfcb1e4d0522f2d365a300f82637288356703bde242"} Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.168229 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" event={"ID":"46c5e02c-be1a-45b7-86ef-cc8c484c4f71","Type":"ContainerStarted","Data":"d1dc7ae9f1870db29ad30c6b4f3c8f8d28015c1e3d9bf27171e5e8a95d51b1b7"} Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.179679 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.197882 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.211089 4857 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.228196 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.228247 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.229260 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.229283 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:36 crc kubenswrapper[4857]: E1128 13:29:36.229364 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:29:36 crc kubenswrapper[4857]: E1128 13:29:36.229423 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:29:36 crc kubenswrapper[4857]: E1128 13:29:36.229562 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.244062 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.249785 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.249843 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.249856 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.249874 4857 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeNotReady" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.249886 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:36Z","lastTransitionTime":"2025-11-28T13:29:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.259605 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay
.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.277227 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.321620 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:36Z 
is after 2025-08-24T17:21:41Z" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.343005 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.355874 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.355911 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.355924 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.355962 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.355979 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:36Z","lastTransitionTime":"2025-11-28T13:29:36Z","reason":"KubeletNotReady","message":"container runtime 
network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.363984 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.382488 
4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.399783 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.440413 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cs9jw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zchsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cs9jw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.458264 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.458311 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.458323 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.458339 4857 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.458351 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:36Z","lastTransitionTime":"2025-11-28T13:29:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.483958 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-res
ources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery 
information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.518484 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cs9jw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6aa30f3141838a2780e6e300545fcb2d71a80ed4b146759f8dff91c976c3f36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zchsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cs9jw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.561389 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.561797 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.561848 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.561858 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.561874 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.561883 4857 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:36Z","lastTransitionTime":"2025-11-28T13:29:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.600413 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.644957 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.664705 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.664743 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.664753 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.664771 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.664785 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:36Z","lastTransitionTime":"2025-11-28T13:29:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.681089 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.723257 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.760791 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.767271 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.767302 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.767313 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.767330 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.767342 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:36Z","lastTransitionTime":"2025-11-28T13:29:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.801167 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.841594 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025
-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.869318 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.869355 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.869365 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.869378 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.869388 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:36Z","lastTransitionTime":"2025-11-28T13:29:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.879084 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.920649 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.962530 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.971871 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.971912 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.971922 4857 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.971938 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:36 crc kubenswrapper[4857]: I1128 13:29:36.971969 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:36Z","lastTransitionTime":"2025-11-28T13:29:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.006115 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.046698 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:37Z 
is after 2025-08-24T17:21:41Z" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.074564 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.074640 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.074653 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.074864 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.074879 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:37Z","lastTransitionTime":"2025-11-28T13:29:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.174973 4857 generic.go:334] "Generic (PLEG): container finished" podID="1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b" containerID="b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5" exitCode=0 Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.175053 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" event={"ID":"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b","Type":"ContainerDied","Data":"b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5"} Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.177504 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.177547 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.177559 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.177576 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.177590 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:37Z","lastTransitionTime":"2025-11-28T13:29:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.192764 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.212340 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.229659 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.242352 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cs9jw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6aa30f3141838a2780e6e300545fcb2d71a80ed4b146759f8dff91c976c3f36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zchsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cs9jw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.254349 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.280442 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.280482 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.280543 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.280569 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.280582 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:37Z","lastTransitionTime":"2025-11-28T13:29:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.285165 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.325539 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.359220 4857 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.385778 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.385834 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.385847 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.385866 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.385879 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:37Z","lastTransitionTime":"2025-11-28T13:29:37Z","reason":"KubeletNotReady","message":"container runtime network not 
ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.402086 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T1
3:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.442084 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.482636 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.488568 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.488611 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.488624 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.488646 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.488659 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:37Z","lastTransitionTime":"2025-11-28T13:29:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.522007 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.561395 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/
ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\"
:\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.591296 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.591350 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.591369 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.591390 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.591401 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:37Z","lastTransitionTime":"2025-11-28T13:29:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin 
returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.609452 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\
\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serv
iceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"contai
nerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.694308 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.694366 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.694379 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.694398 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.694414 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:37Z","lastTransitionTime":"2025-11-28T13:29:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.798187 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.798276 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.798301 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.798334 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.798359 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:37Z","lastTransitionTime":"2025-11-28T13:29:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.841818 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.842034 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:29:37 crc kubenswrapper[4857]: E1128 13:29:37.842079 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:29:45.842036227 +0000 UTC m=+35.965977744 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:29:37 crc kubenswrapper[4857]: E1128 13:29:37.842134 4857 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 13:29:37 crc kubenswrapper[4857]: E1128 13:29:37.842199 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 13:29:45.842186381 +0000 UTC m=+35.966127828 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.842135 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.842259 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.842287 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:29:37 crc kubenswrapper[4857]: E1128 13:29:37.842310 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 13:29:37 crc kubenswrapper[4857]: E1128 13:29:37.842344 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 13:29:37 crc kubenswrapper[4857]: E1128 13:29:37.842372 4857 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:29:37 crc kubenswrapper[4857]: E1128 13:29:37.842380 4857 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 13:29:37 crc kubenswrapper[4857]: E1128 13:29:37.842411 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 13:29:45.842402387 +0000 UTC m=+35.966343834 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 13:29:37 crc kubenswrapper[4857]: E1128 13:29:37.842444 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 13:29:45.842420967 +0000 UTC m=+35.966362454 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:29:37 crc kubenswrapper[4857]: E1128 13:29:37.842476 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 13:29:37 crc kubenswrapper[4857]: E1128 13:29:37.842525 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 13:29:37 crc kubenswrapper[4857]: E1128 13:29:37.842540 4857 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:29:37 crc kubenswrapper[4857]: E1128 13:29:37.842620 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 13:29:45.842598712 +0000 UTC m=+35.966540229 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.902207 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.902261 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.902278 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.902301 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:37 crc kubenswrapper[4857]: I1128 13:29:37.902316 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:37Z","lastTransitionTime":"2025-11-28T13:29:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.005456 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.005526 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.005546 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.005576 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.005593 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:38Z","lastTransitionTime":"2025-11-28T13:29:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.108188 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.108250 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.108264 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.108287 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.108302 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:38Z","lastTransitionTime":"2025-11-28T13:29:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.184762 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" event={"ID":"46c5e02c-be1a-45b7-86ef-cc8c484c4f71","Type":"ContainerStarted","Data":"0f7d207494d4959cc23a5df6738539f073ec3c45cfe0339bd14b1a3e876f6031"} Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.187174 4857 generic.go:334] "Generic (PLEG): container finished" podID="1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b" containerID="d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38" exitCode=0 Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.187222 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" event={"ID":"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b","Type":"ContainerDied","Data":"d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38"} Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.200790 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cs9jw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6aa30f3141838a2780e6e300545fcb2d71a80ed4b146759f8dff91c976c3f36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zchsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cs9jw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:38Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.210823 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.210895 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.210909 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.210930 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.210974 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:38Z","lastTransitionTime":"2025-11-28T13:29:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.223421 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f
7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:38Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.228836 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.228917 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:29:38 crc kubenswrapper[4857]: E1128 13:29:38.228975 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:29:38 crc kubenswrapper[4857]: E1128 13:29:38.229098 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.229269 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:29:38 crc kubenswrapper[4857]: E1128 13:29:38.229342 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.239231 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:38Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.253504 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:38Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.265646 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:38Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.282773 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:38Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.297558 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with 
unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:38Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.310422 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:38Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.313184 4857 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.313221 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.313230 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.313246 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.313260 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:38Z","lastTransitionTime":"2025-11-28T13:29:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.326660 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\
\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:38Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.337794 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:38Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.350855 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:38Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.365104 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:38Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.383769 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:38Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.401518 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:38Z 
is after 2025-08-24T17:21:41Z" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.419050 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.419123 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.419139 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.419168 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.419186 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:38Z","lastTransitionTime":"2025-11-28T13:29:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.522487 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.522534 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.522547 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.522568 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.522582 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:38Z","lastTransitionTime":"2025-11-28T13:29:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.625643 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.625976 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.625987 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.626005 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.626018 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:38Z","lastTransitionTime":"2025-11-28T13:29:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.733183 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.733233 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.733248 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.733304 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.733326 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:38Z","lastTransitionTime":"2025-11-28T13:29:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.836243 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.836292 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.836305 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.836323 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.836334 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:38Z","lastTransitionTime":"2025-11-28T13:29:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.939224 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.939264 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.939276 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.939293 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:38 crc kubenswrapper[4857]: I1128 13:29:38.939306 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:38Z","lastTransitionTime":"2025-11-28T13:29:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.043078 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.043133 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.043146 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.043170 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.043184 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:39Z","lastTransitionTime":"2025-11-28T13:29:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.146095 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.146160 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.146182 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.146209 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.146226 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:39Z","lastTransitionTime":"2025-11-28T13:29:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.193775 4857 generic.go:334] "Generic (PLEG): container finished" podID="1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b" containerID="ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350" exitCode=0 Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.193854 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" event={"ID":"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b","Type":"ContainerDied","Data":"ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350"} Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.219634 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a57
8bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:39Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.240353 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:39Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.248575 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.248624 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.248639 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.248656 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.248669 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:39Z","lastTransitionTime":"2025-11-28T13:29:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.258013 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:39Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.274574 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:39Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.298047 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:39Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.318706 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:39Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.332176 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:39Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.348481 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:39Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.351189 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.351213 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.351222 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.351235 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.351244 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:39Z","lastTransitionTime":"2025-11-28T13:29:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.361581 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:39Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.374974 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cs9jw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6aa30f3141838a2780e6e300545fcb2d71a80ed4b146759f8dff91c976c3f36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zchsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cs9jw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:39Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.391848 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:39Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.405340 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:39Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.418541 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\
":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:39Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.428550 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:39Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.453495 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.453542 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.453554 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.453572 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.453584 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:39Z","lastTransitionTime":"2025-11-28T13:29:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.556095 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.556144 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.556155 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.556172 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.556185 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:39Z","lastTransitionTime":"2025-11-28T13:29:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.658559 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.658632 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.658646 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.658664 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.658677 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:39Z","lastTransitionTime":"2025-11-28T13:29:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.761755 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.761823 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.761839 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.761866 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.761884 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:39Z","lastTransitionTime":"2025-11-28T13:29:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.864638 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.864686 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.864697 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.864718 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.864731 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:39Z","lastTransitionTime":"2025-11-28T13:29:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.967313 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.967388 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.967411 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.967439 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:39 crc kubenswrapper[4857]: I1128 13:29:39.967463 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:39Z","lastTransitionTime":"2025-11-28T13:29:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.070595 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.071111 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.071126 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.071147 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.071162 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:40Z","lastTransitionTime":"2025-11-28T13:29:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.174679 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.174767 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.174787 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.174843 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.174862 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:40Z","lastTransitionTime":"2025-11-28T13:29:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.203582 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" event={"ID":"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b","Type":"ContainerStarted","Data":"52d03fd9944b8321b802a2a4317030abb1644c2c52c849ca35b5ba1eee3ebb4f"} Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.222690 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52d03fd9944b8321b802a2a4317030abb1644c2c52c849ca35b5ba1eee3ebb4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":tr
ue,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"c
ri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.227834 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:29:40 crc kubenswrapper[4857]: E1128 13:29:40.228050 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.228167 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:29:40 crc kubenswrapper[4857]: E1128 13:29:40.228312 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.229020 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:29:40 crc kubenswrapper[4857]: E1128 13:29:40.229230 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.244505 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\
"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubern
etes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.260606 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.277025 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.278279 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.278342 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.278362 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.278391 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.278411 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:40Z","lastTransitionTime":"2025-11-28T13:29:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.295290 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.315653 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.327971 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cs9jw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6aa30f3141838a2780e6e300545fcb2d71a80ed4b146759f8dff91c976c3f36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zchsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.
126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cs9jw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.345918 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\
"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 
cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.359458 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.374141 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.380915 4857 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.380988 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.381002 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.381023 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.381043 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:40Z","lastTransitionTime":"2025-11-28T13:29:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.386580 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.401213 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.414368 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.428138 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.449902 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:40Z 
is after 2025-08-24T17:21:41Z" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.465536 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.481840 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.483919 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.484284 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.484345 4857 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.484409 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.484469 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:40Z","lastTransitionTime":"2025-11-28T13:29:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.499567 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52d03fd9944b8321b802a2a4317030abb1644c2c52c849ca35b5ba1eee3ebb4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d
0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Runnin
g\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.516856 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPat
h\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.530701 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cs9jw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6aa30f3141838a2780e6e300545fcb2d71a80ed4b146759f8dff91c976c3f36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zchsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cs9jw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": 
tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.550337 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e63
55e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.575369 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.587314 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.587371 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.587384 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.587404 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.587418 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:40Z","lastTransitionTime":"2025-11-28T13:29:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.591669 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.603411 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.617861 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.632647 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.650884 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\
\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.666732 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.690101 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.690147 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.690159 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.690178 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.690190 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:40Z","lastTransitionTime":"2025-11-28T13:29:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.793129 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.793192 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.793212 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.793236 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.793254 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:40Z","lastTransitionTime":"2025-11-28T13:29:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.895721 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.895767 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.895779 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.895796 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.895807 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:40Z","lastTransitionTime":"2025-11-28T13:29:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.997680 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.997709 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.997719 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.997736 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:40 crc kubenswrapper[4857]: I1128 13:29:40.997745 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:40Z","lastTransitionTime":"2025-11-28T13:29:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.099888 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.099937 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.100001 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.100018 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.100030 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:41Z","lastTransitionTime":"2025-11-28T13:29:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.202353 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.202401 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.202409 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.202426 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.202437 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:41Z","lastTransitionTime":"2025-11-28T13:29:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.210562 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" event={"ID":"46c5e02c-be1a-45b7-86ef-cc8c484c4f71","Type":"ContainerStarted","Data":"1fd0d2db0aa3d222bb59ed08a30ae2c9e70c02757d320c52509c589b23ff9a86"} Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.304422 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.304479 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.304493 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.304512 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.304529 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:41Z","lastTransitionTime":"2025-11-28T13:29:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.407920 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.408032 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.408053 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.408079 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.408101 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:41Z","lastTransitionTime":"2025-11-28T13:29:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.511461 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.511549 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.511585 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.511616 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.511693 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:41Z","lastTransitionTime":"2025-11-28T13:29:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.614362 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.614424 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.614438 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.614463 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.614477 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:41Z","lastTransitionTime":"2025-11-28T13:29:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.718257 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.718338 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.718366 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.718406 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.718431 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:41Z","lastTransitionTime":"2025-11-28T13:29:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.822085 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.822156 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.822176 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.822218 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.822237 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:41Z","lastTransitionTime":"2025-11-28T13:29:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.925285 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.925357 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.925376 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.925404 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:41 crc kubenswrapper[4857]: I1128 13:29:41.925422 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:41Z","lastTransitionTime":"2025-11-28T13:29:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.028477 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.028538 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.028553 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.028576 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.028590 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:42Z","lastTransitionTime":"2025-11-28T13:29:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.131813 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.131880 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.131893 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.131912 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.131926 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:42Z","lastTransitionTime":"2025-11-28T13:29:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.215287 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.215354 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.228463 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.228472 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:29:42 crc kubenswrapper[4857]: E1128 13:29:42.228702 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.228849 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:29:42 crc kubenswrapper[4857]: E1128 13:29:42.228879 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:29:42 crc kubenswrapper[4857]: E1128 13:29:42.228971 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.233940 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:42Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.235629 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.235674 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.235687 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.235701 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.235714 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:42Z","lastTransitionTime":"2025-11-28T13:29:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.245423 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.248297 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/v
ar/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:42Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.251896 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.265067 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cs9jw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6aa30f3141838a2780e6e300545fcb2d71a80ed4b146759f8dff91c976c3f36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zchsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cs9jw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": 
tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:42Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.289477 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e63
55e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:42Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.313808 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:42Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.329221 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:42Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.339497 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.339549 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.339562 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.339583 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.339600 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:42Z","lastTransitionTime":"2025-11-28T13:29:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.343091 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:42Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.353762 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:42Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.368732 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:42Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.384522 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-28T13:29:42Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.398067 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:42Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.423446 4857 status_manager.go:875] "Failed to update status for pod" 
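
The recurring setters.go:603 entries embed the node's Ready condition as inline JSON. A sketch that unmarshals one such payload, copied verbatim from an entry above, into an explicit struct so the fields are easy to inspect:

    // nodecond.go - sketch: parse the Ready condition JSON that the
    // "Node became not ready" entries carry.
    package main

    import (
        "encoding/json"
        "fmt"
    )

    type nodeCondition struct {
        Type               string `json:"type"`
        Status             string `json:"status"`
        LastHeartbeatTime  string `json:"lastHeartbeatTime"`
        LastTransitionTime string `json:"lastTransitionTime"`
        Reason             string `json:"reason"`
        Message            string `json:"message"`
    }

    func main() {
        // Payload copied from a setters.go:603 entry in this log.
        payload := `{"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:42Z","lastTransitionTime":"2025-11-28T13:29:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}`
        var c nodeCondition
        if err := json.Unmarshal([]byte(payload), &c); err != nil {
            fmt.Println("unmarshal:", err)
            return
        }
        fmt.Printf("%s=%s reason=%s\n", c.Type, c.Status, c.Reason)
    }
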
pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52d03fd9944b8321b802a2a4317030abb1644c2c52c849ca35b5ba1eee3ebb4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\
\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:42Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.442496 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.442590 4857 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.442605 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.442628 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.442640 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:42Z","lastTransitionTime":"2025-11-28T13:29:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.481983 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8587850a181bf875ab24005a4b044489046212ae89b15834ccb0f0b46d3518b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b553128fa5576f5ac2e327e5e679a8098348ee31f0632c34d2b725d92198f8b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60c0f808818d6f71bad9f7b52d7fe42823b6e8ad83d3395366cab0f90fb75ec2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://933498cbc402ee7f14aab38ef86c401dc46df3c21d986c2a97ff1871bb7b7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4bb280266a0eb3369080dfcb1e4d0522f2d365a300f82637288356703bde242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1dc7ae9f1870db29ad30c6b4f3c8f8d28015c1e3d9bf27171e5e8a95d51b1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1fd0d2db0aa3d222bb59ed08a30ae2c9e70c0275
7d320c52509c589b23ff9a86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f7d207494d4959cc23a5df6738539f073ec3c45cfe0339bd14b1a3e876f6031\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccou
nt\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:42Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.492672 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:42Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.506507 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:42Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.522057 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:42Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.540223 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52d03fd9944b8321b802a2a4317030abb1644c2c52c849ca35b5ba1eee3ebb4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d
9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"image\\\":\\\"quay.io/ope
nshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:42Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.545142 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.545198 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.545210 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.545230 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.545251 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:42Z","lastTransitionTime":"2025-11-28T13:29:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.563030 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8587850a181bf875ab24005a4b044489046212ae89b15834ccb0f0b46d3518b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b553128fa5576f5ac2e327e5e679a8098348ee31f0632c34d2b725d92198f8b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60c0f808818d6f71bad9f7b52d7fe42823b6e8ad83d3395366cab0f90fb75ec2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://933498cbc402ee7f14aab38ef86c401dc46df3c21d986c2a97ff1871bb7b7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4bb280266a0eb3369080dfcb1e4d0522f2d365a300f82637288356703bde242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1dc7ae9f1870db29ad30c6b4f3c8f8d28015c1e3d9bf27171e5e8a95d51b1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1fd0d2db0aa3d222bb59ed08a30ae2c9e70c0275
7d320c52509c589b23ff9a86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f7d207494d4959cc23a5df6738539f073ec3c45cfe0339bd14b1a3e876f6031\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:42Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.579108 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:42Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.591938 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:42Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.605680 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:42Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.618092 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cs9jw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6aa30f3141838a2780e6e300545fcb2d71a80ed4b146759f8dff91c976c3f36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zchsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cs9jw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:42Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.633039 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:42Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.649277 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.649339 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.649366 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.649405 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.649431 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:42Z","lastTransitionTime":"2025-11-28T13:29:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.651832 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:42Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.668518 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:42Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.683315 4857 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:42Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.701472 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:42Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.720531 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-28T13:29:42Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.753154 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.753208 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.753219 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.753240 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.753251 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:42Z","lastTransitionTime":"2025-11-28T13:29:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.856556 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.856610 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.856619 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.856643 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.856653 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:42Z","lastTransitionTime":"2025-11-28T13:29:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.959600 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.959644 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.959655 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.959672 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:42 crc kubenswrapper[4857]: I1128 13:29:42.959686 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:42Z","lastTransitionTime":"2025-11-28T13:29:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.062489 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.062589 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.062614 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.062646 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.062674 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:43Z","lastTransitionTime":"2025-11-28T13:29:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.166245 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.166315 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.166333 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.166361 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.166380 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:43Z","lastTransitionTime":"2025-11-28T13:29:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.221688 4857 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.269907 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.269972 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.269981 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.269995 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.270007 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:43Z","lastTransitionTime":"2025-11-28T13:29:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.331082 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.331178 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.331215 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.331280 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.331310 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:43Z","lastTransitionTime":"2025-11-28T13:29:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:43 crc kubenswrapper[4857]: E1128 13:29:43.350166 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:43Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:43Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:43Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.354854 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.354904 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.354920 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.354942 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.354981 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:43Z","lastTransitionTime":"2025-11-28T13:29:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:43 crc kubenswrapper[4857]: E1128 13:29:43.373789 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:43Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:43Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:43Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.380852 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.380898 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure"
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.380924 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.380965 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.380978 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:43Z","lastTransitionTime":"2025-11-28T13:29:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.418479 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.418530 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.418539 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.418553 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.418563 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:43Z","lastTransitionTime":"2025-11-28T13:29:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.439451 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.439498 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.439507 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.439520 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.439531 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:43Z","lastTransitionTime":"2025-11-28T13:29:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:29:43 crc kubenswrapper[4857]: E1128 13:29:43.454936 4857 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.457158 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
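
Every node status PATCH above dies at the same point: the node.network-node-identity.openshift.io webhook on 127.0.0.1:9743 serves a certificate that expired on 2025-08-24T17:21:41Z, long before the current clock of 2025-11-28, so TLS verification fails before the patch is even evaluated and the kubelet eventually reports "update node status exceeds retry count". A minimal Go sketch to confirm the certificate's validity window from the node itself; this is assumed diagnostic tooling, not part of the kubelet:

    package main

    import (
        "crypto/tls"
        "fmt"
        "time"
    )

    func main() {
        // Endpoint taken from the webhook error above; InsecureSkipVerify lets
        // us inspect the certificate that normal verification rejects.
        conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
        if err != nil {
            fmt.Println("dial:", err)
            return
        }
        defer conn.Close()
        cert := conn.ConnectionState().PeerCertificates[0]
        fmt.Println("NotBefore:", cert.NotBefore)
        fmt.Println("NotAfter: ", cert.NotAfter)
        fmt.Println("expired now:", time.Now().After(cert.NotAfter))
    }

Against the state shown in this log it should print a NotAfter of 2025-08-24 17:21:41 UTC and "expired now: true".
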
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.457212 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.457225 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.457244 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.457261 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:43Z","lastTransitionTime":"2025-11-28T13:29:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.559983 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.560014 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.560025 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.560040 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.560049 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:43Z","lastTransitionTime":"2025-11-28T13:29:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.663510 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.663547 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.663557 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.663571 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.663580 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:43Z","lastTransitionTime":"2025-11-28T13:29:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
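
The condition blob in the setters.go:603 lines is plain JSON in the shape of a Kubernetes NodeCondition. A small sketch that parses one logged condition to pull out the reason and message; the struct below is a hand-rolled mirror of the logged fields, an assumption for illustration rather than the canonical k8s.io/api type:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // NodeCondition mirrors only the fields the kubelet prints above.
    type NodeCondition struct {
        Type               string `json:"type"`
        Status             string `json:"status"`
        LastHeartbeatTime  string `json:"lastHeartbeatTime"`
        LastTransitionTime string `json:"lastTransitionTime"`
        Reason             string `json:"reason"`
        Message            string `json:"message"`
    }

    func main() {
        // Condition JSON copied verbatim from a setters.go:603 line above.
        raw := `{"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:43Z","lastTransitionTime":"2025-11-28T13:29:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}`
        var c NodeCondition
        if err := json.Unmarshal([]byte(raw), &c); err != nil {
            fmt.Println("unmarshal:", err)
            return
        }
        fmt.Printf("%s=%s reason=%s\nmessage=%s\n", c.Type, c.Status, c.Reason, c.Message)
    }
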
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.766866 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.766914 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.766923 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.766938 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.766963 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:43Z","lastTransitionTime":"2025-11-28T13:29:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.871042 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.871368 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.871378 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.871393 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.871406 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:43Z","lastTransitionTime":"2025-11-28T13:29:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.974720 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.974767 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.974775 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.974793 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:29:43 crc kubenswrapper[4857]: I1128 13:29:43.974804 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:43Z","lastTransitionTime":"2025-11-28T13:29:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
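
Each Ready=False condition above names the same root cause: nothing under /etc/kubernetes/cni/net.d/, the directory the container runtime checks for a CNI config and which the network plugin (ovnkube-node here) is expected to populate once it is up. A throwaway Go check of that directory, assuming only read access on the node:

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func main() {
        // Directory named in the NetworkPluginNotReady message above.
        dir := "/etc/kubernetes/cni/net.d"
        entries, err := os.ReadDir(dir)
        if err != nil {
            fmt.Println("readdir:", err)
            return
        }
        if len(entries) == 0 {
            fmt.Println("no CNI configuration files in", dir)
            return
        }
        for _, e := range entries {
            fmt.Println(filepath.Join(dir, e.Name()))
        }
    }

An empty listing is exactly the state NetworkReady=false describes; the condition should clear once the plugin writes its config there.
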
Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.077509 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.077551 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.077561 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.077592 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.077602 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:44Z","lastTransitionTime":"2025-11-28T13:29:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.180557 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.180631 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.180642 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.180677 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.180689 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:44Z","lastTransitionTime":"2025-11-28T13:29:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.227726 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-h8td2_46c5e02c-be1a-45b7-86ef-cc8c484c4f71/ovnkube-controller/0.log"
Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.227988 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.228020 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 13:29:44 crc kubenswrapper[4857]: E1128 13:29:44.228133 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
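
While Ready stays False, the pod workers above refuse to sync any pod that needs the cluster network ("Error syncing pod, skipping"). A hedged client-go sketch that reads the node's conditions through the API to watch for the Ready flip; the kubeconfig path is a placeholder assumption, and the call signatures assume a reasonably current client-go (v0.18 or later):

    package main

    import (
        "context"
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        // Placeholder path; point it at the cluster this log came from.
        cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
        if err != nil {
            panic(err)
        }
        cs, err := kubernetes.NewForConfig(cfg)
        if err != nil {
            panic(err)
        }
        // Node name "crc" comes from the log lines above.
        node, err := cs.CoreV1().Nodes().Get(context.TODO(), "crc", metav1.GetOptions{})
        if err != nil {
            panic(err)
        }
        for _, c := range node.Status.Conditions {
            fmt.Printf("%-16s %-6s %s\n", c.Type, c.Status, c.Reason)
        }
    }
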
Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.228207 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 13:29:44 crc kubenswrapper[4857]: E1128 13:29:44.228246 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 13:29:44 crc kubenswrapper[4857]: E1128 13:29:44.228417 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.231550 4857 generic.go:334] "Generic (PLEG): container finished" podID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerID="1fd0d2db0aa3d222bb59ed08a30ae2c9e70c02757d320c52509c589b23ff9a86" exitCode=1
Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.232494 4857 scope.go:117] "RemoveContainer" containerID="1fd0d2db0aa3d222bb59ed08a30ae2c9e70c02757d320c52509c589b23ff9a86"
Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.233634 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" event={"ID":"46c5e02c-be1a-45b7-86ef-cc8c484c4f71","Type":"ContainerDied","Data":"1fd0d2db0aa3d222bb59ed08a30ae2c9e70c02757d320c52509c589b23ff9a86"}
Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.248378 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cs9jw" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6aa30f3141838a2780e6e300545fcb2d71a80ed4b146759f8dff91c976c3f36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zchsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cs9jw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:44Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.257562 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb"] Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.258011 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.260346 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.260620 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.265402 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936
ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:44Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.279172 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:44Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.283053 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.283097 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.283106 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.283123 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.283134 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:44Z","lastTransitionTime":"2025-11-28T13:29:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.299334 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:44Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.312921 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mfpzd\" (UniqueName: \"kubernetes.io/projected/adf71647-2fc9-4497-81ee-84e6373498b4-kube-api-access-mfpzd\") pod \"ovnkube-control-plane-749d76644c-kmjsb\" (UID: \"adf71647-2fc9-4497-81ee-84e6373498b4\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.312991 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/adf71647-2fc9-4497-81ee-84e6373498b4-env-overrides\") pod \"ovnkube-control-plane-749d76644c-kmjsb\" (UID: \"adf71647-2fc9-4497-81ee-84e6373498b4\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.313237 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/adf71647-2fc9-4497-81ee-84e6373498b4-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-kmjsb\" (UID: \"adf71647-2fc9-4497-81ee-84e6373498b4\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.313324 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/adf71647-2fc9-4497-81ee-84e6373498b4-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-kmjsb\" (UID: \"adf71647-2fc9-4497-81ee-84e6373498b4\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.313484 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:44Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.327755 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:44Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.341937 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:44Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.353861 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\
":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:44Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.369836 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-1
1-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:44Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.385983 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.386029 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.386042 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.386061 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.386358 4857 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:44Z","lastTransitionTime":"2025-11-28T13:29:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.389053 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:44Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.403910 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:44Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.414283 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/adf71647-2fc9-4497-81ee-84e6373498b4-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-kmjsb\" (UID: \"adf71647-2fc9-4497-81ee-84e6373498b4\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.415084 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/adf71647-2fc9-4497-81ee-84e6373498b4-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-kmjsb\" (UID: \"adf71647-2fc9-4497-81ee-84e6373498b4\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.415137 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/adf71647-2fc9-4497-81ee-84e6373498b4-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-kmjsb\" (UID: \"adf71647-2fc9-4497-81ee-84e6373498b4\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.415199 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mfpzd\" (UniqueName: \"kubernetes.io/projected/adf71647-2fc9-4497-81ee-84e6373498b4-kube-api-access-mfpzd\") pod \"ovnkube-control-plane-749d76644c-kmjsb\" (UID: \"adf71647-2fc9-4497-81ee-84e6373498b4\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" Nov 28 13:29:44 crc kubenswrapper[4857]: 
I1128 13:29:44.415471 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/adf71647-2fc9-4497-81ee-84e6373498b4-env-overrides\") pod \"ovnkube-control-plane-749d76644c-kmjsb\" (UID: \"adf71647-2fc9-4497-81ee-84e6373498b4\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.415970 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/adf71647-2fc9-4497-81ee-84e6373498b4-env-overrides\") pod \"ovnkube-control-plane-749d76644c-kmjsb\" (UID: \"adf71647-2fc9-4497-81ee-84e6373498b4\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.418417 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernete
s.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:44Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.426461 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/adf71647-2fc9-4497-81ee-84e6373498b4-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-kmjsb\" (UID: \"adf71647-2fc9-4497-81ee-84e6373498b4\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.441245 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52d03fd9944b8321b802a2a4317030abb1644c2c52c849ca35b5ba1eee3ebb4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready
\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"
recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\
"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:44Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.442279 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mfpzd\" (UniqueName: \"kubernetes.io/projected/adf71647-2fc9-4497-81ee-84e6373498b4-kube-api-access-mfpzd\") pod \"ovnkube-control-plane-749d76644c-kmjsb\" (UID: \"adf71647-2fc9-4497-81ee-84e6373498b4\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.465183 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8587850a181bf875ab24005a4b044489046212ae89b15834ccb0f0b46d3518b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b553128fa5576f5ac2e327e5e679a8098348ee31f0632c34d2b725d92198f8b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60c0f808818d6f71bad9f7b52d7fe42823b6e8ad83d3395366cab0f90fb75ec2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://933498cbc402ee7f14aab38ef86c401dc46df3c21d986c2a97ff1871bb7b7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4bb280266a0eb3369080dfcb1e4d0522f2d365a300f82637288356703bde242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1dc7ae9f1870db29ad30c6b4f3c8f8d28015c1e3d9bf27171e5e8a95d51b1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1fd0d2db0aa3d222bb59ed08a30ae2c9e70c0275
7d320c52509c589b23ff9a86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1fd0d2db0aa3d222bb59ed08a30ae2c9e70c02757d320c52509c589b23ff9a86\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"message\\\":\\\" 6201 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1128 13:29:43.957801 6201 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 13:29:43.957878 6201 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1128 13:29:43.957901 6201 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 13:29:43.957910 6201 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 13:29:43.957981 6201 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 13:29:43.957987 6201 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 13:29:43.958010 6201 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1128 13:29:43.958026 6201 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 13:29:43.958034 6201 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 13:29:43.958053 6201 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 13:29:43.958065 6201 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 13:29:43.958089 6201 factory.go:656] Stopping watch factory\\\\nI1128 13:29:43.958089 6201 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1128 13:29:43.958111 6201 ovnkube.go:599] Stopped ovnkube\\\\nI1128 13:29:43.958109 6201 handler.go:208] Removed *v1.Node event handler 
7\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f7d207494d4959cc23a5df6738539f073ec3c45cfe0339bd14b1a3e876f6031\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:44Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.480683 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:44Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.488997 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.489035 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.489044 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.489060 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.489079 4857 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:44Z","lastTransitionTime":"2025-11-28T13:29:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.497764 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:44Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.514779 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:44Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.527645 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cs9jw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6aa30f3141838a2780e6e300545fcb2d71a80ed4b146759f8dff91c976c3f36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zchsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.
126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cs9jw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:44Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.541102 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:44Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.560371 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:44Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.574705 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:44Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.585097 4857 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:44Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.592307 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.592370 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.592386 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.592410 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.592427 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:44Z","lastTransitionTime":"2025-11-28T13:29:44Z","reason":"KubeletNotReady","message":"container runtime network not 
ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.598520 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.603573 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\
",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:44Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.628284 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:44Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.659425 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:44Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.678003 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:44Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.695481 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.695533 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.695542 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.695560 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.695571 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:44Z","lastTransitionTime":"2025-11-28T13:29:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.708988 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52d03fd9944b8321b802a2a4317030abb1644c2c52c849ca35b5ba1eee3ebb4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:44Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.730230 4857 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8587850a181bf875ab24005a4b044489046212ae89b15834ccb0f0b46d3518b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b553128fa5576f5ac2e327e5e679a8098348ee31f0632c34d2b725d92198f8b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60c0f808818d6f71bad9f7b52d7fe42823b6e8ad83d3395366cab0f90fb75ec2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36c
dd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://933498cbc402ee7f14aab38ef86c401dc46df3c21d986c2a97ff1871bb7b7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4bb280266a0eb3369080dfcb1e4d0522f2d365a300f82637288356703bde242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1dc7ae9f1870db29ad30c6b4f3c8f8d28015c1e3d9bf27171e5e8a95d51b1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-con
troller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1fd0d2db0aa3d222bb59ed08a30ae2c9e70c02757d320c52509c589b23ff9a86\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1fd0d2db0aa3d222bb59ed08a30ae2c9e70c02757d320c52509c589b23ff9a86\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"message\\\":\\\" 6201 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1128 13:29:43.957801 6201 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 13:29:43.957878 6201 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1128 13:29:43.957901 6201 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 13:29:43.957910 6201 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 13:29:43.957981 6201 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 13:29:43.957987 6201 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 13:29:43.958010 6201 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1128 13:29:43.958026 6201 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 13:29:43.958034 6201 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 13:29:43.958053 6201 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 13:29:43.958065 6201 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 13:29:43.958089 6201 factory.go:656] Stopping watch factory\\\\nI1128 13:29:43.958089 6201 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1128 13:29:43.958111 6201 ovnkube.go:599] Stopped ovnkube\\\\nI1128 13:29:43.958109 6201 handler.go:208] Removed *v1.Node event handler 
7\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f7d207494d4959cc23a5df6738539f073ec3c45cfe0339bd14b1a3e876f6031\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:44Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.743768 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"adf71647-2fc9-4497-81ee-84e6373498b4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kmjsb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:44Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.798063 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.798120 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.798133 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.798153 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.798169 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:44Z","lastTransitionTime":"2025-11-28T13:29:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.900481 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.900550 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.900564 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.900583 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:44 crc kubenswrapper[4857]: I1128 13:29:44.900595 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:44Z","lastTransitionTime":"2025-11-28T13:29:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.003376 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.003425 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.003437 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.003454 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.003468 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:45Z","lastTransitionTime":"2025-11-28T13:29:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.106262 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.106648 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.106660 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.106676 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.106689 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:45Z","lastTransitionTime":"2025-11-28T13:29:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.209439 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.209484 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.209493 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.209508 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.209518 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:45Z","lastTransitionTime":"2025-11-28T13:29:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.235459 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" event={"ID":"adf71647-2fc9-4497-81ee-84e6373498b4","Type":"ContainerStarted","Data":"2f6b82b1912a9fc0611213b2e3c627b67f349d97a729ed77bd65fa687b79508d"} Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.237560 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-h8td2_46c5e02c-be1a-45b7-86ef-cc8c484c4f71/ovnkube-controller/0.log" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.240193 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" event={"ID":"46c5e02c-be1a-45b7-86ef-cc8c484c4f71","Type":"ContainerStarted","Data":"3479ddd010edde55297b54ed32949b10c87257a70b551253716751517de87167"} Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.240392 4857 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.258853 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:45Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.271129 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:45Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.281686 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Dis
abled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:45Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.293435 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:45Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.305967 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:45Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.311155 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.311189 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.311199 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.311213 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.311226 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:45Z","lastTransitionTime":"2025-11-28T13:29:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.318122 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:45Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.332350 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52d03fd9944b8321b802a2a4317030abb1644c2c52c849ca35b5ba1eee3ebb4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:45Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.353332 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8587850a181bf875ab24005a4b044489046212ae89b15834ccb0f0b46d3518b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b553128fa5576f5ac2e327e5e679a8098348ee31f0632c34d2b725d92198f8b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60c0f808818d6f71bad9f7b52d7fe42823b6e8ad83d3395366cab0f90fb75ec2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://933498cbc402ee7f14aab38ef86c401dc46df3c21d986c2a97ff1871bb7b7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4bb280266a0eb3369080dfcb1e4d0522f2d365a300f82637288356703bde242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1dc7ae9f1870db29ad30c6b4f3c8f8d28015c1e3d9bf27171e5e8a95d51b1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3479ddd010edde55297b54ed32949b10c87257a70b551253716751517de87167\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1fd0d2db0aa3d222bb59ed08a30ae2c9e70c02757d320c52509c589b23ff9a86\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"message\\\":\\\" 6201 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1128 13:29:43.957801 6201 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 13:29:43.957878 6201 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1128 13:29:43.957901 6201 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 13:29:43.957910 6201 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 13:29:43.957981 6201 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 13:29:43.957987 6201 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 13:29:43.958010 6201 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1128 13:29:43.958026 6201 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 13:29:43.958034 6201 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 13:29:43.958053 6201 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 13:29:43.958065 6201 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 13:29:43.958089 6201 factory.go:656] Stopping watch factory\\\\nI1128 13:29:43.958089 6201 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1128 13:29:43.958111 6201 ovnkube.go:599] Stopped ovnkube\\\\nI1128 13:29:43.958109 6201 handler.go:208] Removed *v1.Node event handler 
7\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f7d207494d4959cc23a5df6738539f073ec3c45cfe0339bd14b1a3e876f6031\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{
\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:45Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.366587 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"adf71647-2fc9-4497-81ee-84e6373498b4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kmjsb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:45Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.381979 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:45Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.396791 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:45Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.408351 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:45Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.413392 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.413425 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.413435 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.413452 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.413461 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:45Z","lastTransitionTime":"2025-11-28T13:29:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.422928 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:45Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.434325 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cs9jw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6aa30f3141838a2780e6e300545fcb2d71a80ed4b146759f8dff91c976c3f36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zchsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cs9jw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:45Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.447595 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:45Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.516072 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.516114 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.516122 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.516136 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.516146 4857 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:45Z","lastTransitionTime":"2025-11-28T13:29:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.618103 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.618161 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.618183 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.618204 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.618221 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:45Z","lastTransitionTime":"2025-11-28T13:29:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.726656 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.726715 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.726728 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.726752 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.726772 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:45Z","lastTransitionTime":"2025-11-28T13:29:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.733366 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-26tq7"] Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.734108 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:29:45 crc kubenswrapper[4857]: E1128 13:29:45.734216 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.751084 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"k
ube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:45Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.770536 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:45Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.788459 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:45Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.804114 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cs9jw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6aa30f3141838a2780e6e300545fcb2d71a80ed4b146759f8dff91c976c3f36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zchsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cs9jw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:45Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.828442 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:45Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.828553 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7gszj\" (UniqueName: \"kubernetes.io/projected/0cf43f51-b79b-49fc-85ca-a245a248f27a-kube-api-access-7gszj\") pod \"network-metrics-daemon-26tq7\" (UID: \"0cf43f51-b79b-49fc-85ca-a245a248f27a\") " pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.828611 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0cf43f51-b79b-49fc-85ca-a245a248f27a-metrics-certs\") pod \"network-metrics-daemon-26tq7\" (UID: \"0cf43f51-b79b-49fc-85ca-a245a248f27a\") " pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.830164 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.830199 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.830214 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.830235 4857 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.830246 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:45Z","lastTransitionTime":"2025-11-28T13:29:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.842769 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:45Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.858931 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:45Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.880688 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:45Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.896962 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025
-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:45Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.914275 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:45Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.926094 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-26tq7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cf43f51-b79b-49fc-85ca-a245a248f27a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-26tq7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:45Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.929113 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:29:45 crc kubenswrapper[4857]: E1128 13:29:45.929302 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:30:01.929272641 +0000 UTC m=+52.053214078 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.929377 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.929402 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.929451 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.929476 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.929495 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7gszj\" (UniqueName: \"kubernetes.io/projected/0cf43f51-b79b-49fc-85ca-a245a248f27a-kube-api-access-7gszj\") pod \"network-metrics-daemon-26tq7\" (UID: \"0cf43f51-b79b-49fc-85ca-a245a248f27a\") " pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.929523 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0cf43f51-b79b-49fc-85ca-a245a248f27a-metrics-certs\") pod \"network-metrics-daemon-26tq7\" (UID: \"0cf43f51-b79b-49fc-85ca-a245a248f27a\") " pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:29:45 crc kubenswrapper[4857]: E1128 13:29:45.929649 4857 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 13:29:45 crc kubenswrapper[4857]: E1128 13:29:45.929691 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0cf43f51-b79b-49fc-85ca-a245a248f27a-metrics-certs podName:0cf43f51-b79b-49fc-85ca-a245a248f27a nodeName:}" failed. 
No retries permitted until 2025-11-28 13:29:46.429682552 +0000 UTC m=+36.553623989 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0cf43f51-b79b-49fc-85ca-a245a248f27a-metrics-certs") pod "network-metrics-daemon-26tq7" (UID: "0cf43f51-b79b-49fc-85ca-a245a248f27a") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 13:29:45 crc kubenswrapper[4857]: E1128 13:29:45.929786 4857 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 13:29:45 crc kubenswrapper[4857]: E1128 13:29:45.929894 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 13:30:01.929877527 +0000 UTC m=+52.053818964 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 13:29:45 crc kubenswrapper[4857]: E1128 13:29:45.929926 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 13:29:45 crc kubenswrapper[4857]: E1128 13:29:45.930078 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 13:29:45 crc kubenswrapper[4857]: E1128 13:29:45.930151 4857 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:29:45 crc kubenswrapper[4857]: E1128 13:29:45.929968 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 13:29:45 crc kubenswrapper[4857]: E1128 13:29:45.930287 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 13:29:45 crc kubenswrapper[4857]: E1128 13:29:45.930311 4857 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:29:45 crc kubenswrapper[4857]: E1128 13:29:45.930232 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 13:30:01.930223906 +0000 UTC m=+52.054165343 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:29:45 crc kubenswrapper[4857]: E1128 13:29:45.930471 4857 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 13:29:45 crc kubenswrapper[4857]: E1128 13:29:45.930465 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 13:30:01.93038391 +0000 UTC m=+52.054325367 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:29:45 crc kubenswrapper[4857]: E1128 13:29:45.931066 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 13:30:01.930535714 +0000 UTC m=+52.054477331 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.932280 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.932306 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.932315 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.932332 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.932342 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:45Z","lastTransitionTime":"2025-11-28T13:29:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.949821 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:45Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.967382 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7gszj\" (UniqueName: \"kubernetes.io/projected/0cf43f51-b79b-49fc-85ca-a245a248f27a-kube-api-access-7gszj\") pod \"network-metrics-daemon-26tq7\" (UID: \"0cf43f51-b79b-49fc-85ca-a245a248f27a\") " pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.971063 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:45Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:45 crc kubenswrapper[4857]: I1128 13:29:45.991658 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52d03fd9944b8321b802a2a4317030abb1644c2c52c849ca35b5ba1eee3ebb4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:45Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.013557 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8587850a181bf875ab24005a4b044489046212ae89b15834ccb0f0b46d3518b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b553128fa5576f5ac2e327e5e679a8098348ee31f0632c34d2b725d92198f8b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60c0f808818d6f71bad9f7b52d7fe42823b6e8ad83d3395366cab0f90fb75ec2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://933498cbc402ee7f14aab38ef86c401dc46df3c21d986c2a97ff1871bb7b7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4bb280266a0eb3369080dfcb1e4d0522f2d365a300f82637288356703bde242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1dc7ae9f1870db29ad30c6b4f3c8f8d28015c1e3d9bf27171e5e8a95d51b1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3479ddd010edde55297b54ed32949b10c87257a70b551253716751517de87167\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1fd0d2db0aa3d222bb59ed08a30ae2c9e70c02757d320c52509c589b23ff9a86\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"message\\\":\\\" 6201 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1128 13:29:43.957801 6201 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 13:29:43.957878 6201 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1128 13:29:43.957901 6201 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 13:29:43.957910 6201 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 13:29:43.957981 6201 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 13:29:43.957987 6201 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 13:29:43.958010 6201 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1128 13:29:43.958026 6201 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 13:29:43.958034 6201 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 13:29:43.958053 6201 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 13:29:43.958065 6201 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 13:29:43.958089 6201 factory.go:656] Stopping watch factory\\\\nI1128 13:29:43.958089 6201 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1128 13:29:43.958111 6201 ovnkube.go:599] Stopped ovnkube\\\\nI1128 13:29:43.958109 6201 handler.go:208] Removed *v1.Node event handler 
7\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f7d207494d4959cc23a5df6738539f073ec3c45cfe0339bd14b1a3e876f6031\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{
\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.028106 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"adf71647-2fc9-4497-81ee-84e6373498b4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kmjsb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.035304 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.035364 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.035381 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.035410 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.035426 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:46Z","lastTransitionTime":"2025-11-28T13:29:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.139088 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.139150 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.139164 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.139185 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.139204 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:46Z","lastTransitionTime":"2025-11-28T13:29:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.228802 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.228864 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.228869 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:29:46 crc kubenswrapper[4857]: E1128 13:29:46.229000 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:29:46 crc kubenswrapper[4857]: E1128 13:29:46.229102 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:29:46 crc kubenswrapper[4857]: E1128 13:29:46.229211 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.242662 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.243717 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.243833 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.243928 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.244051 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:46Z","lastTransitionTime":"2025-11-28T13:29:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.248330 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" event={"ID":"adf71647-2fc9-4497-81ee-84e6373498b4","Type":"ContainerStarted","Data":"91d598096a9163436c21991255cd21335da8307e56e5607288d289da81d77578"} Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.248473 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" event={"ID":"adf71647-2fc9-4497-81ee-84e6373498b4","Type":"ContainerStarted","Data":"57e2a3423c1095d435ed7835a89df7e3e8f12c8df10707e1a82ae4595fadf4c8"} Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.251564 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-h8td2_46c5e02c-be1a-45b7-86ef-cc8c484c4f71/ovnkube-controller/1.log" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.252480 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-h8td2_46c5e02c-be1a-45b7-86ef-cc8c484c4f71/ovnkube-controller/0.log" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.256189 4857 generic.go:334] "Generic (PLEG): container finished" podID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerID="3479ddd010edde55297b54ed32949b10c87257a70b551253716751517de87167" exitCode=1 Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.256384 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" event={"ID":"46c5e02c-be1a-45b7-86ef-cc8c484c4f71","Type":"ContainerDied","Data":"3479ddd010edde55297b54ed32949b10c87257a70b551253716751517de87167"} Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.256534 4857 scope.go:117] "RemoveContainer" containerID="1fd0d2db0aa3d222bb59ed08a30ae2c9e70c02757d320c52509c589b23ff9a86" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.257693 4857 scope.go:117] "RemoveContainer" containerID="3479ddd010edde55297b54ed32949b10c87257a70b551253716751517de87167" Nov 28 13:29:46 crc kubenswrapper[4857]: E1128 13:29:46.258145 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-h8td2_openshift-ovn-kubernetes(46c5e02c-be1a-45b7-86ef-cc8c484c4f71)\"" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.272481 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"
restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.287294 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.303065 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-26tq7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cf43f51-b79b-49fc-85ca-a245a248f27a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-26tq7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.316893 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.330249 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.347445 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.347489 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.347501 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.347522 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.347537 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:46Z","lastTransitionTime":"2025-11-28T13:29:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.352077 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52d03fd9944b8321b802a2a4317030abb1644c2c52c849ca35b5ba1eee3ebb4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.380091 4857 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8587850a181bf875ab24005a4b044489046212ae89b15834ccb0f0b46d3518b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b553128fa5576f5ac2e327e5e679a8098348ee31f0632c34d2b725d92198f8b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60c0f808818d6f71bad9f7b52d7fe42823b6e8ad83d3395366cab0f90fb75ec2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36c
dd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://933498cbc402ee7f14aab38ef86c401dc46df3c21d986c2a97ff1871bb7b7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4bb280266a0eb3369080dfcb1e4d0522f2d365a300f82637288356703bde242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1dc7ae9f1870db29ad30c6b4f3c8f8d28015c1e3d9bf27171e5e8a95d51b1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-con
troller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3479ddd010edde55297b54ed32949b10c87257a70b551253716751517de87167\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1fd0d2db0aa3d222bb59ed08a30ae2c9e70c02757d320c52509c589b23ff9a86\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"message\\\":\\\" 6201 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1128 13:29:43.957801 6201 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 13:29:43.957878 6201 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1128 13:29:43.957901 6201 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 13:29:43.957910 6201 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 13:29:43.957981 6201 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 13:29:43.957987 6201 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 13:29:43.958010 6201 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1128 13:29:43.958026 6201 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 13:29:43.958034 6201 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 13:29:43.958053 6201 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 13:29:43.958065 6201 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 13:29:43.958089 6201 factory.go:656] Stopping watch factory\\\\nI1128 13:29:43.958089 6201 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1128 13:29:43.958111 6201 ovnkube.go:599] Stopped ovnkube\\\\nI1128 13:29:43.958109 6201 handler.go:208] Removed *v1.Node event handler 
7\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f7d207494d4959cc23a5df6738539f073ec3c45cfe0339bd14b1a3e876f6031\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{
\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.396725 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"adf71647-2fc9-4497-81ee-84e6373498b4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://57e2a3423c1095d435ed7835a89df7e3e8f12c8df10707e1a82ae4595fadf4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d598096a9163436c21991255cd21335da8307e56e5607288d289da81d77578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kmjsb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:46Z is after 2025-08-24T17:21:41Z" Nov 28 
13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.415242 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\
":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.433903 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.435379 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0cf43f51-b79b-49fc-85ca-a245a248f27a-metrics-certs\") pod \"network-metrics-daemon-26tq7\" (UID: \"0cf43f51-b79b-49fc-85ca-a245a248f27a\") " pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:29:46 crc kubenswrapper[4857]: E1128 13:29:46.435621 4857 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 13:29:46 crc kubenswrapper[4857]: E1128 13:29:46.435774 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0cf43f51-b79b-49fc-85ca-a245a248f27a-metrics-certs podName:0cf43f51-b79b-49fc-85ca-a245a248f27a nodeName:}" failed. No retries permitted until 2025-11-28 13:29:47.435734016 +0000 UTC m=+37.559675493 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0cf43f51-b79b-49fc-85ca-a245a248f27a-metrics-certs") pod "network-metrics-daemon-26tq7" (UID: "0cf43f51-b79b-49fc-85ca-a245a248f27a") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.456216 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"n
ame\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.456366 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.456421 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.456448 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.456497 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.456522 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:46Z","lastTransitionTime":"2025-11-28T13:29:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.472760 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cs9jw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6aa30f3141838a2780e6e300545fcb2d71a80ed4b146759f8dff91c976c3f36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zchsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cs9jw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.488163 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.507919 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.522831 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\
":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.534767 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.549265 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.560732 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.561047 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.561337 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.561528 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.561719 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:46Z","lastTransitionTime":"2025-11-28T13:29:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady 
message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.563940 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-26tq7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cf43f51-b79b-49fc-85ca-a245a248f27a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-26tq7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.581345 4857 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be
9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.599722 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52d03fd9944b8321b802a2a4317030abb1644c2c52c849ca35b5ba1eee3ebb4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerSta
tuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:
29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-c
ni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.622224 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8587850a181bf875ab24005a4b044489046212ae89b15834ccb0f0b46d3518b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b553128fa5576f5ac2e327e5e679a8098348ee31f0632c34d2b725d92198f8b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60c0f808818d6f71bad9f7b52d7fe42823b6e8ad83d3395366cab0f90fb75ec2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://933498cbc402ee7f14aab38ef86c401dc46df3c21d986c2a97ff1871bb7b7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4bb280266a0eb3369080dfcb1e4d0522f2d365a300f82637288356703bde242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1dc7ae9f1870db29ad30c6b4f3c8f8d28015c1e3d9bf27171e5e8a95d51b1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3479ddd010edde55297b54ed32949b10c87257a7
0b551253716751517de87167\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1fd0d2db0aa3d222bb59ed08a30ae2c9e70c02757d320c52509c589b23ff9a86\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"message\\\":\\\" 6201 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1128 13:29:43.957801 6201 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1128 13:29:43.957878 6201 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1128 13:29:43.957901 6201 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1128 13:29:43.957910 6201 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1128 13:29:43.957981 6201 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1128 13:29:43.957987 6201 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1128 13:29:43.958010 6201 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1128 13:29:43.958026 6201 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1128 13:29:43.958034 6201 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1128 13:29:43.958053 6201 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1128 13:29:43.958065 6201 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1128 13:29:43.958089 6201 factory.go:656] Stopping watch factory\\\\nI1128 13:29:43.958089 6201 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1128 13:29:43.958111 6201 ovnkube.go:599] Stopped ovnkube\\\\nI1128 13:29:43.958109 6201 handler.go:208] Removed *v1.Node event handler 7\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3479ddd010edde55297b54ed32949b10c87257a70b551253716751517de87167\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"rator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.161:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {f32857b5-f652-4313-a0d7-455c3156dd99}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1128 13:29:45.083329 6319 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for namespace Informer during admin network policy controller initialization, handler {0x1fcbf20 0x1fcbc00 0x1fcbba0} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:45Z is after 2025-08-24T17:21:41Z]\\\\nI1128 13:29:45.080546 6319 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/mul\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f7d207494d4959cc23a5df6738539f073ec3c45cfe0339bd14b1a3e876f6031\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.1
68.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.637201 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"adf71647-2fc9-4497-81ee-84e6373498b4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://57e2a3423c1095d435ed7835a89df7e3e8f12c8df10707e1a82ae4595fadf4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d598096a9163436c21991255cd21335da8307e56e5607288d289da81d77578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kmjsb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:46Z is after 2025-08-24T17:21:41Z" Nov 28 
13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.652147 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.667060 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.667150 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.667237 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.667288 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.667309 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:46Z","lastTransitionTime":"2025-11-28T13:29:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.667876 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.683138 4857 status_manager.go:875] "Failed 
to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.698226 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.710720 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cs9jw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6aa30f3141838a2780e6e300545fcb2d71a80ed4b146759f8dff91c976c3f36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zchsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cs9jw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.726863 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.741172 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.754530 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.770328 4857 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.771461 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.771570 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.771584 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.771608 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.771622 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:46Z","lastTransitionTime":"2025-11-28T13:29:46Z","reason":"KubeletNotReady","message":"container runtime network not 
ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.797087 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:46Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.875521 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.876023 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.876196 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.876332 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.876434 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:46Z","lastTransitionTime":"2025-11-28T13:29:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.979710 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.980229 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.980449 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.980720 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:46 crc kubenswrapper[4857]: I1128 13:29:46.980942 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:46Z","lastTransitionTime":"2025-11-28T13:29:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.084275 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.084660 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.084780 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.085295 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.085442 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:47Z","lastTransitionTime":"2025-11-28T13:29:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.188052 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.188109 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.188123 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.188145 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.188158 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:47Z","lastTransitionTime":"2025-11-28T13:29:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.228862 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:29:47 crc kubenswrapper[4857]: E1128 13:29:47.229122 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.263571 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-h8td2_46c5e02c-be1a-45b7-86ef-cc8c484c4f71/ovnkube-controller/1.log" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.268874 4857 scope.go:117] "RemoveContainer" containerID="3479ddd010edde55297b54ed32949b10c87257a70b551253716751517de87167" Nov 28 13:29:47 crc kubenswrapper[4857]: E1128 13:29:47.269018 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-h8td2_openshift-ovn-kubernetes(46c5e02c-be1a-45b7-86ef-cc8c484c4f71)\"" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.285838 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:47Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.290419 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.290493 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.290505 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.290519 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.290531 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:47Z","lastTransitionTime":"2025-11-28T13:29:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.302351 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:47Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.327216 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52d03fd9944b8321b802a2a4317030abb1644c2c52c849ca35b5ba1eee3ebb4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:47Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.354731 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8587850a181bf875ab24005a4b044489046212ae89b15834ccb0f0b46d3518b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b553128fa5576f5ac2e327e5e679a8098348ee31f0632c34d2b725d92198f8b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60c0f808818d6f71bad9f7b52d7fe42823b6e8ad83d3395366cab0f90fb75ec2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://933498cbc402ee7f14aab38ef86c401dc46df3c21d986c2a97ff1871bb7b7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4bb280266a0eb3369080dfcb1e4d0522f2d365a300f82637288356703bde242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1dc7ae9f1870db29ad30c6b4f3c8f8d28015c1e3d9bf27171e5e8a95d51b1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3479ddd010edde55297b54ed32949b10c87257a70b551253716751517de87167\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3479ddd010edde55297b54ed32949b10c87257a70b551253716751517de87167\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"rator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.161:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {f32857b5-f652-4313-a0d7-455c3156dd99}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1128 13:29:45.083329 6319 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for namespace Informer during admin network policy controller initialization, handler {0x1fcbf20 0x1fcbc00 0x1fcbba0} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:45Z is after 2025-08-24T17:21:41Z]\\\\nI1128 13:29:45.080546 6319 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/mul\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-h8td2_openshift-ovn-kubernetes(46c5e02c-be1a-45b7-86ef-cc8c484c4f71)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f7d207494d4959cc23a5df6738539f073ec3c45cfe0339bd14b1a3e876f6031\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:47Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.370839 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"adf71647-2fc9-4497-81ee-84e6373498b4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://57e2a3423c1095d435ed7835a89df7e3e8f12c8df10707e1a82ae4595fadf4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d598096a9163436c21991255cd21335da8307e56e5607288d289da81d77578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kmjsb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:47Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.391439 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:47Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.393181 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.393228 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.393242 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.393260 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.393273 4857 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:47Z","lastTransitionTime":"2025-11-28T13:29:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.413162 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:47Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.434138 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:47Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.445597 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0cf43f51-b79b-49fc-85ca-a245a248f27a-metrics-certs\") pod \"network-metrics-daemon-26tq7\" (UID: \"0cf43f51-b79b-49fc-85ca-a245a248f27a\") " pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:29:47 crc kubenswrapper[4857]: E1128 13:29:47.445805 4857 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 13:29:47 crc kubenswrapper[4857]: E1128 13:29:47.445914 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0cf43f51-b79b-49fc-85ca-a245a248f27a-metrics-certs podName:0cf43f51-b79b-49fc-85ca-a245a248f27a nodeName:}" failed. No retries permitted until 2025-11-28 13:29:49.445886473 +0000 UTC m=+39.569827950 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0cf43f51-b79b-49fc-85ca-a245a248f27a-metrics-certs") pod "network-metrics-daemon-26tq7" (UID: "0cf43f51-b79b-49fc-85ca-a245a248f27a") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.448684 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cs9jw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6aa30f3141838a2780e6e300545fcb2d71a80ed4b146759f8dff91c976c3f36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zchsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cs9jw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:47Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.465870 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:47Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.482089 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:47Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.495816 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.495887 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.495922 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.495982 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.495998 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:47Z","lastTransitionTime":"2025-11-28T13:29:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.497588 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:47Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.509306 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:47Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.523240 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:47Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.537713 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-28T13:29:47Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.552784 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-26tq7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cf43f51-b79b-49fc-85ca-a245a248f27a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-26tq7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:47Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.598830 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:47 
crc kubenswrapper[4857]: I1128 13:29:47.598901 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.598918 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.598965 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.598980 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:47Z","lastTransitionTime":"2025-11-28T13:29:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.702640 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.702712 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.702734 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.702765 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.702787 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:47Z","lastTransitionTime":"2025-11-28T13:29:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.806393 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.806447 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.806459 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.806481 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.806493 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:47Z","lastTransitionTime":"2025-11-28T13:29:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.909593 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.909661 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.909679 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.909704 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:47 crc kubenswrapper[4857]: I1128 13:29:47.909719 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:47Z","lastTransitionTime":"2025-11-28T13:29:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:48 crc kubenswrapper[4857]: I1128 13:29:48.013617 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:48 crc kubenswrapper[4857]: I1128 13:29:48.013697 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:48 crc kubenswrapper[4857]: I1128 13:29:48.013717 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:48 crc kubenswrapper[4857]: I1128 13:29:48.013745 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:48 crc kubenswrapper[4857]: I1128 13:29:48.013766 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:48Z","lastTransitionTime":"2025-11-28T13:29:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:48 crc kubenswrapper[4857]: I1128 13:29:48.117066 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:48 crc kubenswrapper[4857]: I1128 13:29:48.117146 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:48 crc kubenswrapper[4857]: I1128 13:29:48.117166 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:48 crc kubenswrapper[4857]: I1128 13:29:48.117193 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:48 crc kubenswrapper[4857]: I1128 13:29:48.117215 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:48Z","lastTransitionTime":"2025-11-28T13:29:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 28 13:29:48 crc kubenswrapper[4857]: I1128 13:29:48.228481 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 13:29:48 crc kubenswrapper[4857]: I1128 13:29:48.228606 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 13:29:48 crc kubenswrapper[4857]: E1128 13:29:48.228759 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 13:29:48 crc kubenswrapper[4857]: I1128 13:29:48.228887 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 13:29:48 crc kubenswrapper[4857]: E1128 13:29:48.228935 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 13:29:48 crc kubenswrapper[4857]: E1128 13:29:48.229171 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 13:29:49 crc kubenswrapper[4857]: I1128 13:29:49.228197 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7"
Nov 28 13:29:49 crc kubenswrapper[4857]: E1128 13:29:49.228429 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a"
Nov 28 13:29:49 crc kubenswrapper[4857]: I1128 13:29:49.470361 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0cf43f51-b79b-49fc-85ca-a245a248f27a-metrics-certs\") pod \"network-metrics-daemon-26tq7\" (UID: \"0cf43f51-b79b-49fc-85ca-a245a248f27a\") " pod="openshift-multus/network-metrics-daemon-26tq7"
Nov 28 13:29:49 crc kubenswrapper[4857]: E1128 13:29:49.470557 4857 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 28 13:29:49 crc kubenswrapper[4857]: E1128 13:29:49.470614 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0cf43f51-b79b-49fc-85ca-a245a248f27a-metrics-certs podName:0cf43f51-b79b-49fc-85ca-a245a248f27a nodeName:}" failed. No retries permitted until 2025-11-28 13:29:53.470601112 +0000 UTC m=+43.594542549 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0cf43f51-b79b-49fc-85ca-a245a248f27a-metrics-certs") pod "network-metrics-daemon-26tq7" (UID: "0cf43f51-b79b-49fc-85ca-a245a248f27a") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 28 13:29:49 crc kubenswrapper[4857]: I1128 13:29:49.670334 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:29:49 crc kubenswrapper[4857]: I1128 13:29:49.670403 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:29:49 crc kubenswrapper[4857]: I1128 13:29:49.670413 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:29:49 crc kubenswrapper[4857]: I1128 13:29:49.670427 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:29:49 crc kubenswrapper[4857]: I1128 13:29:49.670438 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:49Z","lastTransitionTime":"2025-11-28T13:29:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:49 crc kubenswrapper[4857]: I1128 13:29:49.773382 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:49 crc kubenswrapper[4857]: I1128 13:29:49.773427 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:49 crc kubenswrapper[4857]: I1128 13:29:49.773435 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:49 crc kubenswrapper[4857]: I1128 13:29:49.773449 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:49 crc kubenswrapper[4857]: I1128 13:29:49.773459 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:49Z","lastTransitionTime":"2025-11-28T13:29:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:49 crc kubenswrapper[4857]: I1128 13:29:49.857004 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:29:49 crc kubenswrapper[4857]: I1128 13:29:49.870478 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-26tq7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cf43f51-b79b-49fc-85ca-a245a248f27a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-26tq7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:49 crc kubenswrapper[4857]: I1128 13:29:49.875648 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:49 crc kubenswrapper[4857]: I1128 13:29:49.875689 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:49 crc kubenswrapper[4857]: I1128 13:29:49.875700 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:49 crc kubenswrapper[4857]: I1128 13:29:49.875718 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:49 crc kubenswrapper[4857]: I1128 13:29:49.875730 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:49Z","lastTransitionTime":"2025-11-28T13:29:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:49 crc kubenswrapper[4857]: I1128 13:29:49.887499 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:49 crc kubenswrapper[4857]: I1128 13:29:49.900418 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:49 crc kubenswrapper[4857]: I1128 13:29:49.927498 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8587850a181bf875ab24005a4b044489046212ae89b15834ccb0f0b46d3518b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b553128fa5576f5ac2e327e5e679a8098348ee31f0632c34d2b725d92198f8b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc
/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60c0f808818d6f71bad9f7b52d7fe42823b6e8ad83d3395366cab0f90fb75ec2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://933498cbc402ee7f14aab38ef86c401dc46df3c21d986c2a97ff1871bb7b7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4bb280266a0eb3369080dfcb1e4d0522f2d365a300f82637288356703bde242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\"
,\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1dc7ae9f1870db29ad30c6b4f3c8f8d28015c1e3d9bf27171e5e8a95d51b1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3479ddd010edde55297b54ed32949b10c87257a70b551253716751517de87167\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3479ddd010edde55297b54ed32949b10c87257a70b551253716751517de87167\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"rator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.161:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {f32857b5-f652-4313-a0d7-455c3156dd99}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1128 13:29:45.083329 6319 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for namespace Informer during admin network policy controller initialization, handler {0x1fcbf20 0x1fcbc00 0x1fcbba0} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:45Z is after 2025-08-24T17:21:41Z]\\\\nI1128 13:29:45.080546 6319 
obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/mul\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-h8td2_openshift-ovn-kubernetes(46c5e02c-be1a-45b7-86ef-cc8c484c4f71)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f7d207494d4959cc23a5df6738539f073ec3c45cfe0339bd14b1a3e876f6031\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\
"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:49 crc kubenswrapper[4857]: I1128 13:29:49.944558 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"adf71647-2fc9-4497-81ee-84e6373498b4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://57e2a3423c1095d435ed7835a89df7e3e8f12c8df10707e1a82ae4595fadf4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d598096a9163436c21991255cd21335da8307e56e5607288d289da81d77578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kmjsb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:49Z is after 2025-08-24T17:21:41Z" Nov 28 
13:29:49 crc kubenswrapper[4857]: I1128 13:29:49.960287 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:49 crc kubenswrapper[4857]: I1128 13:29:49.979040 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:49 crc kubenswrapper[4857]: I1128 13:29:49.979088 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:49 crc kubenswrapper[4857]: I1128 13:29:49.979099 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:49 crc kubenswrapper[4857]: I1128 13:29:49.979120 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:49 crc kubenswrapper[4857]: I1128 13:29:49.979132 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:49Z","lastTransitionTime":"2025-11-28T13:29:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:49 crc kubenswrapper[4857]: I1128 13:29:49.981237 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.001562 4857 status_manager.go:875] "Failed 
to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52d03fd9944b8321b802a2a4317030abb1644c2c52c849ca35b5ba1eee3ebb4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154af
a2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os
-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:49Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.034313 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.049894 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cs9jw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6aa30f3141838a2780e6e300545fcb2d71a80ed4b146759f8dff91c976c3f36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zchsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cs9jw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.069214 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.081978 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.082028 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.082041 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.082063 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.082079 4857 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:50Z","lastTransitionTime":"2025-11-28T13:29:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.084318 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.096151 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.112442 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.127906 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.144332 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.184852 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.184890 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.184900 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.184915 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.184925 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:50Z","lastTransitionTime":"2025-11-28T13:29:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.228701 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.228787 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:29:50 crc kubenswrapper[4857]: E1128 13:29:50.228835 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:29:50 crc kubenswrapper[4857]: E1128 13:29:50.229280 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.229333 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:29:50 crc kubenswrapper[4857]: E1128 13:29:50.229540 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.248673 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e
6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.263704 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.277153 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-26tq7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cf43f51-b79b-49fc-85ca-a245a248f27a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-26tq7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.287722 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.287770 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.287781 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.287808 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.287820 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:50Z","lastTransitionTime":"2025-11-28T13:29:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.294357 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.316644 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52d03fd9944b8321b802a2a4317030abb1644c2c52c849ca35b5ba1eee3ebb4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.340177 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8587850a181bf875ab24005a4b044489046212ae89b15834ccb0f0b46d3518b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b553128fa5576f5ac2e327e5e679a8098348ee31f0632c34d2b725d92198f8b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60c0f808818d6f71bad9f7b52d7fe42823b6e8ad83d3395366cab0f90fb75ec2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://933498cbc402ee7f14aab38ef86c401dc46df3c21d986c2a97ff1871bb7b7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4bb280266a0eb3369080dfcb1e4d0522f2d365a300f82637288356703bde242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1dc7ae9f1870db29ad30c6b4f3c8f8d28015c1e3d9bf27171e5e8a95d51b1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3479ddd010edde55297b54ed32949b10c87257a70b551253716751517de87167\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3479ddd010edde55297b54ed32949b10c87257a70b551253716751517de87167\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"rator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.161:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {f32857b5-f652-4313-a0d7-455c3156dd99}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1128 13:29:45.083329 6319 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for namespace Informer during admin network policy controller initialization, handler {0x1fcbf20 0x1fcbc00 0x1fcbba0} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:45Z is after 2025-08-24T17:21:41Z]\\\\nI1128 13:29:45.080546 6319 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/mul\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-h8td2_openshift-ovn-kubernetes(46c5e02c-be1a-45b7-86ef-cc8c484c4f71)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f7d207494d4959cc23a5df6738539f073ec3c45cfe0339bd14b1a3e876f6031\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.352507 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"adf71647-2fc9-4497-81ee-84e6373498b4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://57e2a3423c1095d435ed7835a89df7e3e8f12c8df10707e1a82ae4595fadf4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d598096a9163436c21991255cd21335da8307e56e5607288d289da81d77578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kmjsb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.365040 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.379793 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.390304 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.390692 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.390794 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.390888 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.390963 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:50Z","lastTransitionTime":"2025-11-28T13:29:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.394843 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.406271 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cs9jw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6aa30f3141838a2780e6e300545fcb2d71a80ed4b146759f8dff91c976c3f36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zchsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cs9jw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.420192 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.434087 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.450453 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.468571 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\
":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.479807 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.493749 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.493790 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.493800 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.493818 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.493832 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:50Z","lastTransitionTime":"2025-11-28T13:29:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.596617 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.596664 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.596676 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.596693 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.596707 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:50Z","lastTransitionTime":"2025-11-28T13:29:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.698749 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.698791 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.698799 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.698814 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.698824 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:50Z","lastTransitionTime":"2025-11-28T13:29:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.801935 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.802010 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.802022 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.802039 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.802052 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:50Z","lastTransitionTime":"2025-11-28T13:29:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.904677 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.904769 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.904796 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.904833 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:50 crc kubenswrapper[4857]: I1128 13:29:50.904859 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:50Z","lastTransitionTime":"2025-11-28T13:29:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.007669 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.007750 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.007769 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.007798 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.007825 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:51Z","lastTransitionTime":"2025-11-28T13:29:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.110116 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.110175 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.110193 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.110217 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.110235 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:51Z","lastTransitionTime":"2025-11-28T13:29:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.213272 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.213357 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.213376 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.213401 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.213420 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:51Z","lastTransitionTime":"2025-11-28T13:29:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.228706 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:29:51 crc kubenswrapper[4857]: E1128 13:29:51.228898 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.316152 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.316233 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.316260 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.316293 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.316317 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:51Z","lastTransitionTime":"2025-11-28T13:29:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.418753 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.418825 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.418843 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.418866 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.418885 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:51Z","lastTransitionTime":"2025-11-28T13:29:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.522697 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.522766 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.522788 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.522822 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.522847 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:51Z","lastTransitionTime":"2025-11-28T13:29:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.625722 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.626265 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.626415 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.626549 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.626682 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:51Z","lastTransitionTime":"2025-11-28T13:29:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.729827 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.729862 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.729870 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.729910 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.729921 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:51Z","lastTransitionTime":"2025-11-28T13:29:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.833469 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.833536 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.833559 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.833595 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.833623 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:51Z","lastTransitionTime":"2025-11-28T13:29:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.937302 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.937371 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.937392 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.937418 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:51 crc kubenswrapper[4857]: I1128 13:29:51.937438 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:51Z","lastTransitionTime":"2025-11-28T13:29:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.041889 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.041973 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.041986 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.042009 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.042024 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:52Z","lastTransitionTime":"2025-11-28T13:29:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.145573 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.145913 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.146095 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.146284 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.146435 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:52Z","lastTransitionTime":"2025-11-28T13:29:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.228775 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.228835 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.228800 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:29:52 crc kubenswrapper[4857]: E1128 13:29:52.228974 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:29:52 crc kubenswrapper[4857]: E1128 13:29:52.229121 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:29:52 crc kubenswrapper[4857]: E1128 13:29:52.229217 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.248880 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.249136 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.249258 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.249342 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.249442 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:52Z","lastTransitionTime":"2025-11-28T13:29:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.351814 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.351869 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.351881 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.351896 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.351907 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:52Z","lastTransitionTime":"2025-11-28T13:29:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.455514 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.455581 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.455606 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.455639 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.455669 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:52Z","lastTransitionTime":"2025-11-28T13:29:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.558452 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.558611 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.558646 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.558680 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.558709 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:52Z","lastTransitionTime":"2025-11-28T13:29:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.661911 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.661989 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.662000 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.662018 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.662030 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:52Z","lastTransitionTime":"2025-11-28T13:29:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.764896 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.765009 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.765029 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.765052 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.765072 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:52Z","lastTransitionTime":"2025-11-28T13:29:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.867649 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.867715 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.867735 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.867760 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.867780 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:52Z","lastTransitionTime":"2025-11-28T13:29:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.971895 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.971991 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.972008 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.972029 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:52 crc kubenswrapper[4857]: I1128 13:29:52.972043 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:52Z","lastTransitionTime":"2025-11-28T13:29:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.074805 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.074920 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.074931 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.074967 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.074979 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:53Z","lastTransitionTime":"2025-11-28T13:29:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.178143 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.178229 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.178253 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.178282 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.178308 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:53Z","lastTransitionTime":"2025-11-28T13:29:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.228491 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:29:53 crc kubenswrapper[4857]: E1128 13:29:53.228629 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.280509 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.280569 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.280581 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.280595 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.280934 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:53Z","lastTransitionTime":"2025-11-28T13:29:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.384398 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.384467 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.384491 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.384521 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.384544 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:53Z","lastTransitionTime":"2025-11-28T13:29:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.488075 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.488144 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.488162 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.488185 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.488203 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:53Z","lastTransitionTime":"2025-11-28T13:29:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.514820 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0cf43f51-b79b-49fc-85ca-a245a248f27a-metrics-certs\") pod \"network-metrics-daemon-26tq7\" (UID: \"0cf43f51-b79b-49fc-85ca-a245a248f27a\") " pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:29:53 crc kubenswrapper[4857]: E1128 13:29:53.515139 4857 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 13:29:53 crc kubenswrapper[4857]: E1128 13:29:53.515243 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0cf43f51-b79b-49fc-85ca-a245a248f27a-metrics-certs podName:0cf43f51-b79b-49fc-85ca-a245a248f27a nodeName:}" failed. No retries permitted until 2025-11-28 13:30:01.515215245 +0000 UTC m=+51.639156722 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0cf43f51-b79b-49fc-85ca-a245a248f27a-metrics-certs") pod "network-metrics-daemon-26tq7" (UID: "0cf43f51-b79b-49fc-85ca-a245a248f27a") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.591928 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.592033 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.592053 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.592088 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.592117 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:53Z","lastTransitionTime":"2025-11-28T13:29:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.694786 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.694841 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.694864 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.694895 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.694917 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:53Z","lastTransitionTime":"2025-11-28T13:29:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.798431 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.798497 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.798516 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.798542 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.798560 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:53Z","lastTransitionTime":"2025-11-28T13:29:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.841285 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.841325 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.841334 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.841349 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.841359 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:53Z","lastTransitionTime":"2025-11-28T13:29:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:53 crc kubenswrapper[4857]: E1128 13:29:53.861117 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:53Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.866224 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.866323 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.866344 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.866419 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.866444 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:53Z","lastTransitionTime":"2025-11-28T13:29:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:53 crc kubenswrapper[4857]: E1128 13:29:53.882184 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:53Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.888032 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.888077 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.888087 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.888102 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.888112 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:53Z","lastTransitionTime":"2025-11-28T13:29:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:53 crc kubenswrapper[4857]: E1128 13:29:53.907384 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:53Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.913296 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.913372 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.913389 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.913416 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.913439 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:53Z","lastTransitionTime":"2025-11-28T13:29:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:53 crc kubenswrapper[4857]: E1128 13:29:53.933772 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:53Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.938400 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.938444 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.938455 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.938471 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.938481 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:53Z","lastTransitionTime":"2025-11-28T13:29:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:53 crc kubenswrapper[4857]: E1128 13:29:53.952836 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:53Z is after 2025-08-24T17:21:41Z" Nov 28 13:29:53 crc kubenswrapper[4857]: E1128 13:29:53.953007 4857 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.954529 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.954600 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.954626 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.954656 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:53 crc kubenswrapper[4857]: I1128 13:29:53.954684 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:53Z","lastTransitionTime":"2025-11-28T13:29:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.057001 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.057056 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.057069 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.057093 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.057107 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:54Z","lastTransitionTime":"2025-11-28T13:29:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.160127 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.160159 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.160167 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.160181 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.160190 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:54Z","lastTransitionTime":"2025-11-28T13:29:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.228168 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.228266 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:29:54 crc kubenswrapper[4857]: E1128 13:29:54.228298 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:29:54 crc kubenswrapper[4857]: E1128 13:29:54.228383 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.228260 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:29:54 crc kubenswrapper[4857]: E1128 13:29:54.228445 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.262287 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.262349 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.262362 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.262378 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.262389 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:54Z","lastTransitionTime":"2025-11-28T13:29:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.364830 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.364876 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.364886 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.364904 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.364917 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:54Z","lastTransitionTime":"2025-11-28T13:29:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.467749 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.468152 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.468167 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.468193 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.468208 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:54Z","lastTransitionTime":"2025-11-28T13:29:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.574157 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.574587 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.574646 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.574748 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.574813 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:54Z","lastTransitionTime":"2025-11-28T13:29:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.677718 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.677767 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.677776 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.677791 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.677802 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:54Z","lastTransitionTime":"2025-11-28T13:29:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.779556 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.779593 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.779603 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.779616 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.779627 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:54Z","lastTransitionTime":"2025-11-28T13:29:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.882547 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.882602 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.882615 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.882633 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.882647 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:54Z","lastTransitionTime":"2025-11-28T13:29:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.985116 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.985251 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.985267 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.985287 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:54 crc kubenswrapper[4857]: I1128 13:29:54.985302 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:54Z","lastTransitionTime":"2025-11-28T13:29:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.088236 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.088276 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.088285 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.088300 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.088330 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:55Z","lastTransitionTime":"2025-11-28T13:29:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.191172 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.191242 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.191265 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.191292 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.191315 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:55Z","lastTransitionTime":"2025-11-28T13:29:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.228693 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:29:55 crc kubenswrapper[4857]: E1128 13:29:55.228840 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.294089 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.294135 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.294146 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.294164 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.294182 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:55Z","lastTransitionTime":"2025-11-28T13:29:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.396964 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.397009 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.397020 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.397038 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.397050 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:55Z","lastTransitionTime":"2025-11-28T13:29:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.499447 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.499510 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.499522 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.499541 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.499605 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:55Z","lastTransitionTime":"2025-11-28T13:29:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.522892 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.523879 4857 scope.go:117] "RemoveContainer" containerID="3479ddd010edde55297b54ed32949b10c87257a70b551253716751517de87167" Nov 28 13:29:55 crc kubenswrapper[4857]: E1128 13:29:55.524123 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-h8td2_openshift-ovn-kubernetes(46c5e02c-be1a-45b7-86ef-cc8c484c4f71)\"" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.601775 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.601814 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.601822 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.601837 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.601848 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:55Z","lastTransitionTime":"2025-11-28T13:29:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.708354 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.708388 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.708396 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.708411 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.708420 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:55Z","lastTransitionTime":"2025-11-28T13:29:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.811413 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.811463 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.811475 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.811490 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.811503 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:55Z","lastTransitionTime":"2025-11-28T13:29:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.914549 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.914635 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.914654 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.915166 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:55 crc kubenswrapper[4857]: I1128 13:29:55.915397 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:55Z","lastTransitionTime":"2025-11-28T13:29:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.019004 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.019058 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.019072 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.019092 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.019107 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:56Z","lastTransitionTime":"2025-11-28T13:29:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.122327 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.122418 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.122443 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.122480 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.122502 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:56Z","lastTransitionTime":"2025-11-28T13:29:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.225528 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.225598 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.225619 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.225645 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.225664 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:56Z","lastTransitionTime":"2025-11-28T13:29:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.227738 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.227781 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:29:56 crc kubenswrapper[4857]: E1128 13:29:56.227861 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.228024 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:29:56 crc kubenswrapper[4857]: E1128 13:29:56.228231 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:29:56 crc kubenswrapper[4857]: E1128 13:29:56.228280 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.328198 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.328268 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.328288 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.328315 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.328336 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:56Z","lastTransitionTime":"2025-11-28T13:29:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.431198 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.431247 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.431260 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.431279 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.431294 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:56Z","lastTransitionTime":"2025-11-28T13:29:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.533884 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.533968 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.533987 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.534010 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.534028 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:56Z","lastTransitionTime":"2025-11-28T13:29:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.638027 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.638118 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.638136 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.638215 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.638236 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:56Z","lastTransitionTime":"2025-11-28T13:29:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.741870 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.741933 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.741963 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.741979 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.741990 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:56Z","lastTransitionTime":"2025-11-28T13:29:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.845883 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.846019 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.846049 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.846083 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.846110 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:56Z","lastTransitionTime":"2025-11-28T13:29:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.950343 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.950440 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.950468 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.950503 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:56 crc kubenswrapper[4857]: I1128 13:29:56.950540 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:56Z","lastTransitionTime":"2025-11-28T13:29:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.052767 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.052816 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.052863 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.052886 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.052902 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:57Z","lastTransitionTime":"2025-11-28T13:29:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.155408 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.155458 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.155472 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.155491 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.155506 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:57Z","lastTransitionTime":"2025-11-28T13:29:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.228313 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:29:57 crc kubenswrapper[4857]: E1128 13:29:57.228698 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.258274 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.258314 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.258327 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.258345 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.258356 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:57Z","lastTransitionTime":"2025-11-28T13:29:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.361423 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.361483 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.361496 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.361522 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.361538 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:57Z","lastTransitionTime":"2025-11-28T13:29:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.464085 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.464131 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.464139 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.464153 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.464162 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:57Z","lastTransitionTime":"2025-11-28T13:29:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.566689 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.566758 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.566776 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.566803 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.566822 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:57Z","lastTransitionTime":"2025-11-28T13:29:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.669562 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.669615 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.669629 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.669647 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.669662 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:57Z","lastTransitionTime":"2025-11-28T13:29:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.772407 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.772448 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.772459 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.772474 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.772486 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:57Z","lastTransitionTime":"2025-11-28T13:29:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.874503 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.874554 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.874565 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.874585 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.874599 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:57Z","lastTransitionTime":"2025-11-28T13:29:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.977452 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.977513 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.977564 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.977584 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:57 crc kubenswrapper[4857]: I1128 13:29:57.977598 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:57Z","lastTransitionTime":"2025-11-28T13:29:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.079885 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.079932 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.079976 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.079995 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.080007 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:58Z","lastTransitionTime":"2025-11-28T13:29:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.182002 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.182045 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.182056 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.182071 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.182083 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:58Z","lastTransitionTime":"2025-11-28T13:29:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.228536 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.228613 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.228713 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:29:58 crc kubenswrapper[4857]: E1128 13:29:58.228834 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:29:58 crc kubenswrapper[4857]: E1128 13:29:58.228987 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:29:58 crc kubenswrapper[4857]: E1128 13:29:58.229130 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.284863 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.284920 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.284930 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.284963 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.284979 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:58Z","lastTransitionTime":"2025-11-28T13:29:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.387107 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.387147 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.387157 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.387173 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.387187 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:58Z","lastTransitionTime":"2025-11-28T13:29:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.489852 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.489935 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.489996 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.490031 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.490055 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:58Z","lastTransitionTime":"2025-11-28T13:29:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.592413 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.592480 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.592499 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.592524 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.592541 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:58Z","lastTransitionTime":"2025-11-28T13:29:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.695385 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.695414 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.695422 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.695435 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.695445 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:58Z","lastTransitionTime":"2025-11-28T13:29:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.798122 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.798163 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.798174 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.798200 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.798213 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:58Z","lastTransitionTime":"2025-11-28T13:29:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.900888 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.900992 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.901013 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.901038 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:58 crc kubenswrapper[4857]: I1128 13:29:58.901053 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:58Z","lastTransitionTime":"2025-11-28T13:29:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.002983 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.003059 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.003077 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.003103 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.003122 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:59Z","lastTransitionTime":"2025-11-28T13:29:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.106283 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.106331 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.106346 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.106368 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.106384 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:59Z","lastTransitionTime":"2025-11-28T13:29:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.208710 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.208765 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.208779 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.208800 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.208817 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:59Z","lastTransitionTime":"2025-11-28T13:29:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.228126 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:29:59 crc kubenswrapper[4857]: E1128 13:29:59.228302 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.312482 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.312568 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.312591 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.312620 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.312645 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:59Z","lastTransitionTime":"2025-11-28T13:29:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.415230 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.415295 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.415307 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.415326 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.415341 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:59Z","lastTransitionTime":"2025-11-28T13:29:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.517843 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.517894 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.517903 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.517919 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.517929 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:59Z","lastTransitionTime":"2025-11-28T13:29:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.620706 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.620761 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.620776 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.620797 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.620813 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:59Z","lastTransitionTime":"2025-11-28T13:29:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.723860 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.723899 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.723908 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.723923 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.723933 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:59Z","lastTransitionTime":"2025-11-28T13:29:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.827315 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.827395 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.827407 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.827423 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.827435 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:59Z","lastTransitionTime":"2025-11-28T13:29:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.930188 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.930275 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.930311 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.930394 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:29:59 crc kubenswrapper[4857]: I1128 13:29:59.930424 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:29:59Z","lastTransitionTime":"2025-11-28T13:29:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.033261 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.033316 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.033328 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.033393 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.033408 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:00Z","lastTransitionTime":"2025-11-28T13:30:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.136488 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.136557 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.136569 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.136589 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.136603 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:00Z","lastTransitionTime":"2025-11-28T13:30:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.227899 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.227974 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.228004 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:30:00 crc kubenswrapper[4857]: E1128 13:30:00.228081 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:30:00 crc kubenswrapper[4857]: E1128 13:30:00.228221 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:30:00 crc kubenswrapper[4857]: E1128 13:30:00.228356 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.239314 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.239357 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.239366 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.239385 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.239399 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:00Z","lastTransitionTime":"2025-11-28T13:30:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.241444 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or 
is not yet valid: current time 2025-11-28T13:30:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.252187 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.262495 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.272151 4857 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.283091 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.295653 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-28T13:30:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.308311 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-26tq7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cf43f51-b79b-49fc-85ca-a245a248f27a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-26tq7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.321586 4857 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.333828 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.341897 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.341967 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.341978 4857 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.341995 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.342006 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:00Z","lastTransitionTime":"2025-11-28T13:30:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.348817 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52d03fd9944b8321b802a2a4317030abb1644c2c52c849ca35b5ba1eee3ebb4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d
0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Runnin
g\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.365771 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8587850a181bf875ab24005a4b044489046212ae89b15834ccb0f0b46d3518b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b553128fa5576f5ac2e327e5e679a8098348ee31f0632c34d2b725d92198f8b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60c0f808818d6f71bad9f7b52d7fe42823b6e8ad83d3395366cab0f90fb75ec2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://933498cbc402ee7f14aab38ef86c401dc46df3c21d986c2a97ff1871bb7b7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4bb280266a0eb3369080dfcb1e4d0522f2d365a300f82637288356703bde242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1dc7ae9f1870db29ad30c6b4f3c8f8d28015c1e3d9bf27171e5e8a95d51b1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3479ddd010edde55297b54ed32949b10c87257a7
0b551253716751517de87167\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3479ddd010edde55297b54ed32949b10c87257a70b551253716751517de87167\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"rator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.161:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {f32857b5-f652-4313-a0d7-455c3156dd99}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1128 13:29:45.083329 6319 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for namespace Informer during admin network policy controller initialization, handler {0x1fcbf20 0x1fcbc00 0x1fcbba0} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:45Z is after 2025-08-24T17:21:41Z]\\\\nI1128 13:29:45.080546 6319 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/mul\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-h8td2_openshift-ovn-kubernetes(46c5e02c-be1a-45b7-86ef-cc8c484c4f71)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f7d207494d4959cc23a5df6738539f073ec3c45cfe0339bd14b1a3e876f6031\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.375970 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"adf71647-2fc9-4497-81ee-84e6373498b4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://57e2a3423c1095d435ed7835a89df7e3e8f12c8df10707e1a82ae4595fadf4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d598096a9163436c21991255cd21335da8307e56e5607288d289da81d77578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kmjsb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.388449 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.405082 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.416474 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.428534 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cs9jw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6aa30f3141838a2780e6e300545fcb2d71a80ed4b146759f8dff91c976c3f36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zchsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cs9jw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.443728 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.443819 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.443852 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:00 crc 
kubenswrapper[4857]: I1128 13:30:00.443868 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.443880 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:00Z","lastTransitionTime":"2025-11-28T13:30:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.546807 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.546872 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.546891 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.546914 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.546930 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:00Z","lastTransitionTime":"2025-11-28T13:30:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.649978 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.650022 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.650031 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.650045 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.650056 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:00Z","lastTransitionTime":"2025-11-28T13:30:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.752562 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.752639 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.752656 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.752683 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.752704 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:00Z","lastTransitionTime":"2025-11-28T13:30:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.856026 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.856101 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.856120 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.856144 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.856161 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:00Z","lastTransitionTime":"2025-11-28T13:30:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.959406 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.959472 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.959490 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.959515 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:00 crc kubenswrapper[4857]: I1128 13:30:00.959535 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:00Z","lastTransitionTime":"2025-11-28T13:30:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.061822 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.061873 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.061881 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.061895 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.061905 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:01Z","lastTransitionTime":"2025-11-28T13:30:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.165732 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.165772 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.165783 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.165802 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.165815 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:01Z","lastTransitionTime":"2025-11-28T13:30:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.228565 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:30:01 crc kubenswrapper[4857]: E1128 13:30:01.228746 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.269500 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.269585 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.269606 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.269637 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.269661 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:01Z","lastTransitionTime":"2025-11-28T13:30:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.372001 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.372053 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.372064 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.372083 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.372096 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:01Z","lastTransitionTime":"2025-11-28T13:30:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.475216 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.475306 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.475325 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.475353 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.475371 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:01Z","lastTransitionTime":"2025-11-28T13:30:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.579091 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.579163 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.579180 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.579249 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.579288 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:01Z","lastTransitionTime":"2025-11-28T13:30:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.605545 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0cf43f51-b79b-49fc-85ca-a245a248f27a-metrics-certs\") pod \"network-metrics-daemon-26tq7\" (UID: \"0cf43f51-b79b-49fc-85ca-a245a248f27a\") " pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:30:01 crc kubenswrapper[4857]: E1128 13:30:01.605870 4857 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 13:30:01 crc kubenswrapper[4857]: E1128 13:30:01.606073 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0cf43f51-b79b-49fc-85ca-a245a248f27a-metrics-certs podName:0cf43f51-b79b-49fc-85ca-a245a248f27a nodeName:}" failed. No retries permitted until 2025-11-28 13:30:17.606021123 +0000 UTC m=+67.729962590 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0cf43f51-b79b-49fc-85ca-a245a248f27a-metrics-certs") pod "network-metrics-daemon-26tq7" (UID: "0cf43f51-b79b-49fc-85ca-a245a248f27a") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.682876 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.683006 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.683029 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.683066 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.683085 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:01Z","lastTransitionTime":"2025-11-28T13:30:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.786254 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.786326 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.786337 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.786357 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.786372 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:01Z","lastTransitionTime":"2025-11-28T13:30:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.889259 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.889341 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.889361 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.889394 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.889429 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:01Z","lastTransitionTime":"2025-11-28T13:30:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.993888 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.993987 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.994004 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.994035 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:01 crc kubenswrapper[4857]: I1128 13:30:01.994061 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:01Z","lastTransitionTime":"2025-11-28T13:30:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.009707 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.009856 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:30:02 crc kubenswrapper[4857]: E1128 13:30:02.009983 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
Nov 28 13:30:02 crc kubenswrapper[4857]: E1128 13:30:02.010010 4857 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 28 13:30:02 crc kubenswrapper[4857]: E1128 13:30:02.010075 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 13:30:34.010059591 +0000 UTC m=+84.134001028 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.010070 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.010182 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.010220 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 13:30:02 crc kubenswrapper[4857]: E1128 13:30:02.010294 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 28 13:30:02 crc kubenswrapper[4857]: E1128 13:30:02.010317 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 28 13:30:02 crc kubenswrapper[4857]: E1128 13:30:02.010321 4857 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 28 13:30:02 crc kubenswrapper[4857]: E1128 13:30:02.010415 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 28 13:30:02 crc kubenswrapper[4857]: E1128 13:30:02.010420 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 13:30:34.01039783 +0000 UTC m=+84.134339327 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 28 13:30:02 crc kubenswrapper[4857]: E1128 13:30:02.010455 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 28 13:30:02 crc kubenswrapper[4857]: E1128 13:30:02.010333 4857 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 13:30:02 crc kubenswrapper[4857]: E1128 13:30:02.010471 4857 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 13:30:02 crc kubenswrapper[4857]: E1128 13:30:02.010610 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 13:30:34.010565804 +0000 UTC m=+84.134507261 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 13:30:02 crc kubenswrapper[4857]: E1128 13:30:02.010641 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 13:30:34.010630096 +0000 UTC m=+84.134571773 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.096925 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.096991 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.097008 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.097025 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.097037 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:02Z","lastTransitionTime":"2025-11-28T13:30:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.200185 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.200270 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.200287 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.200315 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.200336 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:02Z","lastTransitionTime":"2025-11-28T13:30:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.227792 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 13:30:02 crc kubenswrapper[4857]: E1128 13:30:02.227986 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.228243 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.228322 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:30:02 crc kubenswrapper[4857]: E1128 13:30:02.228519 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:30:02 crc kubenswrapper[4857]: E1128 13:30:02.228773 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.303981 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.304031 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.304040 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.304060 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.304078 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:02Z","lastTransitionTime":"2025-11-28T13:30:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.408793 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.409007 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.409043 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.409143 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.409213 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:02Z","lastTransitionTime":"2025-11-28T13:30:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.512436 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.512487 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.512497 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.512520 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.512533 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:02Z","lastTransitionTime":"2025-11-28T13:30:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.616046 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.616113 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.616133 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.616160 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.616185 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:02Z","lastTransitionTime":"2025-11-28T13:30:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.719597 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.719650 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.719658 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.719674 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.719684 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:02Z","lastTransitionTime":"2025-11-28T13:30:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.821695 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.821733 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.821740 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.821753 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.821764 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:02Z","lastTransitionTime":"2025-11-28T13:30:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.924853 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.924896 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.924905 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.924922 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.924935 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:02Z","lastTransitionTime":"2025-11-28T13:30:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:02 crc kubenswrapper[4857]: I1128 13:30:02.998744 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.010530 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.013702 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",
\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:03Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.028080 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.028171 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.028185 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.028150 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cs9jw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6aa30f3141838a2780e6e300545fcb2d71a80ed4b146759f8dff91c976c3f36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zchsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],
\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cs9jw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:03Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.028209 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.028228 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:03Z","lastTransitionTime":"2025-11-28T13:30:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.044734 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7
814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 
13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:03Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.059695 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:03Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.074114 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:03Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.088434 4857 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:03Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.105373 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:03Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.119695 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:03Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.130834 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.130885 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.130896 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.130914 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.130929 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:03Z","lastTransitionTime":"2025-11-28T13:30:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.138076 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-26tq7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cf43f51-b79b-49fc-85ca-a245a248f27a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-26tq7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:03Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.154793 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:03Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.168248 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-28T13:30:03Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.192443 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8587850a181bf875ab24005a4b044489046212ae89b15834ccb0f0b46d3518b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b553128fa5576f5ac2e327e5e679a8098348ee31f0632c34d2b725d92198f8b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"
recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60c0f808818d6f71bad9f7b52d7fe42823b6e8ad83d3395366cab0f90fb75ec2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://933498cbc402ee7f14aab38ef86c401dc46df3c21d986c2a97ff1871bb7b7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4bb280266a0eb3369080dfcb1e4d0522f2d365a300f82637288356703bde242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1dc7ae9f1870db29ad30c6b4f3c8f8d28015c1e3d9bf27171e5e8a95d51b1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257
453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3479ddd010edde55297b54ed32949b10c87257a70b551253716751517de87167\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3479ddd010edde55297b54ed32949b10c87257a70b551253716751517de87167\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"rator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.161:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {f32857b5-f652-4313-a0d7-455c3156dd99}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1128 13:29:45.083329 6319 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for namespace Informer during admin network policy controller initialization, handler {0x1fcbf20 0x1fcbc00 0x1fcbba0} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:45Z is after 2025-08-24T17:21:41Z]\\\\nI1128 13:29:45.080546 6319 obj_retry.go:365] Adding new object: *v1.Pod 
openshift-multus/mul\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-h8td2_openshift-ovn-kubernetes(46c5e02c-be1a-45b7-86ef-cc8c484c4f71)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f7d207494d4959cc23a5df6738539f073ec3c45cfe0339bd14b1a3e876f6031\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":t
rue,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:03Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.205674 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"adf71647-2fc9-4497-81ee-84e6373498b4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://57e2a3423c1095d435ed7835a89df7e3e8f12c8df10707e1a82ae4595fadf4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d598096a9163436c21991255cd21335da8307e56e5607288d289da81d77578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kmjsb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:03Z is after 2025-08-24T17:21:41Z" Nov 28 
13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.218757 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:03Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.228570 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:30:03 crc kubenswrapper[4857]: E1128 13:30:03.228756 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.232933 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.232975 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.232983 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.232998 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.233010 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:03Z","lastTransitionTime":"2025-11-28T13:30:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.233296 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name
\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:03Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.252713 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52d03fd9944b8321b802a2a4317030abb1644c2c52c849ca35b5ba1eee3ebb4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\
\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-a
ccess-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\
\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:03Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.335483 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.335533 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.335544 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.335560 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.335572 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:03Z","lastTransitionTime":"2025-11-28T13:30:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.438398 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.438443 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.438451 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.438465 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.438475 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:03Z","lastTransitionTime":"2025-11-28T13:30:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.541608 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.541664 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.541679 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.541699 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.541712 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:03Z","lastTransitionTime":"2025-11-28T13:30:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.646069 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.646133 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.646152 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.646179 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.646197 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:03Z","lastTransitionTime":"2025-11-28T13:30:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.748804 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.748858 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.748869 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.748886 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.748896 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:03Z","lastTransitionTime":"2025-11-28T13:30:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.852165 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.852229 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.852243 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.852265 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.852278 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:03Z","lastTransitionTime":"2025-11-28T13:30:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.955221 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.955325 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.955350 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.955389 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:03 crc kubenswrapper[4857]: I1128 13:30:03.955411 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:03Z","lastTransitionTime":"2025-11-28T13:30:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.058530 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.058598 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.058613 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.058637 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.058650 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:04Z","lastTransitionTime":"2025-11-28T13:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.162350 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.162417 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.162435 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.162464 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.162487 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:04Z","lastTransitionTime":"2025-11-28T13:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.228561 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.228712 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.228789 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:30:04 crc kubenswrapper[4857]: E1128 13:30:04.228907 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:30:04 crc kubenswrapper[4857]: E1128 13:30:04.229050 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:30:04 crc kubenswrapper[4857]: E1128 13:30:04.229212 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.266091 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.266140 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.266152 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.266173 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.266186 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:04Z","lastTransitionTime":"2025-11-28T13:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.296547 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.296604 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.296623 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.296677 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.296696 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:04Z","lastTransitionTime":"2025-11-28T13:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:04 crc kubenswrapper[4857]: E1128 13:30:04.309570 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:04Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.313256 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.313369 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.313469 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.313568 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.313687 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:04Z","lastTransitionTime":"2025-11-28T13:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:04 crc kubenswrapper[4857]: E1128 13:30:04.327568 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:04Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.331593 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.331642 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.331654 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.331671 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.331683 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:04Z","lastTransitionTime":"2025-11-28T13:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:04 crc kubenswrapper[4857]: E1128 13:30:04.347203 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:04Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.350462 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.350666 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.350769 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.350868 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.350990 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:04Z","lastTransitionTime":"2025-11-28T13:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:04 crc kubenswrapper[4857]: E1128 13:30:04.362894 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:04Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.371264 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.371340 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.371351 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.371366 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.371376 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:04Z","lastTransitionTime":"2025-11-28T13:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:04 crc kubenswrapper[4857]: E1128 13:30:04.383824 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:04Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:04 crc kubenswrapper[4857]: E1128 13:30:04.384036 4857 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.385760 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.385798 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.385807 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.385826 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.385838 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:04Z","lastTransitionTime":"2025-11-28T13:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.490969 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.491001 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.491009 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.491024 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.491035 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:04Z","lastTransitionTime":"2025-11-28T13:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.593359 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.593409 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.593423 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.593440 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.593452 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:04Z","lastTransitionTime":"2025-11-28T13:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.696221 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.696271 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.696284 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.696301 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.696316 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:04Z","lastTransitionTime":"2025-11-28T13:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.799583 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.799652 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.799668 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.799687 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.799699 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:04Z","lastTransitionTime":"2025-11-28T13:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.902501 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.902577 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.902602 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.902634 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:04 crc kubenswrapper[4857]: I1128 13:30:04.902655 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:04Z","lastTransitionTime":"2025-11-28T13:30:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.005732 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.005782 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.005803 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.005819 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.005830 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:05Z","lastTransitionTime":"2025-11-28T13:30:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.108155 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.108190 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.108198 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.108212 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.108221 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:05Z","lastTransitionTime":"2025-11-28T13:30:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.211570 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.211629 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.211647 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.211667 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.211680 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:05Z","lastTransitionTime":"2025-11-28T13:30:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.228176 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:30:05 crc kubenswrapper[4857]: E1128 13:30:05.228344 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.313995 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.314035 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.314045 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.314059 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.314070 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:05Z","lastTransitionTime":"2025-11-28T13:30:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.416994 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.417038 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.417046 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.417060 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.417072 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:05Z","lastTransitionTime":"2025-11-28T13:30:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.519755 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.519804 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.519815 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.519833 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.519846 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:05Z","lastTransitionTime":"2025-11-28T13:30:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.622407 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.622469 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.622478 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.622495 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.622506 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:05Z","lastTransitionTime":"2025-11-28T13:30:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.725615 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.725672 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.725689 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.725715 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.725737 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:05Z","lastTransitionTime":"2025-11-28T13:30:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.828432 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.828487 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.828507 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.828532 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.828551 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:05Z","lastTransitionTime":"2025-11-28T13:30:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.932488 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.932580 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.932601 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.932638 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:05 crc kubenswrapper[4857]: I1128 13:30:05.932662 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:05Z","lastTransitionTime":"2025-11-28T13:30:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.035685 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.035733 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.035749 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.035770 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.035784 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:06Z","lastTransitionTime":"2025-11-28T13:30:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.139281 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.139334 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.139344 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.139366 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.139379 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:06Z","lastTransitionTime":"2025-11-28T13:30:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.228041 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:30:06 crc kubenswrapper[4857]: E1128 13:30:06.228258 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.228368 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:30:06 crc kubenswrapper[4857]: E1128 13:30:06.228646 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.228673 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:30:06 crc kubenswrapper[4857]: E1128 13:30:06.229498 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.230103 4857 scope.go:117] "RemoveContainer" containerID="3479ddd010edde55297b54ed32949b10c87257a70b551253716751517de87167" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.241112 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.241395 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.241564 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.241923 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.242297 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:06Z","lastTransitionTime":"2025-11-28T13:30:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.345698 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.345811 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.345830 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.345860 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.345880 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:06Z","lastTransitionTime":"2025-11-28T13:30:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.449138 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.449219 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.449230 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.449254 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.449276 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:06Z","lastTransitionTime":"2025-11-28T13:30:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.552916 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.553104 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.553129 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.553201 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.553223 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:06Z","lastTransitionTime":"2025-11-28T13:30:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.656965 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.657021 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.657035 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.657055 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.657070 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:06Z","lastTransitionTime":"2025-11-28T13:30:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.760432 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.760507 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.760524 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.760550 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.760567 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:06Z","lastTransitionTime":"2025-11-28T13:30:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.863440 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.863475 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.863483 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.863498 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.863508 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:06Z","lastTransitionTime":"2025-11-28T13:30:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.966930 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.967025 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.967039 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.967059 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:06 crc kubenswrapper[4857]: I1128 13:30:06.967074 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:06Z","lastTransitionTime":"2025-11-28T13:30:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.069815 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.069857 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.069868 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.069886 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.069897 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:07Z","lastTransitionTime":"2025-11-28T13:30:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.172672 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.172715 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.172730 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.172748 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.172759 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:07Z","lastTransitionTime":"2025-11-28T13:30:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.227721 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:30:07 crc kubenswrapper[4857]: E1128 13:30:07.227876 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.274984 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.275021 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.275033 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.275050 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.275061 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:07Z","lastTransitionTime":"2025-11-28T13:30:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.344043 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-h8td2_46c5e02c-be1a-45b7-86ef-cc8c484c4f71/ovnkube-controller/1.log" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.346559 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" event={"ID":"46c5e02c-be1a-45b7-86ef-cc8c484c4f71","Type":"ContainerStarted","Data":"304840d0c27e42a0a5ff1c6fbfd883550ce473378e8e7ae7d4a33e420f62de50"} Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.347065 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.358460 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:07Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.370831 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:07Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.377650 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.377690 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.377701 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.377718 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.377727 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:07Z","lastTransitionTime":"2025-11-28T13:30:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.383968 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cs9jw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6aa30f3141838a2780e6e300545fcb2d71a80ed4b146759f8dff91c976c3f36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zchsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cs9jw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:07Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.401024 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:07Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.414234 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ea3abc8-9e6f-454d-ac25-f04483a5d4d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93cf74b7715a705e22a03a506d90d151a961e80a83a4aeda3d4e361cfc2a178d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc226ca8af9c501cbd610bcd82596f359f573fc294ce0830056d1d59bd066b0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8caf792b0a8768a68663568cc8a1cdca8c4f3394f30fe098841c6e37b9cf9980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5c76797e337ee42b458f3cc8108b46bd86b18d98a1a07ed60df3778203feb39\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e5c76797e337ee42b458f3cc8108b46bd86b18d98a1a07ed60df3778203feb39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:07Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.430474 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:07Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.441470 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:07Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.451886 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:07Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.464255 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:07Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.477832 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:07Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.480529 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.480716 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.480828 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.480934 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.481058 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:07Z","lastTransitionTime":"2025-11-28T13:30:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.488837 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-26tq7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cf43f51-b79b-49fc-85ca-a245a248f27a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-26tq7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:07Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.501557 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:07Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.518169 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52d03fd9944b8321b802a2a4317030abb1644c2c52c849ca35b5ba1eee3ebb4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9
a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-b
inary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"termin
ated\\\":{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:07Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.540120 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8587850a181bf875ab24005a4b044489046212ae89b15834ccb0f0b46d3518b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b553128fa5576f5ac2e327e5e679a8098348ee31f0632c34d2b725d92198f8b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60c0f808818d6f71bad9f7b52d7fe42823b6e8ad83d3395366cab0f90fb75ec2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://933498cbc402ee7f14aab38ef86c401dc46df3c21d986c2a97ff1871bb7b7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4bb280266a0eb3369080dfcb1e4d0522f2d365a300f82637288356703bde242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1dc7ae9f1870db29ad30c6b4f3c8f8d28015c1e3d9bf27171e5e8a95d51b1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://304840d0c27e42a0a5ff1c6fbfd883550ce47337
8e8e7ae7d4a33e420f62de50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3479ddd010edde55297b54ed32949b10c87257a70b551253716751517de87167\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"rator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.161:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {f32857b5-f652-4313-a0d7-455c3156dd99}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1128 13:29:45.083329 6319 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for namespace Informer during admin network policy controller initialization, handler {0x1fcbf20 0x1fcbc00 0x1fcbba0} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:45Z is after 2025-08-24T17:21:41Z]\\\\nI1128 13:29:45.080546 6319 obj_retry.go:365] Adding new object: *v1.Pod 
openshift-multus/mul\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f7d207494d4959cc23a5df6738539f073ec3c45cfe0339bd14b1a3e876f6031\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStat
uses\\\":[{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:07Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.554902 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"adf71647-2fc9-4497-81ee-84e6373498b4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://57e2a3423c1095d435ed7835a89df7e3e8f12c8df10707e1a82ae4595fadf4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d598096a9163436c21991255cd21335da8307e56e5607288d289da81d77578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kmjsb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:07Z is after 2025-08-24T17:21:41Z" Nov 28 
13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.567672 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:07Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.581084 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:07Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.583613 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.583661 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.583674 4857 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.583691 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.583708 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:07Z","lastTransitionTime":"2025-11-28T13:30:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.685858 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.686251 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.686488 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.686679 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.686860 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:07Z","lastTransitionTime":"2025-11-28T13:30:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.789273 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.789575 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.789637 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.789742 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.789812 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:07Z","lastTransitionTime":"2025-11-28T13:30:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.892856 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.892893 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.892900 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.892917 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:07 crc kubenswrapper[4857]: I1128 13:30:07.892926 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:07Z","lastTransitionTime":"2025-11-28T13:30:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.000062 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.000730 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.000758 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.000780 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.000793 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:08Z","lastTransitionTime":"2025-11-28T13:30:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.104025 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.104061 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.104071 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.104086 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.104102 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:08Z","lastTransitionTime":"2025-11-28T13:30:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.207028 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.207114 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.207133 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.207158 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.207177 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:08Z","lastTransitionTime":"2025-11-28T13:30:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.228365 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.228447 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.228385 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:30:08 crc kubenswrapper[4857]: E1128 13:30:08.228601 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:30:08 crc kubenswrapper[4857]: E1128 13:30:08.228743 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:30:08 crc kubenswrapper[4857]: E1128 13:30:08.228850 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.310175 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.310232 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.310244 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.310270 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.310285 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:08Z","lastTransitionTime":"2025-11-28T13:30:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.351291 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-h8td2_46c5e02c-be1a-45b7-86ef-cc8c484c4f71/ovnkube-controller/2.log" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.352157 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-h8td2_46c5e02c-be1a-45b7-86ef-cc8c484c4f71/ovnkube-controller/1.log" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.355175 4857 generic.go:334] "Generic (PLEG): container finished" podID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerID="304840d0c27e42a0a5ff1c6fbfd883550ce473378e8e7ae7d4a33e420f62de50" exitCode=1 Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.355229 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" event={"ID":"46c5e02c-be1a-45b7-86ef-cc8c484c4f71","Type":"ContainerDied","Data":"304840d0c27e42a0a5ff1c6fbfd883550ce473378e8e7ae7d4a33e420f62de50"} Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.355273 4857 scope.go:117] "RemoveContainer" containerID="3479ddd010edde55297b54ed32949b10c87257a70b551253716751517de87167" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.355907 4857 scope.go:117] "RemoveContainer" containerID="304840d0c27e42a0a5ff1c6fbfd883550ce473378e8e7ae7d4a33e420f62de50" Nov 28 13:30:08 crc kubenswrapper[4857]: E1128 13:30:08.356077 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-h8td2_openshift-ovn-kubernetes(46c5e02c-be1a-45b7-86ef-cc8c484c4f71)\"" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.371639 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-26tq7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cf43f51-b79b-49fc-85ca-a245a248f27a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-26tq7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:08Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.387236 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:08Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.403733 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-28T13:30:08Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.412974 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.413023 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.413035 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.413055 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.413082 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:08Z","lastTransitionTime":"2025-11-28T13:30:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.424827 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8587850a181bf875ab24005a4b044489046212ae89b15834ccb0f0b46d3518b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b553128fa5576f5ac2e327e5e679a8098348ee31f0632c34d2b725d92198f8b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60c0f808818d6f71bad9f7b52d7fe42823b6e8ad83d3395366cab0f90fb75ec2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://933498cbc402ee7f14aab38ef86c401dc46df3c21d986c2a97ff1871bb7b7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4bb280266a0eb3369080dfcb1e4d0522f2d365a300f82637288356703bde242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1dc7ae9f1870db29ad30c6b4f3c8f8d28015c1e3d9bf27171e5e8a95d51b1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://304840d0c27e42a0a5ff1c6fbfd883550ce47337
8e8e7ae7d4a33e420f62de50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3479ddd010edde55297b54ed32949b10c87257a70b551253716751517de87167\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"rator/metrics_TCP_cluster options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.161:443:]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {f32857b5-f652-4313-a0d7-455c3156dd99}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1128 13:29:45.083329 6319 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for namespace Informer during admin network policy controller initialization, handler {0x1fcbf20 0x1fcbc00 0x1fcbba0} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:29:45Z is after 2025-08-24T17:21:41Z]\\\\nI1128 13:29:45.080546 6319 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/mul\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:44Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://304840d0c27e42a0a5ff1c6fbfd883550ce473378e8e7ae7d4a33e420f62de50\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:30:07Z\\\",\\\"message\\\":\\\"590 default_network_controller.go:776] Recording success event on pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb\\\\nI1128 13:30:07.281386 6590 services_controller.go:453] Built service openshift-network-diagnostics/network-check-target template LB for network=default: []services.LB{}\\\\nI1128 13:30:07.281499 6590 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 13:30:07.281362 6590 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nF1128 13:30:07.281457 6590 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network 
controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start no\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:30:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f7d207494d4959cc23a5df6738539f073ec3c45cfe0339bd14b1a3e876f6031\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.12
6.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:08Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.440918 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"adf71647-2fc9-4497-81ee-84e6373498b4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://57e2a3423c1095d435ed7835a89df7e3e8f12c8df10707e1a82ae4595fadf4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d598096a9163436c21991255cd21335da8307e56e5607288d289da81d77578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kmjsb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:08Z is after 2025-08-24T17:21:41Z" Nov 28 
13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.456043 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:08Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.470864 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:08Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.487994 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52d03fd9944b8321b802a2a4317030abb1644c2c52c849ca35b5ba1eee3ebb4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:08Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.502525 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:08Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.514615 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cs9jw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6aa30f3141838a2780e6e300545fcb2d71a80ed4b146759f8dff91c976c3f36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zchsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cs9jw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:08Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.516004 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.516038 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.516049 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:08 crc 
kubenswrapper[4857]: I1128 13:30:08.516068 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.516081 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:08Z","lastTransitionTime":"2025-11-28T13:30:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.528464 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiser
ver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:08Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.539966 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ea3abc8-9e6f-454d-ac25-f04483a5d4d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93cf74b7715a705e22a03a506d90d151a961e80a83a4aeda3d4e361cfc2a178d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc226ca8af9c501cbd610bcd82596f359f573fc294ce0830056d1d59bd066b0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8caf792b0a8768a68663568cc8a1cdca8c4f3394f30fe098841c6e37b9cf9980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5c76797e337ee42b458f3cc8108b46bd86b18d98a1a07ed60df3778203feb39\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e5c76797e337ee42b458f3cc8108b46bd86b18d98a1a07ed60df3778203feb39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:08Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.553125 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:08Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.566049 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:08Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.580744 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:08Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.595318 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:08Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.613009 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:08Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.619292 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.619333 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.619342 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.619358 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.619370 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:08Z","lastTransitionTime":"2025-11-28T13:30:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.722383 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.722437 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.722458 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.722503 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.722518 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:08Z","lastTransitionTime":"2025-11-28T13:30:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.825133 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.825176 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.825191 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.825209 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.825223 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:08Z","lastTransitionTime":"2025-11-28T13:30:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.927711 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.927751 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.927761 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.927778 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:08 crc kubenswrapper[4857]: I1128 13:30:08.927790 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:08Z","lastTransitionTime":"2025-11-28T13:30:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.029979 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.030012 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.030020 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.030034 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.030046 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:09Z","lastTransitionTime":"2025-11-28T13:30:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.132524 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.132566 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.132580 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.132597 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.132607 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:09Z","lastTransitionTime":"2025-11-28T13:30:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.228039 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:30:09 crc kubenswrapper[4857]: E1128 13:30:09.228212 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.235002 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.235050 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.235063 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.235080 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.235090 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:09Z","lastTransitionTime":"2025-11-28T13:30:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.338392 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.338631 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.338765 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.338856 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.338998 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:09Z","lastTransitionTime":"2025-11-28T13:30:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.361871 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-h8td2_46c5e02c-be1a-45b7-86ef-cc8c484c4f71/ovnkube-controller/2.log" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.365772 4857 scope.go:117] "RemoveContainer" containerID="304840d0c27e42a0a5ff1c6fbfd883550ce473378e8e7ae7d4a33e420f62de50" Nov 28 13:30:09 crc kubenswrapper[4857]: E1128 13:30:09.365985 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-h8td2_openshift-ovn-kubernetes(46c5e02c-be1a-45b7-86ef-cc8c484c4f71)\"" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.380155 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":
\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:09Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.397512 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:09Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.412482 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-26tq7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cf43f51-b79b-49fc-85ca-a245a248f27a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-26tq7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:09Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.426446 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"adf71647-2fc9-4497-81ee-84e6373498b4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://57e2a3423c1095d435ed7835a89df7e3e8f12c8df10707e1a82ae4595fadf4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d598096a9163436c21991255cd21335da8307e56e5607288d289da81d77578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kmjsb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:09Z is after 2025-08-24T17:21:41Z" Nov 28 
13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.439409 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:09Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.441276 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.441311 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.441319 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.441352 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.441363 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:09Z","lastTransitionTime":"2025-11-28T13:30:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.456190 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:09Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.472677 4857 status_manager.go:875] "Failed 
to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52d03fd9944b8321b802a2a4317030abb1644c2c52c849ca35b5ba1eee3ebb4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154af
a2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os
-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:09Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.493210 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8587850a181bf875ab24005a4b044489046212ae89b15834ccb0f0b46d3518b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b553128fa5576f5ac2e327e5e679a8098348ee31f0632c34d2b725d92198f8b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60c0f808818d6f71bad9f7b52d7fe42823b6e8ad83d3395366cab0f90fb75ec2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://933498cbc402ee7f14aab38ef86c401dc46df3c21d986c2a97ff1871bb7b7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4bb280266a0eb3369080dfcb1e4d0522f2d365a300f82637288356703bde242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1dc7ae9f1870db29ad30c6b4f3c8f8d28015c1e3d9bf27171e5e8a95d51b1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://304840d0c27e42a0a5ff1c6fbfd883550ce473378e8e7ae7d4a33e420f62de50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://304840d0c27e42a0a5ff1c6fbfd883550ce473378e8e7ae7d4a33e420f62de50\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:30:07Z\\\",\\\"message\\\":\\\"590 default_network_controller.go:776] Recording success event on pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb\\\\nI1128 13:30:07.281386 6590 services_controller.go:453] Built service openshift-network-diagnostics/network-check-target template LB for network=default: []services.LB{}\\\\nI1128 13:30:07.281499 6590 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 13:30:07.281362 6590 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nF1128 13:30:07.281457 6590 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start no\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:30:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-h8td2_openshift-ovn-kubernetes(46c5e02c-be1a-45b7-86ef-cc8c484c4f71)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f7d207494d4959cc23a5df6738539f073ec3c45cfe0339bd14b1a3e876f6031\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:09Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.507222 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cs9jw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6aa30f3141838a2780e6e300545fcb2d71a80ed4b146759f8dff91c976c3f36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zchsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cs9jw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:09Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.524151 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-
apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:09Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.539706 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ea3abc8-9e6f-454d-ac25-f04483a5d4d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93cf74b7715a705e22a03a506d90d151a961e80a83a4aeda3d4e361cfc2a178d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc226ca8af9c501cbd610bcd82596f359f573fc294ce0830056d1d59bd066b0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8caf792b0a8768a68663568cc8a1cdca8c4f3394f30fe098841c6e37b9cf9980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5c76797e337ee42b458f3cc8108b46bd86b18d98a1a07ed60df3778203feb39\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e5c76797e337ee42b458f3cc8108b46bd86b18d98a1a07ed60df3778203feb39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:09Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.544040 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.544109 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.544121 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.544141 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.544155 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:09Z","lastTransitionTime":"2025-11-28T13:30:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.554573 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:09Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.569333 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:09Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.581684 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:09Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.600969 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:09Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.639111 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:09Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.646653 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.646933 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.647030 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.647108 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.647190 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:09Z","lastTransitionTime":"2025-11-28T13:30:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.661108 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:09Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.750707 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.750756 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.750772 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.750794 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.750808 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:09Z","lastTransitionTime":"2025-11-28T13:30:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.853201 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.853272 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.853292 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.853327 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.853372 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:09Z","lastTransitionTime":"2025-11-28T13:30:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.956812 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.957333 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.957493 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.957632 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:09 crc kubenswrapper[4857]: I1128 13:30:09.957794 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:09Z","lastTransitionTime":"2025-11-28T13:30:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.060897 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.060972 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.060983 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.060998 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.061009 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:10Z","lastTransitionTime":"2025-11-28T13:30:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.163487 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.163541 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.163552 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.163568 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.163582 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:10Z","lastTransitionTime":"2025-11-28T13:30:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.227812 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.227812 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:30:10 crc kubenswrapper[4857]: E1128 13:30:10.227939 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:30:10 crc kubenswrapper[4857]: E1128 13:30:10.228100 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.227818 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:30:10 crc kubenswrapper[4857]: E1128 13:30:10.228258 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.248794 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resourc
es\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:10Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.263899 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ea3abc8-9e6f-454d-ac25-f04483a5d4d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93cf74b7715a705e22a03a506d90d151a961e80a83a4aeda3d4e361cfc2a178d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc226ca8af9c501cbd610bcd82596f359f573fc294ce0830056d1d59bd066b0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8caf792b0a8768a68663568cc8a1cdca8c4f3394f30fe098841c6e37b9cf9980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5c76797e337ee42b458f3cc8108b46bd86b18d98a1a07ed60df3778203feb39\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e5c76797e337ee42b458f3cc8108b46bd86b18d98a1a07ed60df3778203feb39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:10Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.266719 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.266778 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.266791 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.266811 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.266856 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:10Z","lastTransitionTime":"2025-11-28T13:30:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.287276 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:10Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.304243 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:10Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.316406 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cs9jw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6aa30f3141838a2780e6e300545fcb2d71a80ed4b146759f8dff91c976c3f36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zchsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cs9jw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:10Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.336191 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:10Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.353194 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:10Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.369119 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.369178 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.369193 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.369217 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.369234 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:10Z","lastTransitionTime":"2025-11-28T13:30:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.369244 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:10Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.379732 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:10Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.391578 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:10Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.402329 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-28T13:30:10Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.412119 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-26tq7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cf43f51-b79b-49fc-85ca-a245a248f27a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-26tq7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:10Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.424328 4857 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:10Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.435773 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:10Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.449406 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52d03fd9944b8321b802a2a4317030abb1644c2c52c849ca35b5ba1eee3ebb4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:10Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.469279 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8587850a181bf875ab24005a4b044489046212ae89b15834ccb0f0b46d3518b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b553128fa5576f5ac2e327e5e679a8098348ee31f0632c34d2b725d92198f8b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60c0f808818d6f71bad9f7b52d7fe42823b6e8ad83d3395366cab0f90fb75ec2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://933498cbc402ee7f14aab38ef86c401dc46df3c21d986c2a97ff1871bb7b7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4bb280266a0eb3369080dfcb1e4d0522f2d365a300f82637288356703bde242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1dc7ae9f1870db29ad30c6b4f3c8f8d28015c1e3d9bf27171e5e8a95d51b1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://304840d0c27e42a0a5ff1c6fbfd883550ce473378e8e7ae7d4a33e420f62de50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://304840d0c27e42a0a5ff1c6fbfd883550ce473378e8e7ae7d4a33e420f62de50\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:30:07Z\\\",\\\"message\\\":\\\"590 default_network_controller.go:776] Recording success event on pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb\\\\nI1128 13:30:07.281386 6590 services_controller.go:453] Built service openshift-network-diagnostics/network-check-target template LB for network=default: []services.LB{}\\\\nI1128 13:30:07.281499 6590 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 13:30:07.281362 6590 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nF1128 13:30:07.281457 6590 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start no\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:30:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-h8td2_openshift-ovn-kubernetes(46c5e02c-be1a-45b7-86ef-cc8c484c4f71)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f7d207494d4959cc23a5df6738539f073ec3c45cfe0339bd14b1a3e876f6031\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:10Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.471389 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.471422 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.471432 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.471447 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.471458 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:10Z","lastTransitionTime":"2025-11-28T13:30:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.481249 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"adf71647-2fc9-4497-81ee-84e6373498b4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://57e2a3423c1095d435ed7835a89df7e3e8f12c8df10707e1a82ae4595fadf4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d598096a9163436c21991255cd21335da8307e56e5607288d289da81d77578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kmjsb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:10Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.573112 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.573150 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.573161 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.573176 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.573187 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:10Z","lastTransitionTime":"2025-11-28T13:30:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.675233 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.675275 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.675286 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.675303 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.675313 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:10Z","lastTransitionTime":"2025-11-28T13:30:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.778348 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.778389 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.778400 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.778418 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.778430 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:10Z","lastTransitionTime":"2025-11-28T13:30:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.882545 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.882664 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.882703 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.882739 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.882763 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:10Z","lastTransitionTime":"2025-11-28T13:30:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.985668 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.985719 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.985736 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.985758 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:10 crc kubenswrapper[4857]: I1128 13:30:10.985779 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:10Z","lastTransitionTime":"2025-11-28T13:30:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.088245 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.088996 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.089009 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.089024 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.089034 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:11Z","lastTransitionTime":"2025-11-28T13:30:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.192480 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.192563 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.192576 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.192596 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.192609 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:11Z","lastTransitionTime":"2025-11-28T13:30:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.228471 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:30:11 crc kubenswrapper[4857]: E1128 13:30:11.228671 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.295517 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.295568 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.295581 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.295601 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.295615 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:11Z","lastTransitionTime":"2025-11-28T13:30:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.398848 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.398915 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.398933 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.399006 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.399025 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:11Z","lastTransitionTime":"2025-11-28T13:30:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.501676 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.501741 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.501759 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.501780 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.501795 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:11Z","lastTransitionTime":"2025-11-28T13:30:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.605123 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.605167 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.605178 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.605201 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.605212 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:11Z","lastTransitionTime":"2025-11-28T13:30:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.707904 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.708247 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.708344 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.708441 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.708525 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:11Z","lastTransitionTime":"2025-11-28T13:30:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.811558 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.811858 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.811965 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.812062 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.812150 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:11Z","lastTransitionTime":"2025-11-28T13:30:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.916089 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.916167 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.916190 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.916224 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:11 crc kubenswrapper[4857]: I1128 13:30:11.916247 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:11Z","lastTransitionTime":"2025-11-28T13:30:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.019414 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.019922 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.020205 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.020463 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.020737 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:12Z","lastTransitionTime":"2025-11-28T13:30:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.123275 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.123313 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.123325 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.123341 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.123353 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:12Z","lastTransitionTime":"2025-11-28T13:30:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.226484 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.226917 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.227021 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.227117 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.227193 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:12Z","lastTransitionTime":"2025-11-28T13:30:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.227727 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.227771 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:30:12 crc kubenswrapper[4857]: E1128 13:30:12.227896 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.228149 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:30:12 crc kubenswrapper[4857]: E1128 13:30:12.228267 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:30:12 crc kubenswrapper[4857]: E1128 13:30:12.228499 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.330049 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.330087 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.330099 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.330116 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.330130 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:12Z","lastTransitionTime":"2025-11-28T13:30:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.432618 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.432653 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.432663 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.432680 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.432691 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:12Z","lastTransitionTime":"2025-11-28T13:30:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.534182 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.534213 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.534222 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.534239 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.534252 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:12Z","lastTransitionTime":"2025-11-28T13:30:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.637328 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.637380 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.637396 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.637418 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.637434 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:12Z","lastTransitionTime":"2025-11-28T13:30:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.739619 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.739667 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.739678 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.739695 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.739709 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:12Z","lastTransitionTime":"2025-11-28T13:30:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.843115 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.843424 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.843538 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.843617 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.843802 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:12Z","lastTransitionTime":"2025-11-28T13:30:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.947418 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.947474 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.947488 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.947511 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:12 crc kubenswrapper[4857]: I1128 13:30:12.947525 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:12Z","lastTransitionTime":"2025-11-28T13:30:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.050339 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.050401 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.050418 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.050444 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.050461 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:13Z","lastTransitionTime":"2025-11-28T13:30:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.154253 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.154308 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.154322 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.154342 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.154357 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:13Z","lastTransitionTime":"2025-11-28T13:30:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.228291 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:30:13 crc kubenswrapper[4857]: E1128 13:30:13.228482 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.257249 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.257296 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.257311 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.257332 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.257343 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:13Z","lastTransitionTime":"2025-11-28T13:30:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.360414 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.360474 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.360486 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.360503 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.360517 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:13Z","lastTransitionTime":"2025-11-28T13:30:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.462978 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.463027 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.463039 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.463057 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.463072 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:13Z","lastTransitionTime":"2025-11-28T13:30:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.568964 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.569014 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.569038 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.569055 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.569067 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:13Z","lastTransitionTime":"2025-11-28T13:30:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.672836 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.672884 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.672896 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.672914 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.672927 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:13Z","lastTransitionTime":"2025-11-28T13:30:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.776236 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.776282 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.776293 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.776310 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.776321 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:13Z","lastTransitionTime":"2025-11-28T13:30:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.900818 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.900860 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.900870 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.900888 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:13 crc kubenswrapper[4857]: I1128 13:30:13.900899 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:13Z","lastTransitionTime":"2025-11-28T13:30:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.003751 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.003809 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.003825 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.003845 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.003859 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:14Z","lastTransitionTime":"2025-11-28T13:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.107193 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.107855 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.108023 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.108124 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.108209 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:14Z","lastTransitionTime":"2025-11-28T13:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.211585 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.211656 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.211674 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.211705 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.211723 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:14Z","lastTransitionTime":"2025-11-28T13:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.227839 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.227909 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.228113 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:30:14 crc kubenswrapper[4857]: E1128 13:30:14.228100 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:30:14 crc kubenswrapper[4857]: E1128 13:30:14.228293 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:30:14 crc kubenswrapper[4857]: E1128 13:30:14.228491 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.315403 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.315999 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.316019 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.316042 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.316053 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:14Z","lastTransitionTime":"2025-11-28T13:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.418856 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.418912 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.418922 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.418938 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.418975 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:14Z","lastTransitionTime":"2025-11-28T13:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.445409 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.445445 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.445455 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.445472 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.445482 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:14Z","lastTransitionTime":"2025-11-28T13:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 28 13:30:14 crc kubenswrapper[4857]: E1128 13:30:14.460574 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:14Z is after 2025-08-24T17:21:41Z"
Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.465831 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.465887 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
event="NodeHasNoDiskPressure" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.465901 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.465920 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.465938 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:14Z","lastTransitionTime":"2025-11-28T13:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:14 crc kubenswrapper[4857]: E1128 13:30:14.478098 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:14Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.482484 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.482551 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.482567 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.482593 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.482604 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:14Z","lastTransitionTime":"2025-11-28T13:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:14 crc kubenswrapper[4857]: E1128 13:30:14.495185 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:14Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.498476 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.498539 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.498553 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.498574 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.498592 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:14Z","lastTransitionTime":"2025-11-28T13:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:14 crc kubenswrapper[4857]: E1128 13:30:14.511930 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:14Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.516021 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.516068 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.516082 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.516102 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.516115 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:14Z","lastTransitionTime":"2025-11-28T13:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:14 crc kubenswrapper[4857]: E1128 13:30:14.530817 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:14Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:14 crc kubenswrapper[4857]: E1128 13:30:14.530987 4857 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.532792 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
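
[editor's note] The status-patch failures above all trace to a single cause: the serving certificate of the "node.network-node-identity.openshift.io" webhook on 127.0.0.1:9743 expired on 2025-08-24T17:21:41Z, so every node-status PATCH is rejected until the cert is rotated. A minimal sketch of how the expiry could be confirmed from the node itself; the host, port, and dates come from the log, while the availability of the third-party "cryptography" package is an assumption:

    import datetime
    import socket
    import ssl

    from cryptography import x509  # third-party; assumed installed (not shown by this log)

    HOST, PORT = "127.0.0.1", 9743  # webhook endpoint from the error records above

    # Verification must be disabled: the point is to inspect an already-expired cert.
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE

    with socket.create_connection((HOST, PORT), timeout=5) as sock:
        with ctx.wrap_socket(sock, server_hostname=HOST) as tls:
            der = tls.getpeercert(binary_form=True)  # raw DER certificate

    cert = x509.load_der_x509_certificate(der)
    now = datetime.datetime.now(datetime.timezone.utc)
    print("notBefore:", cert.not_valid_before_utc)  # *_utc needs cryptography >= 42
    print("notAfter: ", cert.not_valid_after_utc)   # expect 2025-08-24T17:21:41Z per the log
    print("expired:  ", cert.not_valid_after_utc < now)
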
event="NodeHasSufficientMemory" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.532842 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.532855 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.532876 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.532890 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:14Z","lastTransitionTime":"2025-11-28T13:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.635579 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.635644 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.635656 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.635679 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.635694 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:14Z","lastTransitionTime":"2025-11-28T13:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.738635 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.738703 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.738713 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.738729 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.738743 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:14Z","lastTransitionTime":"2025-11-28T13:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.841956 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.842022 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.842036 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.842069 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.842083 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:14Z","lastTransitionTime":"2025-11-28T13:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.945412 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.945473 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.945493 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.945518 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:14 crc kubenswrapper[4857]: I1128 13:30:14.945536 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:14Z","lastTransitionTime":"2025-11-28T13:30:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:15 crc kubenswrapper[4857]: I1128 13:30:15.049385 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:15 crc kubenswrapper[4857]: I1128 13:30:15.049446 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:15 crc kubenswrapper[4857]: I1128 13:30:15.049457 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:15 crc kubenswrapper[4857]: I1128 13:30:15.049473 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:15 crc kubenswrapper[4857]: I1128 13:30:15.049485 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:15Z","lastTransitionTime":"2025-11-28T13:30:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:15 crc kubenswrapper[4857]: I1128 13:30:15.152488 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:15 crc kubenswrapper[4857]: I1128 13:30:15.152555 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:15 crc kubenswrapper[4857]: I1128 13:30:15.152568 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:15 crc kubenswrapper[4857]: I1128 13:30:15.152593 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:15 crc kubenswrapper[4857]: I1128 13:30:15.152606 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:15Z","lastTransitionTime":"2025-11-28T13:30:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:15 crc kubenswrapper[4857]: I1128 13:30:15.228169 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:30:15 crc kubenswrapper[4857]: E1128 13:30:15.228313 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:30:15 crc kubenswrapper[4857]: I1128 13:30:15.255461 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:15 crc kubenswrapper[4857]: I1128 13:30:15.255519 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:15 crc kubenswrapper[4857]: I1128 13:30:15.255537 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:15 crc kubenswrapper[4857]: I1128 13:30:15.255558 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:15 crc kubenswrapper[4857]: I1128 13:30:15.255571 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:15Z","lastTransitionTime":"2025-11-28T13:30:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
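
[editor's note] Every NotReady heartbeat and every "Error syncing pod" record in this window carries the same root message: no CNI configuration file in /etc/kubernetes/cni/net.d/. A rough sketch of the check the runtime is reporting; the directory path comes from the log, while the accepted file extensions are an assumption based on common CNI conventions:

    import json
    import pathlib

    CNI_DIR = pathlib.Path("/etc/kubernetes/cni/net.d")  # path from the kubelet message
    CNI_EXTS = {".conf", ".conflist", ".json"}           # conventional CNI extensions (assumption)

    configs = sorted(p for p in CNI_DIR.glob("*") if p.suffix in CNI_EXTS)
    if not configs:
        print(f"no CNI config in {CNI_DIR} -- consistent with NetworkPluginNotReady above")
    for path in configs:
        try:
            doc = json.loads(path.read_text())
        except (OSError, json.JSONDecodeError) as exc:
            print(f"{path.name}: unreadable ({exc})")
            continue
        # A .conflist carries a "plugins" array; a plain .conf names a single "type".
        plugins = [p.get("type") for p in doc.get("plugins", [])] or [doc.get("type")]
        print(f"{path.name}: name={doc.get('name')} plugins={plugins}")
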
Nov 28 13:30:16 crc kubenswrapper[4857]: I1128 13:30:16.228656 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 13:30:16 crc kubenswrapper[4857]: I1128 13:30:16.228781 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 13:30:16 crc kubenswrapper[4857]: I1128 13:30:16.228656 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 13:30:16 crc kubenswrapper[4857]: E1128 13:30:16.228903 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 13:30:16 crc kubenswrapper[4857]: E1128 13:30:16.229045 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 13:30:16 crc kubenswrapper[4857]: E1128 13:30:16.229220 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 13:30:17 crc kubenswrapper[4857]: I1128 13:30:17.676186 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0cf43f51-b79b-49fc-85ca-a245a248f27a-metrics-certs\") pod \"network-metrics-daemon-26tq7\" (UID: \"0cf43f51-b79b-49fc-85ca-a245a248f27a\") " pod="openshift-multus/network-metrics-daemon-26tq7"
Nov 28 13:30:17 crc kubenswrapper[4857]: E1128 13:30:17.676383 4857 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 28 13:30:17 crc kubenswrapper[4857]: E1128 13:30:17.676443 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0cf43f51-b79b-49fc-85ca-a245a248f27a-metrics-certs podName:0cf43f51-b79b-49fc-85ca-a245a248f27a nodeName:}" failed. No retries permitted until 2025-11-28 13:30:49.676423903 +0000 UTC m=+99.800365340 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0cf43f51-b79b-49fc-85ca-a245a248f27a-metrics-certs") pod "network-metrics-daemon-26tq7" (UID: "0cf43f51-b79b-49fc-85ca-a245a248f27a") : object "openshift-multus"/"metrics-daemon-secret" not registered
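
[editor's note] The "durationBeforeRetry 32s" above reflects the volume manager's exponential backoff: each failed MountVolume.SetUp roughly doubles the wait before the next attempt. A toy reproduction of such a schedule; the initial delay and cap are illustrative assumptions, and only the 32 s figure and the failure timestamp appear in the log (under these constants, 32 s would correspond to the fifth consecutive failure):

    import datetime

    delay = datetime.timedelta(seconds=2)   # assumed initial backoff
    cap = datetime.timedelta(minutes=2)     # assumed upper bound
    failed_at = datetime.datetime(2025, 11, 28, 13, 30, 17, 676443,
                                  tzinfo=datetime.timezone.utc)  # from the record above

    for attempt in range(1, 7):
        retry_at = failed_at + delay
        print(f"attempt {attempt}: wait {delay.total_seconds():>5.0f}s -> retry at {retry_at:%H:%M:%S}")
        failed_at = retry_at                # pretend the retry fails again
        delay = min(delay * 2, cap)         # doubling backoff, capped
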
Nov 28 13:30:18 crc kubenswrapper[4857]: I1128 13:30:18.554772 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:18Z","lastTransitionTime":"2025-11-28T13:30:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:18 crc kubenswrapper[4857]: I1128 13:30:18.657224 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:18 crc kubenswrapper[4857]: I1128 13:30:18.657255 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:18 crc kubenswrapper[4857]: I1128 13:30:18.657263 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:18 crc kubenswrapper[4857]: I1128 13:30:18.657276 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:18 crc kubenswrapper[4857]: I1128 13:30:18.657284 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:18Z","lastTransitionTime":"2025-11-28T13:30:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:18 crc kubenswrapper[4857]: I1128 13:30:18.770675 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:18 crc kubenswrapper[4857]: I1128 13:30:18.770705 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:18 crc kubenswrapper[4857]: I1128 13:30:18.770715 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:18 crc kubenswrapper[4857]: I1128 13:30:18.770729 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:18 crc kubenswrapper[4857]: I1128 13:30:18.770738 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:18Z","lastTransitionTime":"2025-11-28T13:30:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:18 crc kubenswrapper[4857]: I1128 13:30:18.872654 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:18 crc kubenswrapper[4857]: I1128 13:30:18.872689 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:18 crc kubenswrapper[4857]: I1128 13:30:18.872700 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:18 crc kubenswrapper[4857]: I1128 13:30:18.872715 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:18 crc kubenswrapper[4857]: I1128 13:30:18.872726 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:18Z","lastTransitionTime":"2025-11-28T13:30:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:18 crc kubenswrapper[4857]: I1128 13:30:18.975455 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:18 crc kubenswrapper[4857]: I1128 13:30:18.975763 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:18 crc kubenswrapper[4857]: I1128 13:30:18.975834 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:18 crc kubenswrapper[4857]: I1128 13:30:18.975900 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:18 crc kubenswrapper[4857]: I1128 13:30:18.976016 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:18Z","lastTransitionTime":"2025-11-28T13:30:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.079361 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.079403 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.079411 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.079427 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.079439 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:19Z","lastTransitionTime":"2025-11-28T13:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.182034 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.182078 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.182089 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.182104 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.182116 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:19Z","lastTransitionTime":"2025-11-28T13:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.227733 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:30:19 crc kubenswrapper[4857]: E1128 13:30:19.227841 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.285115 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.285182 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.285205 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.285237 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.285258 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:19Z","lastTransitionTime":"2025-11-28T13:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.387646 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.387684 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.387693 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.387706 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.387719 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:19Z","lastTransitionTime":"2025-11-28T13:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.397661 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rb7tq_787c64de-9ce4-41eb-a525-948c23e84595/kube-multus/0.log" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.397716 4857 generic.go:334] "Generic (PLEG): container finished" podID="787c64de-9ce4-41eb-a525-948c23e84595" containerID="d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395" exitCode=1 Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.397750 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rb7tq" event={"ID":"787c64de-9ce4-41eb-a525-948c23e84595","Type":"ContainerDied","Data":"d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395"} Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.398201 4857 scope.go:117] "RemoveContainer" containerID="d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.421120 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":
{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' 
detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:19Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.435593 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ea3abc8-9e6f-454d-ac25-f04483a5d4d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93cf74b7715a705e22a03a506d90d151a961e80a83a4aeda3d4e361cfc2a178d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc226ca8af9c501cbd610bcd82596f359f573fc294ce0830056d1d59bd066b0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8caf792b0a8768a68663568cc8a1cdca8c4f3394f30fe098841c6e37b9cf9980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5c76797e337ee42b458f3cc8108b46bd86b18d98a1a07ed60df3778203feb39\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e5c76797e337ee42b458f3cc8108b46bd86b18d98a1a07ed60df3778203feb39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:19Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.453275 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:19Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.472928 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:30:19Z\\\",\\\"message\\\":\\\"2025-11-28T13:29:33+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_4169fb9e-7cb6-454a-8caa-0f2881067744\\\\n2025-11-28T13:29:33+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_4169fb9e-7cb6-454a-8caa-0f2881067744 to /host/opt/cni/bin/\\\\n2025-11-28T13:29:33Z [verbose] multus-daemon started\\\\n2025-11-28T13:29:33Z [verbose] Readiness Indicator file check\\\\n2025-11-28T13:30:18Z [error] have you checked that your 
default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:19Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.487460 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cs9jw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6aa30f3141838a2780e6e300545fcb2d71a80ed4b146759f8dff91c976c3f36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zchsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cs9jw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:19Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.490668 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.490697 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.490706 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.490722 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.490732 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:19Z","lastTransitionTime":"2025-11-28T13:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.502669 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:19Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.516520 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:19Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.529712 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:19Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.542798 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Dis
abled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:19Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.558434 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-o
perator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:19Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.573126 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:19Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.586868 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-26tq7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cf43f51-b79b-49fc-85ca-a245a248f27a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-26tq7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:19Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.593064 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.593106 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.593120 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.593138 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.593153 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:19Z","lastTransitionTime":"2025-11-28T13:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.601636 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:19Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.618906 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:19Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.634290 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52d03fd9944b8321b802a2a4317030abb1644c2c52c849ca35b5ba1eee3ebb4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:19Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.656491 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8587850a181bf875ab24005a4b044489046212ae89b15834ccb0f0b46d3518b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b553128fa5576f5ac2e327e5e679a8098348ee31f0632c34d2b725d92198f8b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60c0f808818d6f71bad9f7b52d7fe42823b6e8ad83d3395366cab0f90fb75ec2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://933498cbc402ee7f14aab38ef86c401dc46df3c21d986c2a97ff1871bb7b7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4bb280266a0eb3369080dfcb1e4d0522f2d365a300f82637288356703bde242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1dc7ae9f1870db29ad30c6b4f3c8f8d28015c1e3d9bf27171e5e8a95d51b1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://304840d0c27e42a0a5ff1c6fbfd883550ce473378e8e7ae7d4a33e420f62de50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://304840d0c27e42a0a5ff1c6fbfd883550ce473378e8e7ae7d4a33e420f62de50\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:30:07Z\\\",\\\"message\\\":\\\"590 default_network_controller.go:776] Recording success event on pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb\\\\nI1128 13:30:07.281386 6590 services_controller.go:453] Built service openshift-network-diagnostics/network-check-target template LB for network=default: []services.LB{}\\\\nI1128 13:30:07.281499 6590 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 13:30:07.281362 6590 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nF1128 13:30:07.281457 6590 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start no\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:30:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-h8td2_openshift-ovn-kubernetes(46c5e02c-be1a-45b7-86ef-cc8c484c4f71)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f7d207494d4959cc23a5df6738539f073ec3c45cfe0339bd14b1a3e876f6031\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:19Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.670447 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"adf71647-2fc9-4497-81ee-84e6373498b4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://57e2a3423c1095d435ed7835a89df7e3e8f12c8df10707e1a82ae4595fadf4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d598096a9163436c21991255cd21335da8307e56e5607288d289da81d77578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kmjsb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:19Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.696008 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.696059 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.696072 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.696090 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.696103 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:19Z","lastTransitionTime":"2025-11-28T13:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.799209 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.799262 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.799280 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.799299 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.799316 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:19Z","lastTransitionTime":"2025-11-28T13:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.901596 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.901631 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.901642 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.901658 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:19 crc kubenswrapper[4857]: I1128 13:30:19.901670 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:19Z","lastTransitionTime":"2025-11-28T13:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.003385 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.003434 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.003446 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.003462 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.003475 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:20Z","lastTransitionTime":"2025-11-28T13:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.105703 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.105744 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.105757 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.105774 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.105786 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:20Z","lastTransitionTime":"2025-11-28T13:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.208638 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.208687 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.208698 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.208721 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.208733 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:20Z","lastTransitionTime":"2025-11-28T13:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.228029 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.228128 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.228173 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:30:20 crc kubenswrapper[4857]: E1128 13:30:20.228324 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:30:20 crc kubenswrapper[4857]: E1128 13:30:20.228445 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:30:20 crc kubenswrapper[4857]: E1128 13:30:20.228543 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.246861 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\
\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.260623 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.274238 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-26tq7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cf43f51-b79b-49fc-85ca-a245a248f27a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-26tq7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.288620 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.304857 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.312360 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.312429 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.312454 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.312491 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.312516 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:20Z","lastTransitionTime":"2025-11-28T13:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.319608 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52d03fd9944b8321b802a2a4317030abb1644c2c52c849ca35b5ba1eee3ebb4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.340524 4857 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8587850a181bf875ab24005a4b044489046212ae89b15834ccb0f0b46d3518b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b553128fa5576f5ac2e327e5e679a8098348ee31f0632c34d2b725d92198f8b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60c0f808818d6f71bad9f7b52d7fe42823b6e8ad83d3395366cab0f90fb75ec2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36c
dd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://933498cbc402ee7f14aab38ef86c401dc46df3c21d986c2a97ff1871bb7b7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4bb280266a0eb3369080dfcb1e4d0522f2d365a300f82637288356703bde242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1dc7ae9f1870db29ad30c6b4f3c8f8d28015c1e3d9bf27171e5e8a95d51b1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-con
troller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://304840d0c27e42a0a5ff1c6fbfd883550ce473378e8e7ae7d4a33e420f62de50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://304840d0c27e42a0a5ff1c6fbfd883550ce473378e8e7ae7d4a33e420f62de50\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:30:07Z\\\",\\\"message\\\":\\\"590 default_network_controller.go:776] Recording success event on pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb\\\\nI1128 13:30:07.281386 6590 services_controller.go:453] Built service openshift-network-diagnostics/network-check-target template LB for network=default: []services.LB{}\\\\nI1128 13:30:07.281499 6590 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 13:30:07.281362 6590 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nF1128 13:30:07.281457 6590 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start no\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:30:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-h8td2_openshift-ovn-kubernetes(46c5e02c-be1a-45b7-86ef-cc8c484c4f71)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f7d207494d4959cc23a5df6738539f073ec3c45cfe0339bd14b1a3e876f6031\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.355716 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"adf71647-2fc9-4497-81ee-84e6373498b4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://57e2a3423c1095d435ed7835a89df7e3e8f12c8df10707e1a82ae4595fadf4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d598096a9163436c21991255cd21335da8307e56e5607288d289da81d77578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kmjsb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.373310 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.391515 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ea3abc8-9e6f-454d-ac25-f04483a5d4d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93cf74b7715a705e22a03a506d90d151a961e80a83a4aeda3d4e361cfc2a178d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc226ca8af9c501cbd610bcd82596f359f573fc294ce0830056d1d59bd066b0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8caf792b0a8768a68663568cc8a1cdca8c4f3394f30fe098841c6e37b9cf9980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5c76797e337ee42b458f3cc8108b46bd86b18d98a1a07ed60df3778203feb39\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e5c76797e337ee42b458f3cc8108b46bd86b18d98a1a07ed60df3778203feb39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.405390 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rb7tq_787c64de-9ce4-41eb-a525-948c23e84595/kube-multus/0.log" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.405473 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rb7tq" event={"ID":"787c64de-9ce4-41eb-a525-948c23e84595","Type":"ContainerStarted","Data":"d4e401482ae63a6e4ae751bb6a59688781c6b78961e3936facd2a2da80a538e5"} Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.410349 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.418224 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.418282 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.418298 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.418318 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.418332 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:20Z","lastTransitionTime":"2025-11-28T13:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.434728 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:30:19Z\\\",\\\"message\\\":\\\"2025-11-28T13:29:33+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_4169fb9e-7cb6-454a-8caa-0f2881067744\\\\n2025-11-28T13:29:33+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_4169fb9e-7cb6-454a-8caa-0f2881067744 to /host/opt/cni/bin/\\\\n2025-11-28T13:29:33Z [verbose] multus-daemon started\\\\n2025-11-28T13:29:33Z [verbose] Readiness Indicator file check\\\\n2025-11-28T13:30:18Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.449169 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cs9jw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6aa30f3141838a2780e6e300545fcb2d71a80ed4b146759f8dff91c976c3f36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zchsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cs9jw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.464803 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.480996 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.498152 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\
":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.512207 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.521514 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.521551 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.521562 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.521579 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.521589 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:20Z","lastTransitionTime":"2025-11-28T13:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.534235 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4e401482ae63a6e4ae751bb6a59688781c6b78961e3936facd2a2da80a538e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:30:19Z\\\",\\\"message\\\":\\\"2025-11-28T13:29:33+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_4169fb9e-7cb6-454a-8caa-0f2881067744\\\\n2025-11-28T13:29:33+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_4169fb9e-7cb6-454a-8caa-0f2881067744 to /host/opt/cni/bin/\\\\n2025-11-28T13:29:33Z [verbose] multus-daemon started\\\\n2025-11-28T13:29:33Z 
[verbose] Readiness Indicator file check\\\\n2025-11-28T13:30:18Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.552737 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cs9jw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6aa30f3141838a2780e6e300545fcb2d71a80ed4b146759f8dff91c976c3f36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zchsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cs9jw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.575776 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.591990 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ea3abc8-9e6f-454d-ac25-f04483a5d4d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93cf74b7715a705e22a03a506d90d151a961e80a83a4aeda3d4e361cfc2a178d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc226ca8af9c501cbd610bcd82596f359f573fc294ce0830056d1d59bd066b0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8caf792b0a8768a68663568cc8a1cdca8c4f3394f30fe098841c6e37b9cf9980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5c76797e337ee42b458f3cc8108b46bd86b18d98a1a07ed60df3778203feb39\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e5c76797e337ee42b458f3cc8108b46bd86b18d98a1a07ed60df3778203feb39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.605810 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.619882 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.624616 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.624658 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.624671 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.624687 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.624700 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:20Z","lastTransitionTime":"2025-11-28T13:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.632810 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.647399 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.662276 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.675123 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-26tq7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cf43f51-b79b-49fc-85ca-a245a248f27a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-26tq7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.691870 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.707459 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-28T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.727613 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.727674 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.727687 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.727705 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.727721 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:20Z","lastTransitionTime":"2025-11-28T13:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.731063 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8587850a181bf875ab24005a4b044489046212ae89b15834ccb0f0b46d3518b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b553128fa5576f5ac2e327e5e679a8098348ee31f0632c34d2b725d92198f8b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60c0f808818d6f71bad9f7b52d7fe42823b6e8ad83d3395366cab0f90fb75ec2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://933498cbc402ee7f14aab38ef86c401dc46df3c21d986c2a97ff1871bb7b7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4bb280266a0eb3369080dfcb1e4d0522f2d365a300f82637288356703bde242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1dc7ae9f1870db29ad30c6b4f3c8f8d28015c1e3d9bf27171e5e8a95d51b1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://304840d0c27e42a0a5ff1c6fbfd883550ce47337
8e8e7ae7d4a33e420f62de50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://304840d0c27e42a0a5ff1c6fbfd883550ce473378e8e7ae7d4a33e420f62de50\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:30:07Z\\\",\\\"message\\\":\\\"590 default_network_controller.go:776] Recording success event on pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb\\\\nI1128 13:30:07.281386 6590 services_controller.go:453] Built service openshift-network-diagnostics/network-check-target template LB for network=default: []services.LB{}\\\\nI1128 13:30:07.281499 6590 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 13:30:07.281362 6590 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nF1128 13:30:07.281457 6590 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start no\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:30:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-h8td2_openshift-ovn-kubernetes(46c5e02c-be1a-45b7-86ef-cc8c484c4f71)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f7d207494d4959cc23a5df6738539f073ec3c45cfe0339bd14b1a3e876f6031\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.744995 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"adf71647-2fc9-4497-81ee-84e6373498b4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://57e2a3423c1095d435ed7835a89df7e3e8f12c8df10707e1a82ae4595fadf4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d598096a9163436c21991255cd21335da8307e56e5607288d289da81d77578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kmjsb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.758976 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.773576 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.788090 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52d03fd9944b8321b802a2a4317030abb1644c2c52c849ca35b5ba1eee3ebb4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d
9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"image\\\":\\\"quay.io/ope
nshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:20Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.830426 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.830470 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.830478 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.830493 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.830504 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:20Z","lastTransitionTime":"2025-11-28T13:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.932995 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.933045 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.933056 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.933072 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:20 crc kubenswrapper[4857]: I1128 13:30:20.933082 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:20Z","lastTransitionTime":"2025-11-28T13:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.035474 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.035521 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.035532 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.035548 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.035559 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:21Z","lastTransitionTime":"2025-11-28T13:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.184915 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.184989 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.185005 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.185025 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.185039 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:21Z","lastTransitionTime":"2025-11-28T13:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.227788 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:30:21 crc kubenswrapper[4857]: E1128 13:30:21.227978 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.287847 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.287916 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.287937 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.288004 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.288041 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:21Z","lastTransitionTime":"2025-11-28T13:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.390564 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.390610 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.390620 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.390637 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.390649 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:21Z","lastTransitionTime":"2025-11-28T13:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.493195 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.493261 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.493269 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.493287 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.493299 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:21Z","lastTransitionTime":"2025-11-28T13:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.596407 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.596452 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.596467 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.596523 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.596542 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:21Z","lastTransitionTime":"2025-11-28T13:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.698664 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.698712 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.698721 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.698739 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.698748 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:21Z","lastTransitionTime":"2025-11-28T13:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.801529 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.801908 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.801917 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.801932 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.801960 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:21Z","lastTransitionTime":"2025-11-28T13:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.906641 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.906765 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.906794 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.906830 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:21 crc kubenswrapper[4857]: I1128 13:30:21.906853 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:21Z","lastTransitionTime":"2025-11-28T13:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.010663 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.010717 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.010727 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.010745 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.010758 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:22Z","lastTransitionTime":"2025-11-28T13:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.114103 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.114252 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.114279 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.114321 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.114344 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:22Z","lastTransitionTime":"2025-11-28T13:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.217135 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.217169 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.217178 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.217192 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.217202 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:22Z","lastTransitionTime":"2025-11-28T13:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.229477 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:30:22 crc kubenswrapper[4857]: E1128 13:30:22.229754 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.229909 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:30:22 crc kubenswrapper[4857]: E1128 13:30:22.229978 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.230165 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:30:22 crc kubenswrapper[4857]: E1128 13:30:22.230219 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.320060 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.320099 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.320113 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.320130 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.320143 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:22Z","lastTransitionTime":"2025-11-28T13:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.423143 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.423173 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.423181 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.423199 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.423232 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:22Z","lastTransitionTime":"2025-11-28T13:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.525506 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.525557 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.525570 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.525586 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.525599 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:22Z","lastTransitionTime":"2025-11-28T13:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.628812 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.628905 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.628924 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.628982 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.629008 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:22Z","lastTransitionTime":"2025-11-28T13:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.732026 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.732059 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.732069 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.732088 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.732101 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:22Z","lastTransitionTime":"2025-11-28T13:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.835490 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.835546 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.835559 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.835579 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.835592 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:22Z","lastTransitionTime":"2025-11-28T13:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.938634 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.938695 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.938712 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.938741 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:22 crc kubenswrapper[4857]: I1128 13:30:22.938759 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:22Z","lastTransitionTime":"2025-11-28T13:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.041672 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.041739 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.041762 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.041790 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.041811 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:23Z","lastTransitionTime":"2025-11-28T13:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.145168 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.145223 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.145245 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.145268 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.145286 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:23Z","lastTransitionTime":"2025-11-28T13:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.228212 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:30:23 crc kubenswrapper[4857]: E1128 13:30:23.228417 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
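[Editor's note: the repeated NodeNotReady condition above all traces back to one fact: the container runtime reports NetworkReady=false while /etc/kubernetes/cni/net.d/ contains no CNI network configuration, which the network provider (ovnkube-node here) is expected to write. The following is a minimal illustrative sketch of that kind of readiness test, not the actual kubelet/CRI-O code; the accepted extensions are an assumption mirroring common libcni conventions (*.conf, *.conflist, *.json).]

// cni_ready_check.go -- sketch of the readiness test implied by the
// "no CNI configuration file in /etc/kubernetes/cni/net.d/" message.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// hasCNIConfig reports whether dir contains at least one CNI network
// configuration file. A CNI-based runtime stays NetworkReady=false
// until the network provider writes such a file.
func hasCNIConfig(dir string) (bool, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return false, err // a missing directory also counts as "not ready"
	}
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json": // assumed conventional extensions
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := hasCNIConfig("/etc/kubernetes/cni/net.d")
	if err != nil || !ok {
		fmt.Println("NetworkReady=false: no CNI configuration file found", err)
		return
	}
	fmt.Println("NetworkReady=true")
}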
pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.248606 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.248653 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.248661 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.248676 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.248686 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:23Z","lastTransitionTime":"2025-11-28T13:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.352010 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.352070 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.352083 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.352104 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.352121 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:23Z","lastTransitionTime":"2025-11-28T13:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.455045 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.455111 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.455129 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.455154 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.455173 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:23Z","lastTransitionTime":"2025-11-28T13:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.557064 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.557115 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.557137 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.557158 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.557175 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:23Z","lastTransitionTime":"2025-11-28T13:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.659983 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.660037 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.660050 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.660068 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.660083 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:23Z","lastTransitionTime":"2025-11-28T13:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.763250 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.763325 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.763340 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.763357 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.763369 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:23Z","lastTransitionTime":"2025-11-28T13:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.865609 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.865661 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.865669 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.865684 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.865694 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:23Z","lastTransitionTime":"2025-11-28T13:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.969054 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.969102 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.969118 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.969141 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:23 crc kubenswrapper[4857]: I1128 13:30:23.969157 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:23Z","lastTransitionTime":"2025-11-28T13:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.072013 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.072071 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.072085 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.072109 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.072124 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:24Z","lastTransitionTime":"2025-11-28T13:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.174979 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.175050 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.175069 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.175093 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.175110 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:24Z","lastTransitionTime":"2025-11-28T13:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.228477 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:30:24 crc kubenswrapper[4857]: E1128 13:30:24.228646 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.228864 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:30:24 crc kubenswrapper[4857]: E1128 13:30:24.228965 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.230845 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.231141 4857 scope.go:117] "RemoveContainer" containerID="304840d0c27e42a0a5ff1c6fbfd883550ce473378e8e7ae7d4a33e420f62de50" Nov 28 13:30:24 crc kubenswrapper[4857]: E1128 13:30:24.231133 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:30:24 crc kubenswrapper[4857]: E1128 13:30:24.231318 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-h8td2_openshift-ovn-kubernetes(46c5e02c-be1a-45b7-86ef-cc8c484c4f71)\"" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.277865 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.277911 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.277922 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.277938 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.277971 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:24Z","lastTransitionTime":"2025-11-28T13:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
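[Editor's note: the "back-off 20s restarting failed container=ovnkube-controller" entry above reflects kubelet's crash-loop restart backoff, which to my understanding starts at 10s, doubles per consecutive crash, and is capped (300s by default). The sketch below only illustrates that progression; the constants are assumptions mirroring those defaults, not values read from this node.]

// crashloop_backoff.go -- sketch of the restart delay behind the
// "back-off 20s restarting failed container" entry.
package main

import (
	"fmt"
	"time"
)

// backoffDelay returns the wait before restart attempt n (n >= 1),
// doubling from base and saturating at max.
func backoffDelay(n int, base, max time.Duration) time.Duration {
	d := base
	for i := 1; i < n; i++ {
		d *= 2
		if d >= max {
			return max
		}
	}
	return d
}

func main() {
	base, max := 10*time.Second, 300*time.Second // assumed kubelet defaults
	for n := 1; n <= 7; n++ {
		fmt.Printf("crash #%d -> back-off %s\n", n, backoffDelay(n, base, max))
	}
	// crash #2 prints "back-off 20s", matching the ovnkube-controller entry.
}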
Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.381017 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.381082 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.381100 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.381125 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.381144 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:24Z","lastTransitionTime":"2025-11-28T13:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.484294 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.484359 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.484378 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.484406 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.484424 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:24Z","lastTransitionTime":"2025-11-28T13:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.588090 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.588180 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.588201 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.588227 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.588246 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:24Z","lastTransitionTime":"2025-11-28T13:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.691915 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.692038 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.692091 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.692124 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.692147 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:24Z","lastTransitionTime":"2025-11-28T13:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.796656 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.796748 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.796782 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.796815 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.796843 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:24Z","lastTransitionTime":"2025-11-28T13:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.816290 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.816367 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.816387 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.816414 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.816433 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:24Z","lastTransitionTime":"2025-11-28T13:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:30:24 crc kubenswrapper[4857]: E1128 13:30:24.840913 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:24Z is after 
2025-08-24T17:21:41Z"
Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.846613 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.846709 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.846732 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.846758 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.846776 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:24Z","lastTransitionTime":"2025-11-28T13:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:30:24 crc kubenswrapper[4857]: E1128 13:30:24.867671 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:24Z is after 
2025-08-24T17:21:41Z" Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.873378 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.873426 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.873442 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.873466 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.873482 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:24Z","lastTransitionTime":"2025-11-28T13:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:24 crc kubenswrapper[4857]: E1128 13:30:24.889638 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:24Z is after 
2025-08-24T17:21:41Z" Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.894633 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.894692 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.894709 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.894729 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.894744 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:24Z","lastTransitionTime":"2025-11-28T13:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:24 crc kubenswrapper[4857]: E1128 13:30:24.912935 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:24Z is after 
2025-08-24T17:21:41Z" Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.917805 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.917916 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.917972 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.917998 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.918021 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:24Z","lastTransitionTime":"2025-11-28T13:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:24 crc kubenswrapper[4857]: E1128 13:30:24.936169 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:24Z is after 
2025-08-24T17:21:41Z" Nov 28 13:30:24 crc kubenswrapper[4857]: E1128 13:30:24.936393 4857 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.939169 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.939221 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.939239 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.939259 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:24 crc kubenswrapper[4857]: I1128 13:30:24.939277 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:24Z","lastTransitionTime":"2025-11-28T13:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:25 crc kubenswrapper[4857]: I1128 13:30:25.044490 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:25 crc kubenswrapper[4857]: I1128 13:30:25.044554 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:25 crc kubenswrapper[4857]: I1128 13:30:25.044564 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:25 crc kubenswrapper[4857]: I1128 13:30:25.044582 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:25 crc kubenswrapper[4857]: I1128 13:30:25.044595 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:25Z","lastTransitionTime":"2025-11-28T13:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:25 crc kubenswrapper[4857]: I1128 13:30:25.147726 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:25 crc kubenswrapper[4857]: I1128 13:30:25.147769 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:25 crc kubenswrapper[4857]: I1128 13:30:25.147781 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:25 crc kubenswrapper[4857]: I1128 13:30:25.147799 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:25 crc kubenswrapper[4857]: I1128 13:30:25.147811 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:25Z","lastTransitionTime":"2025-11-28T13:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:25 crc kubenswrapper[4857]: I1128 13:30:25.228582 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:30:25 crc kubenswrapper[4857]: E1128 13:30:25.228811 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:30:25 crc kubenswrapper[4857]: I1128 13:30:25.252310 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:25 crc kubenswrapper[4857]: I1128 13:30:25.252384 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:25 crc kubenswrapper[4857]: I1128 13:30:25.252410 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:25 crc kubenswrapper[4857]: I1128 13:30:25.252441 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:25 crc kubenswrapper[4857]: I1128 13:30:25.252466 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:25Z","lastTransitionTime":"2025-11-28T13:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 28 13:30:26 crc kubenswrapper[4857]: I1128 13:30:26.229050 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 13:30:26 crc kubenswrapper[4857]: I1128 13:30:26.229177 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 13:30:26 crc kubenswrapper[4857]: I1128 13:30:26.229219 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 13:30:26 crc kubenswrapper[4857]: E1128 13:30:26.229263 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 13:30:26 crc kubenswrapper[4857]: E1128 13:30:26.229433 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 13:30:26 crc kubenswrapper[4857]: E1128 13:30:26.229532 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 13:30:26 crc kubenswrapper[4857]: I1128 13:30:26.285566 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:30:26 crc kubenswrapper[4857]: I1128 13:30:26.285623 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:30:26 crc kubenswrapper[4857]: I1128 13:30:26.285642 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:30:26 crc kubenswrapper[4857]: I1128 13:30:26.285666 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:30:26 crc kubenswrapper[4857]: I1128 13:30:26.285685 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:26Z","lastTransitionTime":"2025-11-28T13:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:30:27 crc kubenswrapper[4857]: I1128 13:30:27.228674 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7"
Nov 28 13:30:27 crc kubenswrapper[4857]: E1128 13:30:27.228813 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a"
Nov 28 13:30:27 crc kubenswrapper[4857]: I1128 13:30:27.337023 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:30:27 crc kubenswrapper[4857]: I1128 13:30:27.337094 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:30:27 crc kubenswrapper[4857]: I1128 13:30:27.337114 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:30:27 crc kubenswrapper[4857]: I1128 13:30:27.337140 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:30:27 crc kubenswrapper[4857]: I1128 13:30:27.337161 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:27Z","lastTransitionTime":"2025-11-28T13:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:30:27 crc kubenswrapper[4857]: I1128 13:30:27.440581 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:30:27 crc kubenswrapper[4857]: I1128 13:30:27.440897 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:30:27 crc kubenswrapper[4857]: I1128 13:30:27.441010 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:30:27 crc kubenswrapper[4857]: I1128 13:30:27.441116 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:30:27 crc kubenswrapper[4857]: I1128 13:30:27.441210 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:27Z","lastTransitionTime":"2025-11-28T13:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.162721 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.162824 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.162838 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.162853 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.162864 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:28Z","lastTransitionTime":"2025-11-28T13:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.228407 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 13:30:28 crc kubenswrapper[4857]: E1128 13:30:28.228599 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.228814 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 13:30:28 crc kubenswrapper[4857]: E1128 13:30:28.228880 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.229122 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 13:30:28 crc kubenswrapper[4857]: E1128 13:30:28.229180 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.265298 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.265334 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.265343 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.265355 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.265364 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:28Z","lastTransitionTime":"2025-11-28T13:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.367741 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.367890 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.367908 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.367927 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.367939 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:28Z","lastTransitionTime":"2025-11-28T13:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.471346 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.471391 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.471403 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.471423 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.471436 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:28Z","lastTransitionTime":"2025-11-28T13:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.575149 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.575226 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.575237 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.575251 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.575264 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:28Z","lastTransitionTime":"2025-11-28T13:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.678659 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.678737 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.678751 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.678794 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.678810 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:28Z","lastTransitionTime":"2025-11-28T13:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.781762 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.781809 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.781824 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.781841 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.781855 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:28Z","lastTransitionTime":"2025-11-28T13:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.884480 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.884523 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.884540 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.884562 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.884580 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:28Z","lastTransitionTime":"2025-11-28T13:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.987859 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.987911 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.987921 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.987938 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:28 crc kubenswrapper[4857]: I1128 13:30:28.987970 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:28Z","lastTransitionTime":"2025-11-28T13:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.090296 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.090582 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.090592 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.090610 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.090622 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:29Z","lastTransitionTime":"2025-11-28T13:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.193858 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.193895 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.193904 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.193919 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.193930 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:29Z","lastTransitionTime":"2025-11-28T13:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.228547 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7"
Nov 28 13:30:29 crc kubenswrapper[4857]: E1128 13:30:29.229123 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a"
pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.296842 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.296908 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.296928 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.296983 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.297002 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:29Z","lastTransitionTime":"2025-11-28T13:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.400592 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.400638 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.400648 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.400667 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.400679 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:29Z","lastTransitionTime":"2025-11-28T13:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.504180 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.504591 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.504764 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.504909 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.505078 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:29Z","lastTransitionTime":"2025-11-28T13:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.608991 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.609359 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.609457 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.609544 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.609636 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:29Z","lastTransitionTime":"2025-11-28T13:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.713294 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.713338 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.713349 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.713368 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.713382 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:29Z","lastTransitionTime":"2025-11-28T13:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.818196 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.818314 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.818336 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.818362 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.818384 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:29Z","lastTransitionTime":"2025-11-28T13:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.921730 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.921767 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.921776 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.921793 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:29 crc kubenswrapper[4857]: I1128 13:30:29.921803 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:29Z","lastTransitionTime":"2025-11-28T13:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.024798 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.024887 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.024913 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.024975 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.025035 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:30Z","lastTransitionTime":"2025-11-28T13:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.128169 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.128213 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.128223 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.128240 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.128251 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:30Z","lastTransitionTime":"2025-11-28T13:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.228178 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.228293 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:30:30 crc kubenswrapper[4857]: E1128 13:30:30.228379 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.230266 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:30:30 crc kubenswrapper[4857]: E1128 13:30:30.230542 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:30:30 crc kubenswrapper[4857]: E1128 13:30:30.230577 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.233550 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.233578 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.233592 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.233608 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.233618 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:30Z","lastTransitionTime":"2025-11-28T13:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.250980 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52d03fd9944b8321b802a2a4317030abb1644c2c52c849ca35b5ba1eee3ebb4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshif
t-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cn
i/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee5711f06
9034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:30Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.286988 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8587850a181bf875ab24005a4b044489046212ae89b15834ccb0f0b46d3518b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b553128fa5576f5ac2e327e5e679a8098348ee31f0632c34d2b725d92198f8b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60c0f808818d6f71bad9f7b52d7fe42823b6e8ad83d3395366cab0f90fb75ec2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://933498cbc402ee7f14aab38ef86c401dc46df3c21d986c2a97ff1871bb7b7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4bb280266a0eb3369080dfcb1e4d0522f2d365a300f82637288356703bde242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1dc7ae9f1870db29ad30c6b4f3c8f8d28015c1e3d9bf27171e5e8a95d51b1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://304840d0c27e42a0a5ff1c6fbfd883550ce47337
8e8e7ae7d4a33e420f62de50\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://304840d0c27e42a0a5ff1c6fbfd883550ce473378e8e7ae7d4a33e420f62de50\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:30:07Z\\\",\\\"message\\\":\\\"590 default_network_controller.go:776] Recording success event on pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb\\\\nI1128 13:30:07.281386 6590 services_controller.go:453] Built service openshift-network-diagnostics/network-check-target template LB for network=default: []services.LB{}\\\\nI1128 13:30:07.281499 6590 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 13:30:07.281362 6590 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nF1128 13:30:07.281457 6590 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start no\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:30:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-h8td2_openshift-ovn-kubernetes(46c5e02c-be1a-45b7-86ef-cc8c484c4f71)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f7d207494d4959cc23a5df6738539f073ec3c45cfe0339bd14b1a3e876f6031\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:30Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.303571 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"adf71647-2fc9-4497-81ee-84e6373498b4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://57e2a3423c1095d435ed7835a89df7e3e8f12c8df10707e1a82ae4595fadf4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d598096a9163436c21991255cd21335da8307e56e5607288d289da81d77578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kmjsb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:30Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.322038 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:30Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.337527 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.337599 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.337618 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.337648 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.337670 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:30Z","lastTransitionTime":"2025-11-28T13:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.337663 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:30Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.353444 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:30Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.370870 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4e401482ae63a6e4ae751bb6a59688781c6b78961e3936facd2a2da80a538e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:30:19Z\\\",\\\"message\\\":\\\"2025-11-28T13:29:33+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_4169fb9e-7cb6-454a-8caa-0f2881067744\\\\n2025-11-28T13:29:33+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_4169fb9e-7cb6-454a-8caa-0f2881067744 to /host/opt/cni/bin/\\\\n2025-11-28T13:29:33Z [verbose] multus-daemon started\\\\n2025-11-28T13:29:33Z [verbose] Readiness Indicator file check\\\\n2025-11-28T13:30:18Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:30Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.387415 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cs9jw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6aa30f3141838a2780e6e300545fcb2d71a80ed4b146759f8dff91c976c3f36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zchsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cs9jw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:30Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.409891 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:30Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.428760 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ea3abc8-9e6f-454d-ac25-f04483a5d4d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93cf74b7715a705e22a03a506d90d151a961e80a83a4aeda3d4e361cfc2a178d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc226ca8af9c501cbd610bcd82596f359f573fc294ce0830056d1d59bd066b0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8caf792b0a8768a68663568cc8a1cdca8c4f3394f30fe098841c6e37b9cf9980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5c76797e337ee42b458f3cc8108b46bd86b18d98a1a07ed60df3778203feb39\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e5c76797e337ee42b458f3cc8108b46bd86b18d98a1a07ed60df3778203feb39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:30Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.441546 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.441661 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.441688 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.441773 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.441806 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:30Z","lastTransitionTime":"2025-11-28T13:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.446884 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:30Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.460431 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:30Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.472137 4857 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:30Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.483787 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:30Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.493203 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:30Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.501875 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-26tq7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cf43f51-b79b-49fc-85ca-a245a248f27a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-26tq7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:30Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.519194 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:30Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.545793 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.545846 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.545859 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.545876 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.545888 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:30Z","lastTransitionTime":"2025-11-28T13:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.648359 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.648405 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.648418 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.648439 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.648449 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:30Z","lastTransitionTime":"2025-11-28T13:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.750876 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.750930 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.750940 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.750977 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.750988 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:30Z","lastTransitionTime":"2025-11-28T13:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.854469 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.854514 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.854526 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.854544 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.854559 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:30Z","lastTransitionTime":"2025-11-28T13:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.957894 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.957966 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.957986 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.958007 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:30 crc kubenswrapper[4857]: I1128 13:30:30.958019 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:30Z","lastTransitionTime":"2025-11-28T13:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.062255 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.062318 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.062335 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.062354 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.062365 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:31Z","lastTransitionTime":"2025-11-28T13:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.164849 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.165116 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.165173 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.165194 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.165206 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:31Z","lastTransitionTime":"2025-11-28T13:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.228276 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:30:31 crc kubenswrapper[4857]: E1128 13:30:31.228455 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.269312 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.269387 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.269419 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.269449 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.269473 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:31Z","lastTransitionTime":"2025-11-28T13:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.373791 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.373844 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.373858 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.373881 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.373892 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:31Z","lastTransitionTime":"2025-11-28T13:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.476732 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.476783 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.476796 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.476815 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.476828 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:31Z","lastTransitionTime":"2025-11-28T13:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.580591 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.580660 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.580672 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.580698 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.580714 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:31Z","lastTransitionTime":"2025-11-28T13:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.683653 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.683702 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.683716 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.683737 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.683752 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:31Z","lastTransitionTime":"2025-11-28T13:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.786936 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.787025 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.787039 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.787058 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.787072 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:31Z","lastTransitionTime":"2025-11-28T13:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.890042 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.890110 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.890128 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.890160 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.890186 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:31Z","lastTransitionTime":"2025-11-28T13:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.993070 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.993116 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.993127 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.993142 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:31 crc kubenswrapper[4857]: I1128 13:30:31.993153 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:31Z","lastTransitionTime":"2025-11-28T13:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.095764 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.095803 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.095811 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.095825 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.095834 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:32Z","lastTransitionTime":"2025-11-28T13:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.199140 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.199185 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.199195 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.199210 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.199223 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:32Z","lastTransitionTime":"2025-11-28T13:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.228745 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.228780 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.228829 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:30:32 crc kubenswrapper[4857]: E1128 13:30:32.229010 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:30:32 crc kubenswrapper[4857]: E1128 13:30:32.229371 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:30:32 crc kubenswrapper[4857]: E1128 13:30:32.229455 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.302169 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.302216 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.302226 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.302240 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.302251 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:32Z","lastTransitionTime":"2025-11-28T13:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.405373 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.405431 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.405444 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.405463 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.405476 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:32Z","lastTransitionTime":"2025-11-28T13:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.509663 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.509741 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.509770 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.509801 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.509827 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:32Z","lastTransitionTime":"2025-11-28T13:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.613680 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.613740 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.613755 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.613776 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.613788 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:32Z","lastTransitionTime":"2025-11-28T13:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.717368 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.717429 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.717442 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.717464 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.717477 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:32Z","lastTransitionTime":"2025-11-28T13:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.820079 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.820146 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.820162 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.820186 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.820204 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:32Z","lastTransitionTime":"2025-11-28T13:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.923877 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.923994 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.924024 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.924058 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:32 crc kubenswrapper[4857]: I1128 13:30:32.924082 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:32Z","lastTransitionTime":"2025-11-28T13:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.029850 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.029928 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.029978 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.030007 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.030026 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:33Z","lastTransitionTime":"2025-11-28T13:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.133047 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.133103 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.133112 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.133130 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.133144 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:33Z","lastTransitionTime":"2025-11-28T13:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.228134 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:30:33 crc kubenswrapper[4857]: E1128 13:30:33.228332 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.237272 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.237336 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.237349 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.237371 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.237388 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:33Z","lastTransitionTime":"2025-11-28T13:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.340901 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.340988 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.341007 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.341031 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.341050 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:33Z","lastTransitionTime":"2025-11-28T13:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.445178 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.445258 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.445279 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.445307 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.445328 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:33Z","lastTransitionTime":"2025-11-28T13:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.445715 4857 fsHandler.go:133] fs: disk usage and inodes count on following dirs took 1.059162822s: [/var/lib/containers/storage/overlay/44991cbe2e8eda73f6fd72fb7e2dc6461db8f49d57cda6d46f68fa4289ad4e68/diff /var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log]; will not log again for this container unless duration exceeds 2s Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.549300 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.549347 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.549361 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.549379 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.549392 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:33Z","lastTransitionTime":"2025-11-28T13:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.652700 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.652749 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.652761 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.652778 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.652791 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:33Z","lastTransitionTime":"2025-11-28T13:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.755857 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.755977 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.755994 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.756016 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.756031 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:33Z","lastTransitionTime":"2025-11-28T13:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.858987 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.859051 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.859067 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.859088 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.859099 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:33Z","lastTransitionTime":"2025-11-28T13:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.962374 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.962443 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.962454 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.962475 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:33 crc kubenswrapper[4857]: I1128 13:30:33.962488 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:33Z","lastTransitionTime":"2025-11-28T13:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.058254 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.058386 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.058420 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:30:34 crc kubenswrapper[4857]: E1128 13:30:34.058533 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:38.058487547 +0000 UTC m=+148.182428984 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:30:34 crc kubenswrapper[4857]: E1128 13:30:34.058541 4857 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 13:30:34 crc kubenswrapper[4857]: E1128 13:30:34.058579 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.058590 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:30:34 crc kubenswrapper[4857]: E1128 13:30:34.058605 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 13:30:34 crc kubenswrapper[4857]: E1128 13:30:34.058645 4857 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object 
"openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:30:34 crc kubenswrapper[4857]: E1128 13:30:34.058626 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 13:31:38.058616271 +0000 UTC m=+148.182557708 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.058719 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:30:34 crc kubenswrapper[4857]: E1128 13:30:34.058781 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-28 13:31:38.058758244 +0000 UTC m=+148.182699911 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:30:34 crc kubenswrapper[4857]: E1128 13:30:34.058815 4857 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 13:30:34 crc kubenswrapper[4857]: E1128 13:30:34.058857 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 28 13:30:34 crc kubenswrapper[4857]: E1128 13:30:34.058890 4857 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 28 13:30:34 crc kubenswrapper[4857]: E1128 13:30:34.058908 4857 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:30:34 crc kubenswrapper[4857]: E1128 13:30:34.058972 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2025-11-28 13:31:38.058920889 +0000 UTC m=+148.182862346 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 28 13:30:34 crc kubenswrapper[4857]: E1128 13:30:34.058997 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-28 13:31:38.05898655 +0000 UTC m=+148.182927997 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.065326 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.065380 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.065395 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.065423 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.065445 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:34Z","lastTransitionTime":"2025-11-28T13:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.169288 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.169337 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.169351 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.169373 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.169388 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:34Z","lastTransitionTime":"2025-11-28T13:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.227861 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.227903 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:30:34 crc kubenswrapper[4857]: E1128 13:30:34.228042 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.227919 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:30:34 crc kubenswrapper[4857]: E1128 13:30:34.228165 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:30:34 crc kubenswrapper[4857]: E1128 13:30:34.228337 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.279892 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.280181 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.280193 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.280211 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.280226 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:34Z","lastTransitionTime":"2025-11-28T13:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.384383 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.384471 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.384498 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.384538 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.384564 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:34Z","lastTransitionTime":"2025-11-28T13:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.487317 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.487376 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.487391 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.487412 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.487425 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:34Z","lastTransitionTime":"2025-11-28T13:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.590615 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.590672 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.590691 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.590721 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.590739 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:34Z","lastTransitionTime":"2025-11-28T13:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.694375 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.694434 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.694444 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.694463 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.694474 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:34Z","lastTransitionTime":"2025-11-28T13:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.798091 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.798186 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.798213 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.798247 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.798271 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:34Z","lastTransitionTime":"2025-11-28T13:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.901761 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.901825 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.901838 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.901862 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:34 crc kubenswrapper[4857]: I1128 13:30:34.901877 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:34Z","lastTransitionTime":"2025-11-28T13:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.005199 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.005247 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.005261 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.005278 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.005289 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:35Z","lastTransitionTime":"2025-11-28T13:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.020420 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.020473 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.020486 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.020509 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.020524 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:35Z","lastTransitionTime":"2025-11-28T13:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:35 crc kubenswrapper[4857]: E1128 13:30:35.034734 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:35Z is after 2025-08-24T17:21:41Z"
Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.039033 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.039094 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.039107 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.039131 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.039144 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:35Z","lastTransitionTime":"2025-11-28T13:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:30:35 crc kubenswrapper[4857]: E1128 13:30:35.051123 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.056436 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.056499 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.056512 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.056535 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.056545 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:35Z","lastTransitionTime":"2025-11-28T13:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:35 crc kubenswrapper[4857]: E1128 13:30:35.073550 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.078685 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.078760 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.078777 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.078804 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.078821 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:35Z","lastTransitionTime":"2025-11-28T13:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:35 crc kubenswrapper[4857]: E1128 13:30:35.095694 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.102057 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.102125 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.102141 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.102164 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.102178 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:35Z","lastTransitionTime":"2025-11-28T13:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:35 crc kubenswrapper[4857]: E1128 13:30:35.123327 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:35 crc kubenswrapper[4857]: E1128 13:30:35.123526 4857 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.126211 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.126265 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.126277 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.126298 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.126310 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:35Z","lastTransitionTime":"2025-11-28T13:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.228218 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:30:35 crc kubenswrapper[4857]: E1128 13:30:35.229099 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.229905 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.229932 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.229957 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.229972 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.229982 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:35Z","lastTransitionTime":"2025-11-28T13:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.230344 4857 scope.go:117] "RemoveContainer" containerID="304840d0c27e42a0a5ff1c6fbfd883550ce473378e8e7ae7d4a33e420f62de50" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.240100 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.332962 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.333012 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.333023 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.333044 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.333056 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:35Z","lastTransitionTime":"2025-11-28T13:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.436447 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.436506 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.436519 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.436538 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.436549 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:35Z","lastTransitionTime":"2025-11-28T13:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.473564 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-h8td2_46c5e02c-be1a-45b7-86ef-cc8c484c4f71/ovnkube-controller/2.log" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.477335 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" event={"ID":"46c5e02c-be1a-45b7-86ef-cc8c484c4f71","Type":"ContainerStarted","Data":"63d0afa8ede2911fc623c7b2bd7143f0bac957f388a64f35db245dd14a67c558"} Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.478312 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.496252 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4aca
a34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.509221 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.521676 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-26tq7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cf43f51-b79b-49fc-85ca-a245a248f27a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-26tq7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.536146 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.539289 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.539335 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.539345 4857 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.539364 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.539381 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:35Z","lastTransitionTime":"2025-11-28T13:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.551740 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52d03fd9944b8321b802a2a4317030abb1644c2c52c849ca35b5ba1eee3ebb4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d
0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Runnin
g\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.575707 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8587850a181bf875ab24005a4b044489046212ae89b15834ccb0f0b46d3518b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b553128fa5576f5ac2e327e5e679a8098348ee31f0632c34d2b725d92198f8b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60c0f808818d6f71bad9f7b52d7fe42823b6e8ad83d3395366cab0f90fb75ec2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://933498cbc402ee7f14aab38ef86c401dc46df3c21d986c2a97ff1871bb7b7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4bb280266a0eb3369080dfcb1e4d0522f2d365a300f82637288356703bde242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1dc7ae9f1870db29ad30c6b4f3c8f8d28015c1e3d9bf27171e5e8a95d51b1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://63d0afa8ede2911fc623c7b2bd7143f0bac957f3
88a64f35db245dd14a67c558\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://304840d0c27e42a0a5ff1c6fbfd883550ce473378e8e7ae7d4a33e420f62de50\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:30:07Z\\\",\\\"message\\\":\\\"590 default_network_controller.go:776] Recording success event on pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb\\\\nI1128 13:30:07.281386 6590 services_controller.go:453] Built service openshift-network-diagnostics/network-check-target template LB for network=default: []services.LB{}\\\\nI1128 13:30:07.281499 6590 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 13:30:07.281362 6590 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nF1128 13:30:07.281457 6590 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start 
no\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:30:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:30:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f7d207494d4959cc23a5df6738539f073ec3c45cfe0339bd14b1a3e876f6031\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.595450 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"adf71647-2fc9-4497-81ee-84e6373498b4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://57e2a3423c1095d435ed7835a89df7e3e8f12c8df10707e1a82ae4595fadf4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d598096a9163436c21991255cd21335da8307e56e5607288d289da81d77578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kmjsb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:35Z is after 2025-08-24T17:21:41Z" Nov 28 
13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.611461 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.627141 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ea3abc8-9e6f-454d-ac25-f04483a5d4d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93cf74b7715a705e22a03a506d90d151a961e80a83a4aeda3d4e361cfc2a178d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc226ca8af9c501cbd610bcd82596f359f573fc294ce0830056d1d59bd066b0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8caf792b0a8768a68663568cc8a1cdca8c4f3394f30fe098841c6e37b9cf9980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5c76797e337ee42b458f3cc8108b46bd86b18d98a1a07ed60df3778203feb39\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e5c76797e337ee42b458f3cc8108b46bd86b18d98a1a07ed60df3778203feb39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.642221 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.642259 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.642272 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.642291 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.642306 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:35Z","lastTransitionTime":"2025-11-28T13:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.661701 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.684072 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4e401482ae63a6e4ae751bb6a59688781c6b78961e3936facd2a2da80a538e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:30:19Z\\\",\\\"message\\\":\\\"2025-11-28T13:29:33+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_4169fb9e-7cb6-454a-8caa-0f2881067744\\\\n2025-11-28T13:29:33+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_4169fb9e-7cb6-454a-8caa-0f2881067744 to /host/opt/cni/bin/\\\\n2025-11-28T13:29:33Z [verbose] multus-daemon started\\\\n2025-11-28T13:29:33Z [verbose] Readiness Indicator file check\\\\n2025-11-28T13:30:18Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.698203 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cs9jw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6aa30f3141838a2780e6e300545fcb2d71a80ed4b146759f8dff91c976c3f36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zchsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cs9jw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.712425 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.728489 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.743359 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.745659 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.745700 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.745721 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.745745 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.745760 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:35Z","lastTransitionTime":"2025-11-28T13:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.756508 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.768392 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.783452 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b8298b87-d55d-436d-8b66-848d1fb0563f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a4ebf7d9e3c843109e00e0275ae58a7c1ac629b408d5a60afb11e2a77adee6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f6bc7875fb476c21ff62d60c4829056eaa350b20beff5473fc880ae20ede265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f6bc7875fb476c21ff62d60c4829056eaa350b20beff5473fc880ae20ede265\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:35Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.848433 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.848485 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.848497 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.848514 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.848526 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:35Z","lastTransitionTime":"2025-11-28T13:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.952185 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.952687 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.952709 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.952739 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:35 crc kubenswrapper[4857]: I1128 13:30:35.952763 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:35Z","lastTransitionTime":"2025-11-28T13:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.055481 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.055538 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.055550 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.055573 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.055586 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:36Z","lastTransitionTime":"2025-11-28T13:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.159147 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.159215 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.159234 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.159263 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.159286 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:36Z","lastTransitionTime":"2025-11-28T13:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.228014 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.228108 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.228126 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:30:36 crc kubenswrapper[4857]: E1128 13:30:36.228251 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:30:36 crc kubenswrapper[4857]: E1128 13:30:36.228374 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:30:36 crc kubenswrapper[4857]: E1128 13:30:36.228514 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.262691 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.262747 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.262761 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.262780 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.262795 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:36Z","lastTransitionTime":"2025-11-28T13:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.366019 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.366066 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.366078 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.366095 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.366106 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:36Z","lastTransitionTime":"2025-11-28T13:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.470335 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.470419 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.470440 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.470472 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.470491 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:36Z","lastTransitionTime":"2025-11-28T13:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.485528 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-h8td2_46c5e02c-be1a-45b7-86ef-cc8c484c4f71/ovnkube-controller/3.log" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.486485 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-h8td2_46c5e02c-be1a-45b7-86ef-cc8c484c4f71/ovnkube-controller/2.log" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.491457 4857 generic.go:334] "Generic (PLEG): container finished" podID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerID="63d0afa8ede2911fc623c7b2bd7143f0bac957f388a64f35db245dd14a67c558" exitCode=1 Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.491515 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" event={"ID":"46c5e02c-be1a-45b7-86ef-cc8c484c4f71","Type":"ContainerDied","Data":"63d0afa8ede2911fc623c7b2bd7143f0bac957f388a64f35db245dd14a67c558"} Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.491573 4857 scope.go:117] "RemoveContainer" containerID="304840d0c27e42a0a5ff1c6fbfd883550ce473378e8e7ae7d4a33e420f62de50" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.492386 4857 scope.go:117] "RemoveContainer" containerID="63d0afa8ede2911fc623c7b2bd7143f0bac957f388a64f35db245dd14a67c558" Nov 28 13:30:36 crc kubenswrapper[4857]: E1128 13:30:36.492619 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-h8td2_openshift-ovn-kubernetes(46c5e02c-be1a-45b7-86ef-cc8c484c4f71)\"" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.515538 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.536245 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.552516 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52d03fd9944b8321b802a2a4317030abb1644c2c52c849ca35b5ba1eee3ebb4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.572793 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8587850a181bf875ab24005a4b044489046212ae89b15834ccb0f0b46d3518b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b553128fa5576f5ac2e327e5e679a8098348ee31f0632c34d2b725d92198f8b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60c0f808818d6f71bad9f7b52d7fe42823b6e8ad83d3395366cab0f90fb75ec2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://933498cbc402ee7f14aab38ef86c401dc46df3c21d986c2a97ff1871bb7b7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4bb280266a0eb3369080dfcb1e4d0522f2d365a300f82637288356703bde242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1dc7ae9f1870db29ad30c6b4f3c8f8d28015c1e3d9bf27171e5e8a95d51b1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://63d0afa8ede2911fc623c7b2bd7143f0bac957f388a64f35db245dd14a67c558\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://304840d0c27e42a0a5ff1c6fbfd883550ce473378e8e7ae7d4a33e420f62de50\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:30:07Z\\\",\\\"message\\\":\\\"590 default_network_controller.go:776] Recording success event on pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb\\\\nI1128 13:30:07.281386 6590 services_controller.go:453] Built service openshift-network-diagnostics/network-check-target template LB for network=default: []services.LB{}\\\\nI1128 13:30:07.281499 6590 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1128 13:30:07.281362 6590 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nF1128 13:30:07.281457 6590 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start no\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:30:06Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63d0afa8ede2911fc623c7b2bd7143f0bac957f388a64f35db245dd14a67c558\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:30:36Z\\\",\\\"message\\\":\\\"er {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network 
controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:36Z is after 2025-08-24T17:21:41Z]\\\\nI1128 13:30:36.045272 6961 services_controller.go:454] Service openshift-oauth-apiserver/api for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nI1128 13:30:36.045299 6961 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-oauth-apiserver/api_TCP_cluster\\\\\\\", UUID:\\\\\\\"fe46cb89-4e54-4175-a112-1c5224cd299e\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-oauth-apiserver/api\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Rou\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:30:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f7d207494d4959cc23a5df6738539f073ec3c45cfe0339bd14b1
a3e876f6031\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.574990 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.575050 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.575068 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.575093 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.575116 4857 
setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:36Z","lastTransitionTime":"2025-11-28T13:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.588807 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"adf71647-2fc9-4497-81ee-84e6373498b4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://57e2a3423c1095d435ed7835a89df7e3e8f12c8df10707e1a82ae4595fadf4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d598096a9163436c21991255cd21335da8307e56e5607288d289da81d77578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP
\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kmjsb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.608105 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-api
server-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.622405 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ea3abc8-9e6f-454d-ac25-f04483a5d4d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93cf74b7715a705e22a03a506d90d151a961e80a83a4aeda3d4e361cfc2a178d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc226ca8af9c501cbd610bcd82596f359f573fc294ce0830056d1d59bd066b0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8caf792b0a8768a68663568cc8a1cdca8c4f3394f30fe098841c6e37b9cf9980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5c76797e337ee42b458f3cc8108b46bd86b18d98a1a07ed60df3778203feb39\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e5c76797e337ee42b458f3cc8108b46bd86b18d98a1a07ed60df3778203feb39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.637265 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.653878 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4e401482ae63a6e4ae751bb6a59688781c6b78961e3936facd2a2da80a538e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:30:19Z\\\",\\\"message\\\":\\\"2025-11-28T13:29:33+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_4169fb9e-7cb6-454a-8caa-0f2881067744\\\\n2025-11-28T13:29:33+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_4169fb9e-7cb6-454a-8caa-0f2881067744 to /host/opt/cni/bin/\\\\n2025-11-28T13:29:33Z [verbose] multus-daemon started\\\\n2025-11-28T13:29:33Z [verbose] Readiness Indicator file check\\\\n2025-11-28T13:30:18Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.666175 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cs9jw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6aa30f3141838a2780e6e300545fcb2d71a80ed4b146759f8dff91c976c3f36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zchsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cs9jw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.677508 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.677554 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.677564 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.677581 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.677592 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:36Z","lastTransitionTime":"2025-11-28T13:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.679481 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b8298b87-d55d-436d-8b66-848d1fb0563f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a4ebf7d9e3c843109e00e0275ae58a7c1ac629b408d5a60afb11e2a77adee6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f6bc7875fb476c21ff62d60c4829056eaa350b20beff5473fc880ae20ede265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f6bc7875fb476c21ff62d60c4829056eaa350b20beff5473fc880ae20ede265\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-28T13:30:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.694814 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.709164 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.722560 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.734207 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Dis
abled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.747889 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-o
perator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.759778 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.771023 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-26tq7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cf43f51-b79b-49fc-85ca-a245a248f27a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-26tq7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:36Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.780289 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.780358 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.780381 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.780407 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.780424 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:36Z","lastTransitionTime":"2025-11-28T13:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.884151 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.884194 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.884205 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.884219 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.884229 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:36Z","lastTransitionTime":"2025-11-28T13:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.988110 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.988166 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.988174 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.988190 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:36 crc kubenswrapper[4857]: I1128 13:30:36.988201 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:36Z","lastTransitionTime":"2025-11-28T13:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.092652 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.092736 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.092756 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.092784 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.092812 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:37Z","lastTransitionTime":"2025-11-28T13:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.196839 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.196901 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.196916 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.196963 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.196978 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:37Z","lastTransitionTime":"2025-11-28T13:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.228400 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:30:37 crc kubenswrapper[4857]: E1128 13:30:37.228537 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.299785 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.299834 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.299845 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.299860 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.299872 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:37Z","lastTransitionTime":"2025-11-28T13:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.402665 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.402718 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.402728 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.402744 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.402756 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:37Z","lastTransitionTime":"2025-11-28T13:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.498895 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-h8td2_46c5e02c-be1a-45b7-86ef-cc8c484c4f71/ovnkube-controller/3.log" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.505277 4857 scope.go:117] "RemoveContainer" containerID="63d0afa8ede2911fc623c7b2bd7143f0bac957f388a64f35db245dd14a67c558" Nov 28 13:30:37 crc kubenswrapper[4857]: E1128 13:30:37.505612 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-h8td2_openshift-ovn-kubernetes(46c5e02c-be1a-45b7-86ef-cc8c484c4f71)\"" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.505686 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.505744 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.505760 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.505787 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.505809 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:37Z","lastTransitionTime":"2025-11-28T13:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.523744 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.544173 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ea3abc8-9e6f-454d-ac25-f04483a5d4d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93cf74b7715a705e22a03a506d90d151a961e80a83a4aeda3d4e361cfc2a178d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc226ca8af9c501cbd610bcd82596f359f573fc294ce0830056d1d59bd066b0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8caf792b0a8768a68663568cc8a1cdca8c4f3394f30fe098841c6e37b9cf9980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5c76797e337ee42b458f3cc8108b46bd86b18d98a1a07ed60df3778203feb39\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e5c76797e337ee42b458f3cc8108b46bd86b18d98a1a07ed60df3778203feb39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.560429 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.582060 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4e401482ae63a6e4ae751bb6a59688781c6b78961e3936facd2a2da80a538e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:30:19Z\\\",\\\"message\\\":\\\"2025-11-28T13:29:33+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_4169fb9e-7cb6-454a-8caa-0f2881067744\\\\n2025-11-28T13:29:33+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_4169fb9e-7cb6-454a-8caa-0f2881067744 to /host/opt/cni/bin/\\\\n2025-11-28T13:29:33Z [verbose] multus-daemon started\\\\n2025-11-28T13:29:33Z [verbose] Readiness Indicator file check\\\\n2025-11-28T13:30:18Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.595465 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cs9jw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6aa30f3141838a2780e6e300545fcb2d71a80ed4b146759f8dff91c976c3f36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zchsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cs9jw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.609488 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.609553 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.609563 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.609580 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.609590 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:37Z","lastTransitionTime":"2025-11-28T13:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.610344 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b8298b87-d55d-436d-8b66-848d1fb0563f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a4ebf7d9e3c843109e00e0275ae58a7c1ac629b408d5a60afb11e2a77adee6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f6bc7875fb476c21ff62d60c4829056eaa350b20beff5473fc880ae20ede265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f6bc7875fb476c21ff62d60c4829056eaa350b20beff5473fc880ae20ede265\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-11-28T13:30:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.626381 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.646238 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.666297 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.682933 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Dis
abled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.701217 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-o
perator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.712450 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.712505 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.712517 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.712536 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.712552 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:37Z","lastTransitionTime":"2025-11-28T13:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.719547 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.733504 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-26tq7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cf43f51-b79b-49fc-85ca-a245a248f27a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-26tq7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.749855 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.764355 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.783132 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52d03fd9944b8321b802a2a4317030abb1644c2c52c849ca35b5ba1eee3ebb4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.804592 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8587850a181bf875ab24005a4b044489046212ae89b15834ccb0f0b46d3518b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b553128fa5576f5ac2e327e5e679a8098348ee31f0632c34d2b725d92198f8b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60c0f808818d6f71bad9f7b52d7fe42823b6e8ad83d3395366cab0f90fb75ec2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://933498cbc402ee7f14aab38ef86c401dc46df3c21d986c2a97ff1871bb7b7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4bb280266a0eb3369080dfcb1e4d0522f2d365a300f82637288356703bde242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1dc7ae9f1870db29ad30c6b4f3c8f8d28015c1e3d9bf27171e5e8a95d51b1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://63d0afa8ede2911fc623c7b2bd7143f0bac957f388a64f35db245dd14a67c558\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63d0afa8ede2911fc623c7b2bd7143f0bac957f388a64f35db245dd14a67c558\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:30:36Z\\\",\\\"message\\\":\\\"er {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:36Z is after 2025-08-24T17:21:41Z]\\\\nI1128 13:30:36.045272 6961 services_controller.go:454] Service openshift-oauth-apiserver/api for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nI1128 13:30:36.045299 6961 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-oauth-apiserver/api_TCP_cluster\\\\\\\", UUID:\\\\\\\"fe46cb89-4e54-4175-a112-1c5224cd299e\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-oauth-apiserver/api\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Rou\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:30:35Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-h8td2_openshift-ovn-kubernetes(46c5e02c-be1a-45b7-86ef-cc8c484c4f71)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f7d207494d4959cc23a5df6738539f073ec3c45cfe0339bd14b1a3e876f6031\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.815690 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.815729 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.815743 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.815823 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.815847 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:37Z","lastTransitionTime":"2025-11-28T13:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.819141 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"adf71647-2fc9-4497-81ee-84e6373498b4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://57e2a3423c1095d435ed7835a89df7e3e8f12c8df10707e1a82ae4595fadf4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d598096a9163436c21991255cd21335da8307e56e5607288d289da81d77578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kmjsb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:37Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.919652 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.919703 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.919718 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.919737 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:37 crc kubenswrapper[4857]: I1128 13:30:37.919750 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:37Z","lastTransitionTime":"2025-11-28T13:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.023139 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.023206 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.023219 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.023238 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.023252 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:38Z","lastTransitionTime":"2025-11-28T13:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.126662 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.126716 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.126727 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.126744 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.126757 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:38Z","lastTransitionTime":"2025-11-28T13:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.227906 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:30:38 crc kubenswrapper[4857]: E1128 13:30:38.228128 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.227906 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:30:38 crc kubenswrapper[4857]: E1128 13:30:38.228296 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.228767 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:30:38 crc kubenswrapper[4857]: E1128 13:30:38.229053 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.229805 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.229846 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.229859 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.229880 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.229894 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:38Z","lastTransitionTime":"2025-11-28T13:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.332681 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.332760 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.332771 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.332790 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.332802 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:38Z","lastTransitionTime":"2025-11-28T13:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.436472 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.436530 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.436543 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.436562 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.436579 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:38Z","lastTransitionTime":"2025-11-28T13:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.540547 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.540640 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.540663 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.540696 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.540719 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:38Z","lastTransitionTime":"2025-11-28T13:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.644212 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.644289 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.644310 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.644343 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.644369 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:38Z","lastTransitionTime":"2025-11-28T13:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.748345 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.748438 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.748462 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.748494 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.748518 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:38Z","lastTransitionTime":"2025-11-28T13:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.851672 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.851768 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.851778 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.851801 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.851812 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:38Z","lastTransitionTime":"2025-11-28T13:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.955126 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.955200 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.955212 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.955234 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:38 crc kubenswrapper[4857]: I1128 13:30:38.955249 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:38Z","lastTransitionTime":"2025-11-28T13:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.058403 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.058455 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.058464 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.058482 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.058494 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:39Z","lastTransitionTime":"2025-11-28T13:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.161447 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.161509 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.161522 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.161544 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.161557 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:39Z","lastTransitionTime":"2025-11-28T13:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.228706 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:30:39 crc kubenswrapper[4857]: E1128 13:30:39.228909 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.265038 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.265092 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.265104 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.265120 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.265133 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:39Z","lastTransitionTime":"2025-11-28T13:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.368447 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.368494 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.368504 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.368523 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.368535 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:39Z","lastTransitionTime":"2025-11-28T13:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.472256 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.472327 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.472341 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.472362 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.472378 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:39Z","lastTransitionTime":"2025-11-28T13:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.576577 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.576646 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.576666 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.576693 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.576711 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:39Z","lastTransitionTime":"2025-11-28T13:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.680161 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.680221 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.680234 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.680254 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.680268 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:39Z","lastTransitionTime":"2025-11-28T13:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.783581 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.783851 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.783871 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.783927 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.784015 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:39Z","lastTransitionTime":"2025-11-28T13:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.887910 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.888050 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.888666 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.888732 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.888758 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:39Z","lastTransitionTime":"2025-11-28T13:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.992484 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.992536 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.992548 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.992566 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:39 crc kubenswrapper[4857]: I1128 13:30:39.992578 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:39Z","lastTransitionTime":"2025-11-28T13:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.095565 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.095631 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.095643 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.095667 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.095679 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:40Z","lastTransitionTime":"2025-11-28T13:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.200173 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.200256 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.200275 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.200305 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.200325 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:40Z","lastTransitionTime":"2025-11-28T13:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.228082 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.228160 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.228240 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:30:40 crc kubenswrapper[4857]: E1128 13:30:40.228289 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:30:40 crc kubenswrapper[4857]: E1128 13:30:40.228401 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:30:40 crc kubenswrapper[4857]: E1128 13:30:40.228586 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.244889 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\
\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.259514 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.274175 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-26tq7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cf43f51-b79b-49fc-85ca-a245a248f27a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-26tq7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.288484 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.302931 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.303024 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.303036 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.303054 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.303064 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:40Z","lastTransitionTime":"2025-11-28T13:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.304163 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.325223 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52d03fd9944b8321b802a2a4317030abb1644c2c52c849ca35b5ba1eee3ebb4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.350170 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8587850a181bf875ab24005a4b044489046212ae89b15834ccb0f0b46d3518b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b553128fa5576f5ac2e327e5e679a8098348ee31f0632c34d2b725d92198f8b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60c0f808818d6f71bad9f7b52d7fe42823b6e8ad83d3395366cab0f90fb75ec2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://933498cbc402ee7f14aab38ef86c401dc46df3c21d986c2a97ff1871bb7b7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4bb280266a0eb3369080dfcb1e4d0522f2d365a300f82637288356703bde242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1dc7ae9f1870db29ad30c6b4f3c8f8d28015c1e3d9bf27171e5e8a95d51b1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://63d0afa8ede2911fc623c7b2bd7143f0bac957f388a64f35db245dd14a67c558\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63d0afa8ede2911fc623c7b2bd7143f0bac957f388a64f35db245dd14a67c558\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:30:36Z\\\",\\\"message\\\":\\\"er {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:36Z is after 2025-08-24T17:21:41Z]\\\\nI1128 13:30:36.045272 6961 services_controller.go:454] Service openshift-oauth-apiserver/api for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nI1128 13:30:36.045299 6961 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-oauth-apiserver/api_TCP_cluster\\\\\\\", UUID:\\\\\\\"fe46cb89-4e54-4175-a112-1c5224cd299e\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-oauth-apiserver/api\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Rou\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:30:35Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-h8td2_openshift-ovn-kubernetes(46c5e02c-be1a-45b7-86ef-cc8c484c4f71)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f7d207494d4959cc23a5df6738539f073ec3c45cfe0339bd14b1a3e876f6031\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.364206 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"adf71647-2fc9-4497-81ee-84e6373498b4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://57e2a3423c1095d435ed7835a89df7e3e8f12c8df10707e1a82ae4595fadf4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d598096a9163436c21991255cd21335da8307e56e5607288d289da81d77578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kmjsb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.383537 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.397207 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ea3abc8-9e6f-454d-ac25-f04483a5d4d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93cf74b7715a705e22a03a506d90d151a961e80a83a4aeda3d4e361cfc2a178d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc226ca8af9c501cbd610bcd82596f359f573fc294ce0830056d1d59bd066b0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8caf792b0a8768a68663568cc8a1cdca8c4f3394f30fe098841c6e37b9cf9980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5c76797e337ee42b458f3cc8108b46bd86b18d98a1a07ed60df3778203feb39\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e5c76797e337ee42b458f3cc8108b46bd86b18d98a1a07ed60df3778203feb39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.405704 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.405781 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.405794 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.405815 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.405828 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:40Z","lastTransitionTime":"2025-11-28T13:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.411655 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.426076 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4e401482ae63a6e4ae751bb6a59688781c6b78961e3936facd2a2da80a538e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:30:19Z\\\",\\\"message\\\":\\\"2025-11-28T13:29:33+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_4169fb9e-7cb6-454a-8caa-0f2881067744\\\\n2025-11-28T13:29:33+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_4169fb9e-7cb6-454a-8caa-0f2881067744 to /host/opt/cni/bin/\\\\n2025-11-28T13:29:33Z [verbose] multus-daemon started\\\\n2025-11-28T13:29:33Z [verbose] Readiness Indicator file check\\\\n2025-11-28T13:30:18Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.441441 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cs9jw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6aa30f3141838a2780e6e300545fcb2d71a80ed4b146759f8dff91c976c3f36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zchsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cs9jw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.452466 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b8298b87-d55d-436d-8b66-848d1fb0563f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a4ebf7d9e3c843109e00e0275ae58a7c1ac629b408d5a60afb11e2a77adee6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f6bc7875fb476c21ff62d60c4829056eaa350b20beff5473fc880ae20ede265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f6bc7875fb476c21ff62d60c4829056eaa350b20beff5473fc880ae20ede265\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.477205 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.498573 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.509134 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.509188 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.509197 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.509215 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.509229 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:40Z","lastTransitionTime":"2025-11-28T13:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.515876 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.529024 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:40Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.612480 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.612529 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.612539 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.612558 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:40 crc 
kubenswrapper[4857]: I1128 13:30:40.612571 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:40Z","lastTransitionTime":"2025-11-28T13:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.715545 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.715601 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.715616 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.715636 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.715648 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:40Z","lastTransitionTime":"2025-11-28T13:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.818458 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.818527 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.818540 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.818560 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.818573 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:40Z","lastTransitionTime":"2025-11-28T13:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.921899 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.921978 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.921991 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.922008 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:30:40 crc kubenswrapper[4857]: I1128 13:30:40.922022 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:40Z","lastTransitionTime":"2025-11-28T13:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.025678 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.025727 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.025739 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.025759 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.025773 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:41Z","lastTransitionTime":"2025-11-28T13:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.129131 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.129193 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.129204 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.129221 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.129232 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:41Z","lastTransitionTime":"2025-11-28T13:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.228054 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7"
Nov 28 13:30:41 crc kubenswrapper[4857]: E1128 13:30:41.228228 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a"
Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.231787 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.231835 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.231848 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.231868 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.231880 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:41Z","lastTransitionTime":"2025-11-28T13:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.335657 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.335721 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.335732 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.335751 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.335762 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:41Z","lastTransitionTime":"2025-11-28T13:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.437974 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.438016 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.438026 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.438041 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.438051 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:41Z","lastTransitionTime":"2025-11-28T13:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.540922 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.540989 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.540999 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.541018 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.541031 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:41Z","lastTransitionTime":"2025-11-28T13:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.644017 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.644079 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.644251 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.644283 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.644298 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:41Z","lastTransitionTime":"2025-11-28T13:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.747435 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.747498 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.747508 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.747525 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.747536 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:41Z","lastTransitionTime":"2025-11-28T13:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.850705 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.850740 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.850749 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.850764 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.850773 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:41Z","lastTransitionTime":"2025-11-28T13:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.954157 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.954221 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.954234 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.954253 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:41 crc kubenswrapper[4857]: I1128 13:30:41.954317 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:41Z","lastTransitionTime":"2025-11-28T13:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.058239 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.058769 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.058915 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.059074 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.059200 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:42Z","lastTransitionTime":"2025-11-28T13:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.162004 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.162059 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.162071 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.162088 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.162102 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:42Z","lastTransitionTime":"2025-11-28T13:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.228606 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.228652 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:30:42 crc kubenswrapper[4857]: E1128 13:30:42.228781 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.228867 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:30:42 crc kubenswrapper[4857]: E1128 13:30:42.229068 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:30:42 crc kubenswrapper[4857]: E1128 13:30:42.229200 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.265113 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.265178 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.265195 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.265220 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.265239 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:42Z","lastTransitionTime":"2025-11-28T13:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.368029 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.368078 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.368091 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.368108 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.368122 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:42Z","lastTransitionTime":"2025-11-28T13:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.470832 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.470870 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.470883 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.470900 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.470909 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:42Z","lastTransitionTime":"2025-11-28T13:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.572853 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.572984 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.572994 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.573007 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.573017 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:42Z","lastTransitionTime":"2025-11-28T13:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.676045 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.676111 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.676125 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.676148 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.676163 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:42Z","lastTransitionTime":"2025-11-28T13:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.779117 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.779169 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.779185 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.779208 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.779224 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:42Z","lastTransitionTime":"2025-11-28T13:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.882087 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.882120 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.882132 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.882146 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.882158 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:42Z","lastTransitionTime":"2025-11-28T13:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.984658 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.985091 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.985269 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.985581 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:42 crc kubenswrapper[4857]: I1128 13:30:42.985772 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:42Z","lastTransitionTime":"2025-11-28T13:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.087861 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.087908 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.087918 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.087933 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.087963 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:43Z","lastTransitionTime":"2025-11-28T13:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.190930 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.190978 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.190988 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.191003 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.191013 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:43Z","lastTransitionTime":"2025-11-28T13:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.228694 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:30:43 crc kubenswrapper[4857]: E1128 13:30:43.229485 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.293981 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.294028 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.294037 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.294054 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.294064 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:43Z","lastTransitionTime":"2025-11-28T13:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.396482 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.396524 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.396535 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.396551 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.396564 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:43Z","lastTransitionTime":"2025-11-28T13:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.499782 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.500062 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.500177 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.500258 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.500336 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:43Z","lastTransitionTime":"2025-11-28T13:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.602769 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.602802 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.602811 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.602824 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.602833 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:43Z","lastTransitionTime":"2025-11-28T13:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.705231 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.705278 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.705291 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.705308 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.705322 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:43Z","lastTransitionTime":"2025-11-28T13:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.808849 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.808903 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.808912 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.808930 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.808969 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:43Z","lastTransitionTime":"2025-11-28T13:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.912262 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.912338 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.912357 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.912389 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:43 crc kubenswrapper[4857]: I1128 13:30:43.912409 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:43Z","lastTransitionTime":"2025-11-28T13:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.015535 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.015591 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.015605 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.015627 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.015642 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:44Z","lastTransitionTime":"2025-11-28T13:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.118989 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.119045 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.119055 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.119072 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.119083 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:44Z","lastTransitionTime":"2025-11-28T13:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.222525 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.222577 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.222590 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.222605 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.222616 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:44Z","lastTransitionTime":"2025-11-28T13:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.227882 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.227936 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.228000 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:30:44 crc kubenswrapper[4857]: E1128 13:30:44.228129 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:30:44 crc kubenswrapper[4857]: E1128 13:30:44.228264 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:30:44 crc kubenswrapper[4857]: E1128 13:30:44.228511 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.325043 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.325094 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.325106 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.325122 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.325136 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:44Z","lastTransitionTime":"2025-11-28T13:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.427843 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.427880 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.427906 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.427923 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.427964 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:44Z","lastTransitionTime":"2025-11-28T13:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.532216 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.532275 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.532289 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.532317 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.532329 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:44Z","lastTransitionTime":"2025-11-28T13:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.635427 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.635466 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.635477 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.635494 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.635507 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:44Z","lastTransitionTime":"2025-11-28T13:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.738170 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.738231 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.738241 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.738254 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.738265 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:44Z","lastTransitionTime":"2025-11-28T13:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.841224 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.841470 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.841481 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.841502 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.841513 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:44Z","lastTransitionTime":"2025-11-28T13:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.944932 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.945003 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.945062 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.945084 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:44 crc kubenswrapper[4857]: I1128 13:30:44.945098 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:44Z","lastTransitionTime":"2025-11-28T13:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.048050 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.048433 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.048446 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.048508 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.048521 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:45Z","lastTransitionTime":"2025-11-28T13:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.151180 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.151223 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.151234 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.151249 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.151261 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:45Z","lastTransitionTime":"2025-11-28T13:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.228452 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:30:45 crc kubenswrapper[4857]: E1128 13:30:45.228623 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.253283 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.253339 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.253351 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.253367 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.253386 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:45Z","lastTransitionTime":"2025-11-28T13:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.356315 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.356356 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.356366 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.356383 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.356395 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:45Z","lastTransitionTime":"2025-11-28T13:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.363909 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.363964 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.363975 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.363990 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.364000 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:45Z","lastTransitionTime":"2025-11-28T13:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:45 crc kubenswrapper[4857]: E1128 13:30:45.380018 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:45Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.385372 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.385416 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.385428 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.385464 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.385476 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:45Z","lastTransitionTime":"2025-11-28T13:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:45 crc kubenswrapper[4857]: E1128 13:30:45.402305 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:45Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.406758 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.407050 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.407156 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.407265 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.407355 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:45Z","lastTransitionTime":"2025-11-28T13:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:45 crc kubenswrapper[4857]: E1128 13:30:45.421443 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:45Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.425581 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.425634 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.425646 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.425671 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.425684 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:45Z","lastTransitionTime":"2025-11-28T13:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:45 crc kubenswrapper[4857]: E1128 13:30:45.440887 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:45Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.445465 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.445708 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.445796 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.445883 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.445987 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:45Z","lastTransitionTime":"2025-11-28T13:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:45 crc kubenswrapper[4857]: E1128 13:30:45.461895 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:45Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:45 crc kubenswrapper[4857]: E1128 13:30:45.462449 4857 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.464640 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.464817 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.464922 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.465049 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.465131 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:45Z","lastTransitionTime":"2025-11-28T13:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.568609 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.568682 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.568700 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.568725 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.568743 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:45Z","lastTransitionTime":"2025-11-28T13:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.671571 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.671737 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.671764 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.671791 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.671805 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:45Z","lastTransitionTime":"2025-11-28T13:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.774984 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.775050 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.775072 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.775100 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.775121 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:45Z","lastTransitionTime":"2025-11-28T13:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.878274 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.878344 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.878365 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.878394 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.878418 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:45Z","lastTransitionTime":"2025-11-28T13:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.982167 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.982241 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.982264 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.982295 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:45 crc kubenswrapper[4857]: I1128 13:30:45.982317 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:45Z","lastTransitionTime":"2025-11-28T13:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.086311 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.086371 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.086383 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.086398 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.086409 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:46Z","lastTransitionTime":"2025-11-28T13:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.189388 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.189444 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.189459 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.189513 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.189527 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:46Z","lastTransitionTime":"2025-11-28T13:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.228706 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.228706 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.228989 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:30:46 crc kubenswrapper[4857]: E1128 13:30:46.228854 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:30:46 crc kubenswrapper[4857]: E1128 13:30:46.229143 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:30:46 crc kubenswrapper[4857]: E1128 13:30:46.229243 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.292291 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.292359 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.292372 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.292395 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.292406 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:46Z","lastTransitionTime":"2025-11-28T13:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.395598 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.395655 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.395668 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.395687 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.395700 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:46Z","lastTransitionTime":"2025-11-28T13:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.498411 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.498458 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.498469 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.498487 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.498499 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:46Z","lastTransitionTime":"2025-11-28T13:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.600661 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.600700 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.600709 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.600725 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.600737 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:46Z","lastTransitionTime":"2025-11-28T13:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.702939 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.702993 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.703002 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.703016 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.703028 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:46Z","lastTransitionTime":"2025-11-28T13:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.805513 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.805568 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.805580 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.805597 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.805610 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:46Z","lastTransitionTime":"2025-11-28T13:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.908395 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.908493 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.908512 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.908569 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:46 crc kubenswrapper[4857]: I1128 13:30:46.908586 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:46Z","lastTransitionTime":"2025-11-28T13:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.011147 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.011199 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.011241 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.011263 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.011277 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:47Z","lastTransitionTime":"2025-11-28T13:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.113199 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.113247 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.113256 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.113272 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.113287 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:47Z","lastTransitionTime":"2025-11-28T13:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.216358 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.216406 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.216419 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.216437 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.216449 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:47Z","lastTransitionTime":"2025-11-28T13:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.227837 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:30:47 crc kubenswrapper[4857]: E1128 13:30:47.228103 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.318508 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.318549 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.318557 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.318571 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.318581 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:47Z","lastTransitionTime":"2025-11-28T13:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.421769 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.421850 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.421860 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.421876 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.421891 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:47Z","lastTransitionTime":"2025-11-28T13:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.524867 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.525035 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.525109 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.525141 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.525243 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:47Z","lastTransitionTime":"2025-11-28T13:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.628086 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.628172 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.628187 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.628211 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.628224 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:47Z","lastTransitionTime":"2025-11-28T13:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.730835 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.730888 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.730903 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.730920 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.730932 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:47Z","lastTransitionTime":"2025-11-28T13:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.834152 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.834197 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.834209 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.834225 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.834239 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:47Z","lastTransitionTime":"2025-11-28T13:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.937098 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.937163 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.937175 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.937192 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:47 crc kubenswrapper[4857]: I1128 13:30:47.937205 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:47Z","lastTransitionTime":"2025-11-28T13:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.039369 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.039417 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.039428 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.039444 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.039459 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:48Z","lastTransitionTime":"2025-11-28T13:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.142802 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.142873 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.142892 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.142918 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.142936 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:48Z","lastTransitionTime":"2025-11-28T13:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.228100 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.228191 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.228129 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:30:48 crc kubenswrapper[4857]: E1128 13:30:48.228299 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:30:48 crc kubenswrapper[4857]: E1128 13:30:48.228758 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:30:48 crc kubenswrapper[4857]: E1128 13:30:48.228815 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.247882 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.247975 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.247995 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.248032 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.248051 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:48Z","lastTransitionTime":"2025-11-28T13:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.351332 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.351377 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.351387 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.351408 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.351419 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:48Z","lastTransitionTime":"2025-11-28T13:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.455110 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.455147 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.455156 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.455172 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.455184 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:48Z","lastTransitionTime":"2025-11-28T13:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.558197 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.558255 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.558271 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.558290 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.558301 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:48Z","lastTransitionTime":"2025-11-28T13:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.661014 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.661049 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.661059 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.661076 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.661087 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:48Z","lastTransitionTime":"2025-11-28T13:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.764196 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.764279 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.764302 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.764333 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.764355 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:48Z","lastTransitionTime":"2025-11-28T13:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.868494 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.868549 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.868560 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.868580 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.868593 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:48Z","lastTransitionTime":"2025-11-28T13:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.972426 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.972480 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.972491 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.972507 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:48 crc kubenswrapper[4857]: I1128 13:30:48.972518 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:48Z","lastTransitionTime":"2025-11-28T13:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.075574 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.075625 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.075635 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.075650 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.075662 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:49Z","lastTransitionTime":"2025-11-28T13:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.178171 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.178226 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.178238 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.178254 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.178265 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:49Z","lastTransitionTime":"2025-11-28T13:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.228336 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:30:49 crc kubenswrapper[4857]: E1128 13:30:49.228496 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.280753 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.280796 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.280812 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.280827 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.280838 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:49Z","lastTransitionTime":"2025-11-28T13:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.384331 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.384375 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.384383 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.384397 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.384406 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:49Z","lastTransitionTime":"2025-11-28T13:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.486936 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.486987 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.486996 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.487009 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.487018 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:49Z","lastTransitionTime":"2025-11-28T13:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.590240 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.590297 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.590312 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.590330 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.590344 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:49Z","lastTransitionTime":"2025-11-28T13:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.693020 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.693078 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.693090 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.693107 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.693121 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:49Z","lastTransitionTime":"2025-11-28T13:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.744318 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0cf43f51-b79b-49fc-85ca-a245a248f27a-metrics-certs\") pod \"network-metrics-daemon-26tq7\" (UID: \"0cf43f51-b79b-49fc-85ca-a245a248f27a\") " pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:30:49 crc kubenswrapper[4857]: E1128 13:30:49.744538 4857 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 13:30:49 crc kubenswrapper[4857]: E1128 13:30:49.744641 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0cf43f51-b79b-49fc-85ca-a245a248f27a-metrics-certs podName:0cf43f51-b79b-49fc-85ca-a245a248f27a nodeName:}" failed. No retries permitted until 2025-11-28 13:31:53.744611009 +0000 UTC m=+163.868552486 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0cf43f51-b79b-49fc-85ca-a245a248f27a-metrics-certs") pod "network-metrics-daemon-26tq7" (UID: "0cf43f51-b79b-49fc-85ca-a245a248f27a") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.795934 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.796068 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.796093 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.796123 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.796150 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:49Z","lastTransitionTime":"2025-11-28T13:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.898370 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.898410 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.898419 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.898433 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:49 crc kubenswrapper[4857]: I1128 13:30:49.898445 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:49Z","lastTransitionTime":"2025-11-28T13:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.001399 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.001444 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.001454 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.001470 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.001483 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:50Z","lastTransitionTime":"2025-11-28T13:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.104541 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.104600 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.104615 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.104632 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.104645 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:50Z","lastTransitionTime":"2025-11-28T13:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.208123 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.208189 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.208202 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.208223 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.208242 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:50Z","lastTransitionTime":"2025-11-28T13:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.228866 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.228930 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.228874 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:30:50 crc kubenswrapper[4857]: E1128 13:30:50.229059 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:30:50 crc kubenswrapper[4857]: E1128 13:30:50.229418 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:30:50 crc kubenswrapper[4857]: E1128 13:30:50.229615 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.260660 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.269108 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.286189 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.309602 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.309636 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.309651 4857 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.309666 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.309677 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:50Z","lastTransitionTime":"2025-11-28T13:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.310774 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52d03fd9944b8321b802a2a4317030abb1644c2c52c849ca35b5ba1eee3ebb4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d
0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Runnin
g\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.336882 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8587850a181bf875ab24005a4b044489046212ae89b15834ccb0f0b46d3518b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b553128fa5576f5ac2e327e5e679a8098348ee31f0632c34d2b725d92198f8b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60c0f808818d6f71bad9f7b52d7fe42823b6e8ad83d3395366cab0f90fb75ec2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://933498cbc402ee7f14aab38ef86c401dc46df3c21d986c2a97ff1871bb7b7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4bb280266a0eb3369080dfcb1e4d0522f2d365a300f82637288356703bde242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1dc7ae9f1870db29ad30c6b4f3c8f8d28015c1e3d9bf27171e5e8a95d51b1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://63d0afa8ede2911fc623c7b2bd7143f0bac957f3
88a64f35db245dd14a67c558\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63d0afa8ede2911fc623c7b2bd7143f0bac957f388a64f35db245dd14a67c558\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:30:36Z\\\",\\\"message\\\":\\\"er {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:36Z is after 2025-08-24T17:21:41Z]\\\\nI1128 13:30:36.045272 6961 services_controller.go:454] Service openshift-oauth-apiserver/api for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nI1128 13:30:36.045299 6961 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-oauth-apiserver/api_TCP_cluster\\\\\\\", UUID:\\\\\\\"fe46cb89-4e54-4175-a112-1c5224cd299e\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-oauth-apiserver/api\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Rou\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:30:35Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-h8td2_openshift-ovn-kubernetes(46c5e02c-be1a-45b7-86ef-cc8c484c4f71)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f7d207494d4959cc23a5df6738539f073ec3c45cfe0339bd14b1a3e876f6031\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.355576 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"adf71647-2fc9-4497-81ee-84e6373498b4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://57e2a3423c1095d435ed7835a89df7e3e8f12c8df10707e1a82ae4595fadf4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d598096a9163436c21991255cd21335da8307e56e5607288d289da81d77578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kmjsb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.373740 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.387658 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ea3abc8-9e6f-454d-ac25-f04483a5d4d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93cf74b7715a705e22a03a506d90d151a961e80a83a4aeda3d4e361cfc2a178d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc226ca8af9c501cbd610bcd82596f359f573fc294ce0830056d1d59bd066b0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8caf792b0a8768a68663568cc8a1cdca8c4f3394f30fe098841c6e37b9cf9980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5c76797e337ee42b458f3cc8108b46bd86b18d98a1a07ed60df3778203feb39\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e5c76797e337ee42b458f3cc8108b46bd86b18d98a1a07ed60df3778203feb39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.402665 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.413054 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.413111 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.413126 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.413151 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.413166 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:50Z","lastTransitionTime":"2025-11-28T13:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.419617 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4e401482ae63a6e4ae751bb6a59688781c6b78961e3936facd2a2da80a538e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:30:19Z\\\",\\\"message\\\":\\\"2025-11-28T13:29:33+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_4169fb9e-7cb6-454a-8caa-0f2881067744\\\\n2025-11-28T13:29:33+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_4169fb9e-7cb6-454a-8caa-0f2881067744 to /host/opt/cni/bin/\\\\n2025-11-28T13:29:33Z [verbose] multus-daemon started\\\\n2025-11-28T13:29:33Z [verbose] Readiness Indicator file check\\\\n2025-11-28T13:30:18Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.432379 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cs9jw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6aa30f3141838a2780e6e300545fcb2d71a80ed4b146759f8dff91c976c3f36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zchsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cs9jw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.445349 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b8298b87-d55d-436d-8b66-848d1fb0563f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a4ebf7d9e3c843109e00e0275ae58a7c1ac629b408d5a60afb11e2a77adee6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f6bc7875fb476c21ff62d60c4829056eaa350b20beff5473fc880ae20ede265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f6bc7875fb476c21ff62d60c4829056eaa350b20beff5473fc880ae20ede265\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.463016 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.477870 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.492380 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\
":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.507784 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.516290 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.516352 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.516369 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.516395 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.516411 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:50Z","lastTransitionTime":"2025-11-28T13:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.528600 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\
":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.545471 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.560816 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-26tq7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cf43f51-b79b-49fc-85ca-a245a248f27a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-26tq7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:50Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.619581 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.619628 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.619639 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.619662 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.619674 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:50Z","lastTransitionTime":"2025-11-28T13:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.722501 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.722553 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.722568 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.722587 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.722600 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:50Z","lastTransitionTime":"2025-11-28T13:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.825975 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.826030 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.826043 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.826063 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.826076 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:50Z","lastTransitionTime":"2025-11-28T13:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.929180 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.929246 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.929271 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.929301 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:50 crc kubenswrapper[4857]: I1128 13:30:50.929321 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:50Z","lastTransitionTime":"2025-11-28T13:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.032280 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.032349 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.032370 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.032396 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.032414 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:51Z","lastTransitionTime":"2025-11-28T13:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.136359 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.136413 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.136431 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.136458 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.136475 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:51Z","lastTransitionTime":"2025-11-28T13:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.228215 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:30:51 crc kubenswrapper[4857]: E1128 13:30:51.228668 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.229019 4857 scope.go:117] "RemoveContainer" containerID="63d0afa8ede2911fc623c7b2bd7143f0bac957f388a64f35db245dd14a67c558" Nov 28 13:30:51 crc kubenswrapper[4857]: E1128 13:30:51.229236 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-h8td2_openshift-ovn-kubernetes(46c5e02c-be1a-45b7-86ef-cc8c484c4f71)\"" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.240003 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.240063 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.240085 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.240109 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.240130 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:51Z","lastTransitionTime":"2025-11-28T13:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.343036 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.343096 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.343118 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.343142 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.343158 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:51Z","lastTransitionTime":"2025-11-28T13:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.447071 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.447138 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.447159 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.447191 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.447209 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:51Z","lastTransitionTime":"2025-11-28T13:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.549889 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.549942 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.549969 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.549988 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.550004 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:51Z","lastTransitionTime":"2025-11-28T13:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.653181 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.653248 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.653260 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.653279 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.653299 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:51Z","lastTransitionTime":"2025-11-28T13:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
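The NotReady flapping above has the single cause that the condition message states directly: the container runtime finds no CNI network config under /etc/kubernetes/cni/net.d/, and the multus entry earlier in the log shows why (it was still waiting for OVN-Kubernetes to write 10-ovn-kubernetes.conf). As an editorial illustration only (not the kubelet's actual implementation), a minimal Go sketch of such a config-presence probe, assuming nothing beyond the directory path quoted in the message:

```go
// Sketch: report whether a CNI configuration directory contains any network
// config files. An empty directory is the state behind the repeated
// "no CNI configuration file in /etc/kubernetes/cni/net.d/" condition above.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	confDir := "/etc/kubernetes/cni/net.d" // path quoted in the log message
	entries, err := os.ReadDir(confDir)
	if err != nil {
		fmt.Println("read dir:", err)
		return
	}
	var configs []string
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json": // extensions CNI config loaders commonly accept
			configs = append(configs, e.Name())
		}
	}
	if len(configs) == 0 {
		fmt.Println("no CNI configuration file in", confDir)
		return
	}
	fmt.Println("CNI configs found:", configs)
}
```

Once the network provider drops a config file into that directory, the NetworkReady condition should clear and these repeated NotReady transitions should stop.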
Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.756257 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.756336 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.756353 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.756381 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.756397 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:51Z","lastTransitionTime":"2025-11-28T13:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.858934 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.859011 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.859025 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.859048 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.859061 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:51Z","lastTransitionTime":"2025-11-28T13:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.962024 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.962064 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.962074 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.962089 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:30:51 crc kubenswrapper[4857]: I1128 13:30:51.962100 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:51Z","lastTransitionTime":"2025-11-28T13:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.065504 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.065549 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.065561 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.065582 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.065598 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:52Z","lastTransitionTime":"2025-11-28T13:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.169344 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.169400 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.169412 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.169429 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.169447 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:52Z","lastTransitionTime":"2025-11-28T13:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.228681 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:30:52 crc kubenswrapper[4857]: E1128 13:30:52.228858 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.229012 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.229036 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:30:52 crc kubenswrapper[4857]: E1128 13:30:52.229193 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:30:52 crc kubenswrapper[4857]: E1128 13:30:52.229673 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.273539 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.273600 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.273611 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.273628 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.273637 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:52Z","lastTransitionTime":"2025-11-28T13:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.376321 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.376398 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.376422 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.376455 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.376478 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:52Z","lastTransitionTime":"2025-11-28T13:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.483589 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.483642 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.483652 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.483668 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.483678 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:52Z","lastTransitionTime":"2025-11-28T13:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.586648 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.586712 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.586724 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.586743 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.586757 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:52Z","lastTransitionTime":"2025-11-28T13:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.689271 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.689326 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.689337 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.689356 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.689371 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:52Z","lastTransitionTime":"2025-11-28T13:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.792928 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.792988 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.792998 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.793014 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.793025 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:52Z","lastTransitionTime":"2025-11-28T13:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.896493 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.896557 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.896567 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.896583 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:52 crc kubenswrapper[4857]: I1128 13:30:52.896597 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:52Z","lastTransitionTime":"2025-11-28T13:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.000728 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.000789 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.000799 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.000820 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.000833 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:53Z","lastTransitionTime":"2025-11-28T13:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.103780 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.103842 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.103858 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.103877 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.103891 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:53Z","lastTransitionTime":"2025-11-28T13:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.205878 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.205979 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.205989 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.206005 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.206022 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:53Z","lastTransitionTime":"2025-11-28T13:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.228506 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:30:53 crc kubenswrapper[4857]: E1128 13:30:53.228778 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.308016 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.308057 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.308067 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.308103 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.308113 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:53Z","lastTransitionTime":"2025-11-28T13:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.410801 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.410845 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.410856 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.410872 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.410886 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:53Z","lastTransitionTime":"2025-11-28T13:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.514654 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.514748 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.514772 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.514806 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.514837 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:53Z","lastTransitionTime":"2025-11-28T13:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.617871 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.617980 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.618005 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.618038 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.618062 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:53Z","lastTransitionTime":"2025-11-28T13:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.721260 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.721328 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.721338 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.721357 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.721367 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:53Z","lastTransitionTime":"2025-11-28T13:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.824466 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.824514 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.824543 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.824561 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.824574 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:53Z","lastTransitionTime":"2025-11-28T13:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.927647 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.927711 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.927727 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.927747 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:53 crc kubenswrapper[4857]: I1128 13:30:53.927761 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:53Z","lastTransitionTime":"2025-11-28T13:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.030771 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.030813 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.030822 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.030840 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.030851 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:54Z","lastTransitionTime":"2025-11-28T13:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.133289 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.133335 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.133347 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.133362 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.133374 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:54Z","lastTransitionTime":"2025-11-28T13:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.228523 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.228582 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:30:54 crc kubenswrapper[4857]: E1128 13:30:54.228674 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.228807 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:30:54 crc kubenswrapper[4857]: E1128 13:30:54.228965 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:30:54 crc kubenswrapper[4857]: E1128 13:30:54.229079 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.235735 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.235792 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.235810 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.235828 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.235855 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:54Z","lastTransitionTime":"2025-11-28T13:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.339384 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.339440 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.339459 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.339484 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.339505 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:54Z","lastTransitionTime":"2025-11-28T13:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.441831 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.441884 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.441893 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.441910 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.441923 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:54Z","lastTransitionTime":"2025-11-28T13:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.544325 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.544372 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.544385 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.544401 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.544412 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:54Z","lastTransitionTime":"2025-11-28T13:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.646776 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.646839 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.646857 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.646881 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.646899 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:54Z","lastTransitionTime":"2025-11-28T13:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.750227 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.750295 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.750318 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.750348 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.750371 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:54Z","lastTransitionTime":"2025-11-28T13:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.852668 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.852701 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.852708 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.852721 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.852731 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:54Z","lastTransitionTime":"2025-11-28T13:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.955193 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.955240 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.955249 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.955265 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:54 crc kubenswrapper[4857]: I1128 13:30:54.955276 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:54Z","lastTransitionTime":"2025-11-28T13:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.058598 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.058656 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.058672 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.058690 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.058701 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:55Z","lastTransitionTime":"2025-11-28T13:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.161243 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.161317 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.161340 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.161367 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.161390 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:55Z","lastTransitionTime":"2025-11-28T13:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.228430 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:30:55 crc kubenswrapper[4857]: E1128 13:30:55.228605 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.265030 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.265100 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.265120 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.265143 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.265161 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:55Z","lastTransitionTime":"2025-11-28T13:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.369255 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.369306 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.369322 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.369345 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.369359 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:55Z","lastTransitionTime":"2025-11-28T13:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.473073 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.473166 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.473191 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.473224 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.473247 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:55Z","lastTransitionTime":"2025-11-28T13:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.478053 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.478112 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.478126 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.478146 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.478165 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:55Z","lastTransitionTime":"2025-11-28T13:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:55 crc kubenswrapper[4857]: E1128 13:30:55.496679 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:55Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.501648 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.501738 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.501759 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.501800 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.501817 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:55Z","lastTransitionTime":"2025-11-28T13:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:55 crc kubenswrapper[4857]: E1128 13:30:55.517325 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:55Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.521434 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.521513 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.521548 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.521589 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.521624 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:55Z","lastTransitionTime":"2025-11-28T13:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:55 crc kubenswrapper[4857]: E1128 13:30:55.536899 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:55Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.541376 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.541458 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.541474 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.541495 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.541507 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:55Z","lastTransitionTime":"2025-11-28T13:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:55 crc kubenswrapper[4857]: E1128 13:30:55.558971 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:55Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.564091 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.564145 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.564160 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.564181 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.564193 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:55Z","lastTransitionTime":"2025-11-28T13:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:55 crc kubenswrapper[4857]: E1128 13:30:55.584726 4857 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404564Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865364Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2cd46941-ebc0-4d64-8ed5-520d3d122aa4\\\",\\\"systemUUID\\\":\\\"fb89c177-76d4-4664-9e2a-ee1d63d6009b\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:55Z is after 2025-08-24T17:21:41Z" Nov 28 13:30:55 crc kubenswrapper[4857]: E1128 13:30:55.584873 4857 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.587334 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.587411 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.587431 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.587453 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.587465 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:55Z","lastTransitionTime":"2025-11-28T13:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.690710 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.690789 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.690803 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.690825 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.690866 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:55Z","lastTransitionTime":"2025-11-28T13:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.794313 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.794382 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.794395 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.794415 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.794429 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:55Z","lastTransitionTime":"2025-11-28T13:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.898188 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.898233 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.898242 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.898261 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:55 crc kubenswrapper[4857]: I1128 13:30:55.898273 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:55Z","lastTransitionTime":"2025-11-28T13:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.001411 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.001458 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.001469 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.001487 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.001499 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:56Z","lastTransitionTime":"2025-11-28T13:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.104290 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.104342 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.104354 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.104374 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.104389 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:56Z","lastTransitionTime":"2025-11-28T13:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.207024 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.207089 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.207102 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.207119 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.207132 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:56Z","lastTransitionTime":"2025-11-28T13:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.228173 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.228179 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:30:56 crc kubenswrapper[4857]: E1128 13:30:56.228297 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.228407 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:30:56 crc kubenswrapper[4857]: E1128 13:30:56.228608 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:30:56 crc kubenswrapper[4857]: E1128 13:30:56.228797 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.310350 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.310392 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.310401 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.310416 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.310430 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:56Z","lastTransitionTime":"2025-11-28T13:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.413534 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.413590 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.413599 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.413616 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.413628 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:56Z","lastTransitionTime":"2025-11-28T13:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.516363 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.516405 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.516415 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.516430 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.516441 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:56Z","lastTransitionTime":"2025-11-28T13:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.619274 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.619320 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.619333 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.619351 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.619363 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:56Z","lastTransitionTime":"2025-11-28T13:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.722642 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.722700 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.722710 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.722728 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.722740 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:56Z","lastTransitionTime":"2025-11-28T13:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.825535 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.825588 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.825601 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.825620 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.825634 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:56Z","lastTransitionTime":"2025-11-28T13:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.928232 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.928276 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.928287 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.928302 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:56 crc kubenswrapper[4857]: I1128 13:30:56.928313 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:56Z","lastTransitionTime":"2025-11-28T13:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.031363 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.031397 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.031405 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.031419 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.031428 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:57Z","lastTransitionTime":"2025-11-28T13:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.133961 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.134000 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.134011 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.134026 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.134036 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:57Z","lastTransitionTime":"2025-11-28T13:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.228378 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:30:57 crc kubenswrapper[4857]: E1128 13:30:57.228548 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.236668 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.236697 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.236706 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.236721 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.236756 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:57Z","lastTransitionTime":"2025-11-28T13:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.339241 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.339287 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.339297 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.339547 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.339579 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:57Z","lastTransitionTime":"2025-11-28T13:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.441698 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.441768 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.441786 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.441816 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.441836 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:57Z","lastTransitionTime":"2025-11-28T13:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.544662 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.544917 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.544938 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.545010 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.545030 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:57Z","lastTransitionTime":"2025-11-28T13:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.647472 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.647516 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.647527 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.647547 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.647560 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:57Z","lastTransitionTime":"2025-11-28T13:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.749957 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.749998 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.750008 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.750023 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.750034 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:57Z","lastTransitionTime":"2025-11-28T13:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.852698 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.852736 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.852745 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.852760 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.852771 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:57Z","lastTransitionTime":"2025-11-28T13:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.955609 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.955650 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.955659 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.955676 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:57 crc kubenswrapper[4857]: I1128 13:30:57.955687 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:57Z","lastTransitionTime":"2025-11-28T13:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.058419 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.058458 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.058466 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.058480 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.058491 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:58Z","lastTransitionTime":"2025-11-28T13:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.160799 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.160846 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.160858 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.160874 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.160887 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:58Z","lastTransitionTime":"2025-11-28T13:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.228172 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.228229 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:30:58 crc kubenswrapper[4857]: E1128 13:30:58.228321 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.228172 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:30:58 crc kubenswrapper[4857]: E1128 13:30:58.228438 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:30:58 crc kubenswrapper[4857]: E1128 13:30:58.228531 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.263081 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.263132 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.263147 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.263165 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.263180 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:58Z","lastTransitionTime":"2025-11-28T13:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.366379 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.366432 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.366441 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.366457 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.366471 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:58Z","lastTransitionTime":"2025-11-28T13:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.468605 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.468646 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.468654 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.468688 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.468705 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:58Z","lastTransitionTime":"2025-11-28T13:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.571665 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.571711 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.571722 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.571738 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.571751 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:58Z","lastTransitionTime":"2025-11-28T13:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.674513 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.674568 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.674586 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.674616 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.674635 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:58Z","lastTransitionTime":"2025-11-28T13:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.777190 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.777263 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.777292 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.777323 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.777346 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:58Z","lastTransitionTime":"2025-11-28T13:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.880037 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.880335 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.880401 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.880500 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.880588 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:58Z","lastTransitionTime":"2025-11-28T13:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.984058 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.984141 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.984164 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.984188 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:58 crc kubenswrapper[4857]: I1128 13:30:58.984205 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:58Z","lastTransitionTime":"2025-11-28T13:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.087375 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.087434 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.087472 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.087489 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.087502 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:59Z","lastTransitionTime":"2025-11-28T13:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.191205 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.191277 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.191298 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.191329 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.191344 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:59Z","lastTransitionTime":"2025-11-28T13:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.227929 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:30:59 crc kubenswrapper[4857]: E1128 13:30:59.228113 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.295181 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.295237 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.295249 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.295271 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.295285 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:59Z","lastTransitionTime":"2025-11-28T13:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.398576 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.398665 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.398693 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.398728 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.398753 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:59Z","lastTransitionTime":"2025-11-28T13:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.502444 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.502503 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.502514 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.502533 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.502545 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:59Z","lastTransitionTime":"2025-11-28T13:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.604986 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.605032 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.605046 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.605064 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.605078 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:59Z","lastTransitionTime":"2025-11-28T13:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.708270 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.708392 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.708412 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.708438 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.708456 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:59Z","lastTransitionTime":"2025-11-28T13:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.811085 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.811133 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.811145 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.811163 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.811175 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:59Z","lastTransitionTime":"2025-11-28T13:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.913731 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.913799 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.913815 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.913831 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:30:59 crc kubenswrapper[4857]: I1128 13:30:59.913860 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:30:59Z","lastTransitionTime":"2025-11-28T13:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.017445 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.017503 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.017518 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.017536 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.017547 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:00Z","lastTransitionTime":"2025-11-28T13:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.119598 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.119645 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.119655 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.119670 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.119686 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:00Z","lastTransitionTime":"2025-11-28T13:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.222249 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.222320 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.222338 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.222364 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.222382 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:00Z","lastTransitionTime":"2025-11-28T13:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.228716 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.228809 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.228739 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:31:00 crc kubenswrapper[4857]: E1128 13:31:00.228917 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
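Every entry above, and the remaining "Error syncing pod" entries below, trace back to a single condition: the kubelet keeps the node NotReady because no CNI network configuration exists yet in /etc/kubernetes/cni/net.d/. A minimal Go sketch of that readiness test follows; it is not the kubelet's actual implementation, the directory path comes from the log message itself, and the recognized file extensions (.conf, .conflist, .json) are the usual CNI conventions, assumed here.

// cnicheck.go - minimal sketch (assumption: not kubelet code) of the check
// implied by the "no CNI configuration file in /etc/kubernetes/cni/net.d/"
// messages: the node stays NotReady until at least one CNI config file appears.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	confDir := "/etc/kubernetes/cni/net.d" // directory named in the log
	entries, err := os.ReadDir(confDir)
	if err != nil {
		fmt.Printf("cannot read %s: %v\n", confDir, err)
		os.Exit(1)
	}
	var found []string
	for _, e := range entries {
		// extensions conventionally accepted for CNI config files (assumption)
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			found = append(found, e.Name())
		}
	}
	if len(found) == 0 {
		fmt.Println("no CNI configuration file found; network plugin not ready")
		os.Exit(1)
	}
	fmt.Printf("CNI configuration present: %v\n", found)
}

In this log the check keeps failing because the cluster's network operator has not written any config yet, so every pod that needs a sandbox (network-check-target, network-check-source, networking-console-plugin, network-metrics-daemon) is skipped on each sync attempt.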
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:31:00 crc kubenswrapper[4857]: E1128 13:31:00.229057 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:31:00 crc kubenswrapper[4857]: E1128 13:31:00.229179 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.242008 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:31:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.253561 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5d5445a4-417c-448a-a8a0-4a4f81828aff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b6a12dd82b0c9cead62090106601ac9f63756e082c27e97d73feb171f471aa45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\
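The "Failed to update status for pod" entries above and below all fail for the same reason: the status patch must pass through the pod.network-node-identity.openshift.io admission webhook at https://127.0.0.1:9743, and that webhook's TLS serving certificate expired on 2025-08-24T17:21:41Z while the node's clock reads 2025-11-28. A minimal Go sketch of the validity-window check behind this kind of x509 error follows; it mirrors the standard library's NotBefore/NotAfter comparison, and the certificate file path is hypothetical, not taken from the log.

// certcheck.go - minimal sketch of the check behind "x509: certificate has
// expired or is not yet valid": the current time must fall inside the
// certificate's NotBefore..NotAfter window.
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	// hypothetical path to the webhook's serving certificate (assumption)
	pemBytes, err := os.ReadFile("/tmp/webhook-serving.crt")
	if err != nil {
		fmt.Println("read:", err)
		os.Exit(1)
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		fmt.Println("no PEM block found")
		os.Exit(1)
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		fmt.Println("parse:", err)
		os.Exit(1)
	}
	now := time.Now()
	switch {
	case now.After(cert.NotAfter):
		// matches the log: "current time ... is after 2025-08-24T17:21:41Z"
		fmt.Printf("expired: current time %s is after %s\n",
			now.Format(time.RFC3339), cert.NotAfter.Format(time.RFC3339))
		os.Exit(1)
	case now.Before(cert.NotBefore):
		fmt.Printf("not yet valid until %s\n", cert.NotBefore.Format(time.RFC3339))
		os.Exit(1)
	default:
		fmt.Println("certificate is within its validity window")
	}
}

Until the webhook certificate is rotated, every status patch the kubelet sends is rejected at admission, as the remaining entries for machine-config-daemon-dshsf, node-resolver-ggbvt, and kube-rbac-proxy-crio-crc show.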
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-d582p\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-dshsf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:31:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.266488 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ggbvt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ca720c4-3756-47cc-b59d-3167a4141804\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ff85143fcc00cf4c2489c89938b1c68d14b5a05ec805b2b9523c00f67a8e1986\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5zxxn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ggbvt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:31:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.277451 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b8298b87-d55d-436d-8b66-848d1fb0563f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a4ebf7d9e3c843109e00e0275ae58a7c1ac629b408d5a60afb11e2a77adee6e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f6bc7875fb476c21ff62d60c4829056eaa350b20beff5473fc880ae20ede265\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f6bc7875fb476c21ff62d60c4829056eaa350b20beff5473fc880ae20ede265\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or 
is not yet valid: current time 2025-11-28T13:31:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.289352 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fea11d6b96c962ad6fdbc0c8c1bba362adb7ad2ecc0a9222ec9cf420c065c431\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:31:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.306532 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c1eeadf4bfbb7e301e655f343b527985c9d76cd31140c944956a084ecd55eaf8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:31:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.322663 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-26tq7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cf43f51-b79b-49fc-85ca-a245a248f27a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:45Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7gszj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:45Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-26tq7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:31:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.324888 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.324992 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.325007 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.325025 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.325054 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:00Z","lastTransitionTime":"2025-11-28T13:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.346008 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f90e446f-2ee0-4e9b-a0d9-9e92e64e0c3d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://10f327fe4acd76bf71128672e66dc01e2eee9224de01bb356b0eb7cec633c37a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://79786b3c521c60ad312c8360152cc00e4651827dd5b344f027c162852fda7683\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2b9c451fad3c8f775f6cd12019fa0734b08180a5f9db6629239e42ee7dc2aab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6890e5083d46a2142758e2627a32e0737cff6a0e5b037b5388505be00bd442f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:15Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f4678ddc3433a145f1fdb1d0bde8226d5710e6fcd98cd49910a8a51005bc079\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d848aac34e5933419aad61d2b4b49046a7704d4fee23e53070d2946647d70e79\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d848aac34e5933419aad61d2b4b49046a7704d4fee23e53070d2946647d70e79\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://20ab98aa21c8412fcdc34158485cb7690fd8e8b4467bb1ab3086701997e2677c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://20ab98aa21c8412fcdc34158485cb7690fd8e8b4467bb1ab3086701997e2677c\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://2a37878479145d6662bbf57bf6f0b3c23cbb3b5dfe077ba90c5d0fa30a5cda08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2a37878479145d6662bbf57bf6f0b3c23cbb3b5dfe077ba90c5d0fa30a5cda08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:31:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.358169 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f071736e-84b7-4bf0-a3b6-1b602ccf758b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f0f8136854f3ee524466787507edaeaea0988d170cac84f99e6eed944654e92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e4b96cfaffe29f2a4acaa34b338b97b2f9360011e1d40727fec89028ddb81e75\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5bde100c3e7d63ac9dae8dcecc01d2f18a0aef8c418d1774be9d74c9b611c7ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:31:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.373803 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qbndv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1dbaee30-9cc4-4ff6-b89f-afe2b1a1769b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52d03fd9944b8321b802a2a4317030abb1644c2c52c849ca35b5ba1eee3ebb4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9
a210680289aa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://fb87483395f5809dcd6ceefe05556779198c968fe09c8bcf19d9a210680289aa\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3c02315404663ee36aebef9497133d4e7f21476dc588b7d3911bbd7aa5b1173\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f62bfc5cca4768e4540c2b95291d4a776ca21cceb0013e19b327b6bcc89302ae\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-b
inary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4d262c15a0fafb03375560d6888677c28e4fd9b29f532d4fa991bbda462dcb5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d3fd1519d435c8b3068404d621cb9aa22e488724317b017e00ac064cf594da38\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"termin
ated\\\":{\\\"containerID\\\":\\\"cri-o://ee5711f069034b14ea1bdf889cd44ce79deebfaa9a92d654775154d148c9a350\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-f6xvn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qbndv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:31:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.392743 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d8587850a181bf875ab24005a4b044489046212ae89b15834ccb0f0b46d3518b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b553128fa5576f5ac2e327e5e679a8098348ee31f0632c34d2b725d92198f8b4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60c0f808818d6f71bad9f7b52d7fe42823b6e8ad83d3395366cab0f90fb75ec2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://933498cbc402ee7f14aab38ef86c401dc46df3c21d986c2a97ff1871bb7b7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a4bb280266a0eb3369080dfcb1e4d0522f2d365a300f82637288356703bde242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d1dc7ae9f1870db29ad30c6b4f3c8f8d28015c1e3d9bf27171e5e8a95d51b1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://63d0afa8ede2911fc623c7b2bd7143f0bac957f3
88a64f35db245dd14a67c558\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://63d0afa8ede2911fc623c7b2bd7143f0bac957f388a64f35db245dd14a67c558\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:30:36Z\\\",\\\"message\\\":\\\"er {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:30:36Z is after 2025-08-24T17:21:41Z]\\\\nI1128 13:30:36.045272 6961 services_controller.go:454] Service openshift-oauth-apiserver/api for network=default has 1 cluster-wide, 0 per-node configs, 0 template configs, making 1 (cluster) 0 (per node) and 0 (template) load balancers\\\\nI1128 13:30:36.045299 6961 services_controller.go:473] Services do not match for network=default, existing lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-oauth-apiserver/api_TCP_cluster\\\\\\\", UUID:\\\\\\\"fe46cb89-4e54-4175-a112-1c5224cd299e\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-oauth-apiserver/api\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Rou\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:30:35Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-h8td2_openshift-ovn-kubernetes(46c5e02c-be1a-45b7-86ef-cc8c484c4f71)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f7d207494d4959cc23a5df6738539f073ec3c45cfe0339bd14b1a3e876f6031\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-brfsw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-h8td2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:31:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.406209 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"adf71647-2fc9-4497-81ee-84e6373498b4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://57e2a3423c1095d435ed7835a89df7e3e8f12c8df10707e1a82ae4595fadf4c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://91d598096a9163436c21991255cd21335da8307e56e5607288d289da81d77578\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mfpzd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:44Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-kmjsb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:31:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.422857 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:31:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.427662 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.427703 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.427717 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.427735 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.427747 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:00Z","lastTransitionTime":"2025-11-28T13:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.437980 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42ce4fa3ab1e843ff3c77a345d3544492835597daa04e6b3caddc5d10006a908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50cef0179c99890d31de04c695c7523e6007aebc2847a2e81ac5d2b72942ce77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:31:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.451534 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:28Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:31:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.464772 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rb7tq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"787c64de-9ce4-41eb-a525-948c23e84595\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d4e401482ae63a6e4ae751bb6a59688781c6b78961e3936facd2a2da80a538e5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-28T13:30:19Z\\\",\\\"message\\\":\\\"2025-11-28T13:29:33+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_4169fb9e-7cb6-454a-8caa-0f2881067744\\\\n2025-11-28T13:29:33+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_4169fb9e-7cb6-454a-8caa-0f2881067744 to /host/opt/cni/bin/\\\\n2025-11-28T13:29:33Z [verbose] multus-daemon started\\\\n2025-11-28T13:29:33Z [verbose] Readiness Indicator file check\\\\n2025-11-28T13:30:18Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:33Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rmswj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rb7tq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:31:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.475604 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-cs9jw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c15e860e-f7d7-4bdb-b09b-b6099204b5e4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6aa30f3141838a2780e6e300545fcb2d71a80ed4b146759f8dff91c976c3f36a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zchsr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:34Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-cs9jw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:31:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.489701 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c268e4-8cda-4a40-a67e-90e06bd93d09\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-28T13:29:30Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1128 13:29:23.832276 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1128 13:29:23.834735 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-389711911/tls.crt::/tmp/serving-cert-389711911/tls.key\\\\\\\"\\\\nI1128 13:29:30.058283 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1128 13:29:30.061380 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1128 13:29:30.061401 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1128 13:29:30.061430 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1128 13:29:30.061435 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1128 13:29:30.066406 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1128 13:29:30.066451 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066458 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1128 13:29:30.066465 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1128 13:29:30.066469 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1128 13:29:30.066473 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1128 13:29:30.066477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1128 13:29:30.066414 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1128 13:29:30.070108 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:31:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.501530 4857 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3ea3abc8-9e6f-454d-ac25-f04483a5d4d5\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:30:02Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-28T13:29:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93cf74b7715a705e22a03a506d90d151a961e80a83a4aeda3d4e361cfc2a178d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dc226ca8af9c501cbd610bcd82596f359f573fc294ce0830056d1d59bd066b0e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8caf792b0a8768a68663568cc8a1cdca8c4f3394f30fe098841c6e37b9cf9980\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-28T13:29:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e5c76797e337ee42b458f3cc8108b46bd86b18d98a1a07ed60df3778203feb39\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e5c76797e337ee42b458f3cc8108b46bd86b18d98a1a07ed60df3778203feb39\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-28T13:29:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-28T13:29:11Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-28T13:29:10Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-28T13:31:00Z is after 2025-08-24T17:21:41Z" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.531572 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.531800 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.531915 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.532072 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.532198 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:00Z","lastTransitionTime":"2025-11-28T13:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.638543 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.638579 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.638589 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.638604 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.638615 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:00Z","lastTransitionTime":"2025-11-28T13:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.740902 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.740970 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.740988 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.741009 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.741024 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:00Z","lastTransitionTime":"2025-11-28T13:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.843419 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.843466 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.843481 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.843502 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.843520 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:00Z","lastTransitionTime":"2025-11-28T13:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.945391 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.945428 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.945437 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.945451 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:00 crc kubenswrapper[4857]: I1128 13:31:00.945466 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:00Z","lastTransitionTime":"2025-11-28T13:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.048362 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.048400 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.048408 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.048446 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.048458 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:01Z","lastTransitionTime":"2025-11-28T13:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.152470 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.152918 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.153148 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.153353 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.153577 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:01Z","lastTransitionTime":"2025-11-28T13:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.227687 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:31:01 crc kubenswrapper[4857]: E1128 13:31:01.227882 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.256759 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.256800 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.256811 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.256827 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.256838 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:01Z","lastTransitionTime":"2025-11-28T13:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.359847 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.360251 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.360378 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.360552 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.360717 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:01Z","lastTransitionTime":"2025-11-28T13:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.462920 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.463140 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.463176 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.463201 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.463214 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:01Z","lastTransitionTime":"2025-11-28T13:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.565859 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.565908 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.565919 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.565939 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.565976 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:01Z","lastTransitionTime":"2025-11-28T13:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.668916 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.669016 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.669036 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.669067 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.669084 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:01Z","lastTransitionTime":"2025-11-28T13:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.773100 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.773163 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.773187 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.773221 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.773244 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:01Z","lastTransitionTime":"2025-11-28T13:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.876013 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.876059 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.876069 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.876084 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.876095 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:01Z","lastTransitionTime":"2025-11-28T13:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.978876 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.978978 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.978998 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.979022 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:01 crc kubenswrapper[4857]: I1128 13:31:01.979039 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:01Z","lastTransitionTime":"2025-11-28T13:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.081508 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.081565 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.081574 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.081590 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.081602 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:02Z","lastTransitionTime":"2025-11-28T13:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.184813 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.184865 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.184875 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.184890 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.184901 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:02Z","lastTransitionTime":"2025-11-28T13:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.228273 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.228484 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:31:02 crc kubenswrapper[4857]: E1128 13:31:02.228575 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:31:02 crc kubenswrapper[4857]: E1128 13:31:02.228666 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.228774 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:31:02 crc kubenswrapper[4857]: E1128 13:31:02.229069 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.287387 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.287434 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.287443 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.287458 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.287474 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:02Z","lastTransitionTime":"2025-11-28T13:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.391617 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.391652 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.391660 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.391673 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.391682 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:02Z","lastTransitionTime":"2025-11-28T13:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.495924 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.496002 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.496014 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.496033 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.496044 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:02Z","lastTransitionTime":"2025-11-28T13:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.599018 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.599071 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.599084 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.599103 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.599126 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:02Z","lastTransitionTime":"2025-11-28T13:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.703201 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.703288 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.703328 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.703364 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.703389 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:02Z","lastTransitionTime":"2025-11-28T13:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.806565 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.806620 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.806632 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.806650 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.806662 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:02Z","lastTransitionTime":"2025-11-28T13:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.908982 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.909024 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.909035 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.909051 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:02 crc kubenswrapper[4857]: I1128 13:31:02.909063 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:02Z","lastTransitionTime":"2025-11-28T13:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.011848 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.011892 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.011902 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.011918 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.011929 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:03Z","lastTransitionTime":"2025-11-28T13:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.114940 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.114993 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.115005 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.115025 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.115036 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:03Z","lastTransitionTime":"2025-11-28T13:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.218425 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.218475 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.218485 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.218503 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.218515 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:03Z","lastTransitionTime":"2025-11-28T13:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.228852 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:31:03 crc kubenswrapper[4857]: E1128 13:31:03.229072 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.322413 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.322471 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.322485 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.322505 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.322517 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:03Z","lastTransitionTime":"2025-11-28T13:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.426339 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.426402 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.426416 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.426441 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.426453 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:03Z","lastTransitionTime":"2025-11-28T13:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.529890 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.529969 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.529982 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.530003 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.530027 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:03Z","lastTransitionTime":"2025-11-28T13:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.633166 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.633227 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.633236 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.633258 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.633269 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:03Z","lastTransitionTime":"2025-11-28T13:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.735767 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.735896 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.735910 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.735925 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.735936 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:03Z","lastTransitionTime":"2025-11-28T13:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.838790 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.838846 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.838857 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.838876 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.838891 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:03Z","lastTransitionTime":"2025-11-28T13:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.941123 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.941173 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.941188 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.941205 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:31:03 crc kubenswrapper[4857]: I1128 13:31:03.941217 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:03Z","lastTransitionTime":"2025-11-28T13:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.044380 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.044424 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.044435 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.044451 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.044462 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:04Z","lastTransitionTime":"2025-11-28T13:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.147212 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.147265 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.147282 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.147307 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.147324 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:04Z","lastTransitionTime":"2025-11-28T13:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.228919 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.228999 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 13:31:04 crc kubenswrapper[4857]: E1128 13:31:04.229195 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.229314 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 28 13:31:04 crc kubenswrapper[4857]: E1128 13:31:04.229851 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 28 13:31:04 crc kubenswrapper[4857]: E1128 13:31:04.230442 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.230926 4857 scope.go:117] "RemoveContainer" containerID="63d0afa8ede2911fc623c7b2bd7143f0bac957f388a64f35db245dd14a67c558"
Nov 28 13:31:04 crc kubenswrapper[4857]: E1128 13:31:04.231224 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-h8td2_openshift-ovn-kubernetes(46c5e02c-be1a-45b7-86ef-cc8c484c4f71)\"" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.250531 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.250614 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.250628 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.250650 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.250664 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:04Z","lastTransitionTime":"2025-11-28T13:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.353889 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.353965 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.353981 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.354001 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.354017 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:04Z","lastTransitionTime":"2025-11-28T13:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.457044 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.457122 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.457151 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.457190 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.457217 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:04Z","lastTransitionTime":"2025-11-28T13:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.561037 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.561108 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.561125 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.561151 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.561171 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:04Z","lastTransitionTime":"2025-11-28T13:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.663704 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.663748 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.663761 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.663778 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.663791 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:04Z","lastTransitionTime":"2025-11-28T13:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.766371 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.766414 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.766447 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.766466 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.766478 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:04Z","lastTransitionTime":"2025-11-28T13:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.869151 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.869217 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.869236 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.869260 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.869279 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:04Z","lastTransitionTime":"2025-11-28T13:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.971519 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.972320 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.972422 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.972521 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:31:04 crc kubenswrapper[4857]: I1128 13:31:04.972620 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:04Z","lastTransitionTime":"2025-11-28T13:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.076248 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.076319 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.076331 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.076354 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.076369 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:05Z","lastTransitionTime":"2025-11-28T13:31:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.179101 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.179399 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.179481 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.179567 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.179844 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:05Z","lastTransitionTime":"2025-11-28T13:31:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.227749 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7"
Nov 28 13:31:05 crc kubenswrapper[4857]: E1128 13:31:05.227913 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a"
pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.282373 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.282665 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.282791 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.282891 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.283014 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:05Z","lastTransitionTime":"2025-11-28T13:31:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.386211 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.386481 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.386564 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.386634 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.386699 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:05Z","lastTransitionTime":"2025-11-28T13:31:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.490450 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.490514 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.490528 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.490547 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.490558 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:05Z","lastTransitionTime":"2025-11-28T13:31:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.594672 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.594745 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.594760 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.594782 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.594794 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:05Z","lastTransitionTime":"2025-11-28T13:31:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.603739 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rb7tq_787c64de-9ce4-41eb-a525-948c23e84595/kube-multus/1.log" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.604290 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rb7tq_787c64de-9ce4-41eb-a525-948c23e84595/kube-multus/0.log" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.604347 4857 generic.go:334] "Generic (PLEG): container finished" podID="787c64de-9ce4-41eb-a525-948c23e84595" containerID="d4e401482ae63a6e4ae751bb6a59688781c6b78961e3936facd2a2da80a538e5" exitCode=1 Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.604390 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rb7tq" event={"ID":"787c64de-9ce4-41eb-a525-948c23e84595","Type":"ContainerDied","Data":"d4e401482ae63a6e4ae751bb6a59688781c6b78961e3936facd2a2da80a538e5"} Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.604438 4857 scope.go:117] "RemoveContainer" containerID="d1e973d793da5f49ac48e8690568000d3700cc2c585ccaf0b14f369341847395" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.604970 4857 scope.go:117] "RemoveContainer" containerID="d4e401482ae63a6e4ae751bb6a59688781c6b78961e3936facd2a2da80a538e5" Nov 28 13:31:05 crc kubenswrapper[4857]: E1128 13:31:05.605159 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-rb7tq_openshift-multus(787c64de-9ce4-41eb-a525-948c23e84595)\"" pod="openshift-multus/multus-rb7tq" podUID="787c64de-9ce4-41eb-a525-948c23e84595" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.630166 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=62.630136149 podStartE2EDuration="1m2.630136149s" podCreationTimestamp="2025-11-28 13:30:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:05.629687077 +0000 UTC m=+115.753628554" watchObservedRunningTime="2025-11-28 13:31:05.630136149 +0000 UTC m=+115.754077626" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.694347 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-cs9jw" podStartSLOduration=94.694275498 podStartE2EDuration="1m34.694275498s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:05.675614947 +0000 UTC m=+115.799556384" watchObservedRunningTime="2025-11-28 13:31:05.694275498 +0000 UTC m=+115.818216935" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.698727 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.698766 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.698776 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.698792 4857 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.698805 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:05Z","lastTransitionTime":"2025-11-28T13:31:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.713501 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=94.713479413 podStartE2EDuration="1m34.713479413s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:05.69548291 +0000 UTC m=+115.819424337" watchObservedRunningTime="2025-11-28 13:31:05.713479413 +0000 UTC m=+115.837420840" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.738313 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.738383 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.738395 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.738413 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.738429 4857 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-28T13:31:05Z","lastTransitionTime":"2025-11-28T13:31:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.768176 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podStartSLOduration=94.768156393 podStartE2EDuration="1m34.768156393s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:05.746570495 +0000 UTC m=+115.870511952" watchObservedRunningTime="2025-11-28 13:31:05.768156393 +0000 UTC m=+115.892097830" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.780525 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-ggbvt" podStartSLOduration=94.780497318 podStartE2EDuration="1m34.780497318s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:05.767739842 +0000 UTC m=+115.891681279" watchObservedRunningTime="2025-11-28 13:31:05.780497318 +0000 UTC m=+115.904438755" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.783153 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=30.783136447 podStartE2EDuration="30.783136447s" podCreationTimestamp="2025-11-28 13:30:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:05.781423852 +0000 UTC m=+115.905365289" watchObservedRunningTime="2025-11-28 13:31:05.783136447 +0000 UTC m=+115.907077884" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.783424 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-gbqpz"] Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.783794 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gbqpz" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.785877 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.786086 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.786146 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.786689 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.798690 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=94.798669366 podStartE2EDuration="1m34.798669366s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:05.798176663 +0000 UTC m=+115.922118100" watchObservedRunningTime="2025-11-28 13:31:05.798669366 +0000 UTC m=+115.922610803" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.825477 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/1494aa42-a747-41a9-93c3-0fd722a8600d-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-gbqpz\" (UID: \"1494aa42-a747-41a9-93c3-0fd722a8600d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gbqpz" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.825559 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1494aa42-a747-41a9-93c3-0fd722a8600d-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-gbqpz\" (UID: \"1494aa42-a747-41a9-93c3-0fd722a8600d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gbqpz" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.825591 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1494aa42-a747-41a9-93c3-0fd722a8600d-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-gbqpz\" (UID: \"1494aa42-a747-41a9-93c3-0fd722a8600d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gbqpz" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.825635 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1494aa42-a747-41a9-93c3-0fd722a8600d-service-ca\") pod \"cluster-version-operator-5c965bbfc6-gbqpz\" (UID: \"1494aa42-a747-41a9-93c3-0fd722a8600d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gbqpz" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.825683 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/1494aa42-a747-41a9-93c3-0fd722a8600d-etc-ssl-certs\") pod 
\"cluster-version-operator-5c965bbfc6-gbqpz\" (UID: \"1494aa42-a747-41a9-93c3-0fd722a8600d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gbqpz" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.855040 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=15.854996739 podStartE2EDuration="15.854996739s" podCreationTimestamp="2025-11-28 13:30:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:05.854513626 +0000 UTC m=+115.978455073" watchObservedRunningTime="2025-11-28 13:31:05.854996739 +0000 UTC m=+115.978938166" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.892242 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-qbndv" podStartSLOduration=94.892218159 podStartE2EDuration="1m34.892218159s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:05.890937815 +0000 UTC m=+116.014879252" watchObservedRunningTime="2025-11-28 13:31:05.892218159 +0000 UTC m=+116.016159596" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.926217 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1494aa42-a747-41a9-93c3-0fd722a8600d-service-ca\") pod \"cluster-version-operator-5c965bbfc6-gbqpz\" (UID: \"1494aa42-a747-41a9-93c3-0fd722a8600d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gbqpz" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.926279 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/1494aa42-a747-41a9-93c3-0fd722a8600d-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-gbqpz\" (UID: \"1494aa42-a747-41a9-93c3-0fd722a8600d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gbqpz" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.926326 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/1494aa42-a747-41a9-93c3-0fd722a8600d-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-gbqpz\" (UID: \"1494aa42-a747-41a9-93c3-0fd722a8600d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gbqpz" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.926355 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1494aa42-a747-41a9-93c3-0fd722a8600d-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-gbqpz\" (UID: \"1494aa42-a747-41a9-93c3-0fd722a8600d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gbqpz" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.926376 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1494aa42-a747-41a9-93c3-0fd722a8600d-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-gbqpz\" (UID: \"1494aa42-a747-41a9-93c3-0fd722a8600d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gbqpz" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.926435 4857 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/1494aa42-a747-41a9-93c3-0fd722a8600d-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-gbqpz\" (UID: \"1494aa42-a747-41a9-93c3-0fd722a8600d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gbqpz" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.926462 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/1494aa42-a747-41a9-93c3-0fd722a8600d-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-gbqpz\" (UID: \"1494aa42-a747-41a9-93c3-0fd722a8600d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gbqpz" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.927483 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1494aa42-a747-41a9-93c3-0fd722a8600d-service-ca\") pod \"cluster-version-operator-5c965bbfc6-gbqpz\" (UID: \"1494aa42-a747-41a9-93c3-0fd722a8600d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gbqpz" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.936892 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1494aa42-a747-41a9-93c3-0fd722a8600d-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-gbqpz\" (UID: \"1494aa42-a747-41a9-93c3-0fd722a8600d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gbqpz" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.960156 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1494aa42-a747-41a9-93c3-0fd722a8600d-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-gbqpz\" (UID: \"1494aa42-a747-41a9-93c3-0fd722a8600d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gbqpz" Nov 28 13:31:05 crc kubenswrapper[4857]: I1128 13:31:05.974973 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-kmjsb" podStartSLOduration=93.974927356 podStartE2EDuration="1m33.974927356s" podCreationTimestamp="2025-11-28 13:29:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:05.943466808 +0000 UTC m=+116.067408255" watchObservedRunningTime="2025-11-28 13:31:05.974927356 +0000 UTC m=+116.098868803" Nov 28 13:31:06 crc kubenswrapper[4857]: I1128 13:31:06.095484 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gbqpz" Nov 28 13:31:06 crc kubenswrapper[4857]: I1128 13:31:06.227794 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:31:06 crc kubenswrapper[4857]: I1128 13:31:06.227798 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:31:06 crc kubenswrapper[4857]: E1128 13:31:06.228281 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:31:06 crc kubenswrapper[4857]: I1128 13:31:06.227859 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:31:06 crc kubenswrapper[4857]: E1128 13:31:06.228473 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:31:06 crc kubenswrapper[4857]: E1128 13:31:06.228717 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:31:06 crc kubenswrapper[4857]: I1128 13:31:06.608573 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gbqpz" event={"ID":"1494aa42-a747-41a9-93c3-0fd722a8600d","Type":"ContainerStarted","Data":"2235e95ead742e5810458431df5e041dc7bbab3ada2fc0ac62b61b4e7c3efe81"} Nov 28 13:31:06 crc kubenswrapper[4857]: I1128 13:31:06.608629 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gbqpz" event={"ID":"1494aa42-a747-41a9-93c3-0fd722a8600d","Type":"ContainerStarted","Data":"0bbd90ce0bbfc0379bf64f2d2837a802d0b2bf341a5bd2b631c8a54c7b5f6e1d"} Nov 28 13:31:06 crc kubenswrapper[4857]: I1128 13:31:06.610473 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rb7tq_787c64de-9ce4-41eb-a525-948c23e84595/kube-multus/1.log" Nov 28 13:31:06 crc kubenswrapper[4857]: I1128 13:31:06.632689 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-gbqpz" podStartSLOduration=95.632669502 podStartE2EDuration="1m35.632669502s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:06.631591013 +0000 UTC m=+116.755532450" watchObservedRunningTime="2025-11-28 13:31:06.632669502 +0000 UTC m=+116.756610939" Nov 28 13:31:07 crc kubenswrapper[4857]: I1128 13:31:07.228026 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:31:07 crc kubenswrapper[4857]: E1128 13:31:07.228198 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:31:08 crc kubenswrapper[4857]: I1128 13:31:08.228900 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:31:08 crc kubenswrapper[4857]: I1128 13:31:08.228998 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:31:08 crc kubenswrapper[4857]: I1128 13:31:08.229028 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:31:08 crc kubenswrapper[4857]: E1128 13:31:08.229079 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:31:08 crc kubenswrapper[4857]: E1128 13:31:08.229515 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:31:08 crc kubenswrapper[4857]: E1128 13:31:08.229723 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:31:09 crc kubenswrapper[4857]: I1128 13:31:09.228400 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:31:09 crc kubenswrapper[4857]: E1128 13:31:09.228570 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:31:10 crc kubenswrapper[4857]: I1128 13:31:10.228527 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:31:10 crc kubenswrapper[4857]: I1128 13:31:10.228668 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:31:10 crc kubenswrapper[4857]: I1128 13:31:10.228737 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:31:10 crc kubenswrapper[4857]: E1128 13:31:10.230008 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:31:10 crc kubenswrapper[4857]: E1128 13:31:10.230108 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:31:10 crc kubenswrapper[4857]: E1128 13:31:10.230142 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:31:10 crc kubenswrapper[4857]: E1128 13:31:10.231763 4857 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Nov 28 13:31:10 crc kubenswrapper[4857]: E1128 13:31:10.309843 4857 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 28 13:31:11 crc kubenswrapper[4857]: I1128 13:31:11.228533 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:31:11 crc kubenswrapper[4857]: E1128 13:31:11.228712 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:31:12 crc kubenswrapper[4857]: I1128 13:31:12.228614 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:31:12 crc kubenswrapper[4857]: I1128 13:31:12.228640 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:31:12 crc kubenswrapper[4857]: I1128 13:31:12.228640 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:31:12 crc kubenswrapper[4857]: E1128 13:31:12.228990 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:31:12 crc kubenswrapper[4857]: E1128 13:31:12.229077 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:31:12 crc kubenswrapper[4857]: E1128 13:31:12.228810 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:31:13 crc kubenswrapper[4857]: I1128 13:31:13.228296 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:31:13 crc kubenswrapper[4857]: E1128 13:31:13.228467 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:31:14 crc kubenswrapper[4857]: I1128 13:31:14.228403 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:31:14 crc kubenswrapper[4857]: I1128 13:31:14.228480 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:31:14 crc kubenswrapper[4857]: E1128 13:31:14.228576 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:31:14 crc kubenswrapper[4857]: I1128 13:31:14.228498 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:31:14 crc kubenswrapper[4857]: E1128 13:31:14.228644 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:31:14 crc kubenswrapper[4857]: E1128 13:31:14.228759 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:31:15 crc kubenswrapper[4857]: I1128 13:31:15.228659 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:31:15 crc kubenswrapper[4857]: E1128 13:31:15.228832 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:31:15 crc kubenswrapper[4857]: E1128 13:31:15.310880 4857 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 28 13:31:16 crc kubenswrapper[4857]: I1128 13:31:16.229133 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:31:16 crc kubenswrapper[4857]: I1128 13:31:16.229246 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:31:16 crc kubenswrapper[4857]: I1128 13:31:16.229371 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:31:16 crc kubenswrapper[4857]: E1128 13:31:16.229409 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:31:16 crc kubenswrapper[4857]: E1128 13:31:16.229516 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:31:16 crc kubenswrapper[4857]: E1128 13:31:16.229273 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:31:16 crc kubenswrapper[4857]: I1128 13:31:16.230300 4857 scope.go:117] "RemoveContainer" containerID="d4e401482ae63a6e4ae751bb6a59688781c6b78961e3936facd2a2da80a538e5" Nov 28 13:31:16 crc kubenswrapper[4857]: I1128 13:31:16.647455 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rb7tq_787c64de-9ce4-41eb-a525-948c23e84595/kube-multus/1.log" Nov 28 13:31:16 crc kubenswrapper[4857]: I1128 13:31:16.647854 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rb7tq" event={"ID":"787c64de-9ce4-41eb-a525-948c23e84595","Type":"ContainerStarted","Data":"586107b95ceda2408d672f603658f1252e6cbceddc10c0ad76403f9446812f05"} Nov 28 13:31:16 crc kubenswrapper[4857]: I1128 13:31:16.664523 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-rb7tq" podStartSLOduration=105.664503505 podStartE2EDuration="1m45.664503505s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:16.663702054 +0000 UTC m=+126.787643541" watchObservedRunningTime="2025-11-28 13:31:16.664503505 +0000 UTC m=+126.788444942" Nov 28 13:31:17 crc kubenswrapper[4857]: I1128 13:31:17.228195 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:31:17 crc kubenswrapper[4857]: E1128 13:31:17.228499 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:31:18 crc kubenswrapper[4857]: I1128 13:31:18.228439 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:31:18 crc kubenswrapper[4857]: I1128 13:31:18.228496 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:31:18 crc kubenswrapper[4857]: I1128 13:31:18.228630 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:31:18 crc kubenswrapper[4857]: E1128 13:31:18.228625 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:31:18 crc kubenswrapper[4857]: E1128 13:31:18.228931 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:31:18 crc kubenswrapper[4857]: E1128 13:31:18.229148 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:31:18 crc kubenswrapper[4857]: I1128 13:31:18.229925 4857 scope.go:117] "RemoveContainer" containerID="63d0afa8ede2911fc623c7b2bd7143f0bac957f388a64f35db245dd14a67c558" Nov 28 13:31:18 crc kubenswrapper[4857]: I1128 13:31:18.657313 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-h8td2_46c5e02c-be1a-45b7-86ef-cc8c484c4f71/ovnkube-controller/3.log" Nov 28 13:31:18 crc kubenswrapper[4857]: I1128 13:31:18.660188 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" event={"ID":"46c5e02c-be1a-45b7-86ef-cc8c484c4f71","Type":"ContainerStarted","Data":"1690dc88e10e9aa6e4e4415adb0c9a94115389314a85576d033f0fccefa5943b"} Nov 28 13:31:18 crc kubenswrapper[4857]: I1128 13:31:18.661095 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:31:19 crc kubenswrapper[4857]: I1128 13:31:19.001812 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" podStartSLOduration=108.001788205 podStartE2EDuration="1m48.001788205s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:18.685660463 +0000 UTC m=+128.809601920" watchObservedRunningTime="2025-11-28 13:31:19.001788205 +0000 UTC m=+129.125729642" Nov 28 13:31:19 crc kubenswrapper[4857]: I1128 13:31:19.002586 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-26tq7"] Nov 28 13:31:19 crc kubenswrapper[4857]: I1128 13:31:19.002688 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:31:19 crc kubenswrapper[4857]: E1128 13:31:19.002770 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:31:19 crc kubenswrapper[4857]: I1128 13:31:19.765863 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:31:19 crc kubenswrapper[4857]: E1128 13:31:19.766030 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:31:19 crc kubenswrapper[4857]: I1128 13:31:19.766091 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:31:19 crc kubenswrapper[4857]: E1128 13:31:19.766160 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:31:19 crc kubenswrapper[4857]: I1128 13:31:19.766228 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:31:19 crc kubenswrapper[4857]: E1128 13:31:19.766378 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:31:20 crc kubenswrapper[4857]: I1128 13:31:20.228026 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:31:20 crc kubenswrapper[4857]: E1128 13:31:20.229254 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:31:20 crc kubenswrapper[4857]: E1128 13:31:20.316284 4857 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 28 13:31:21 crc kubenswrapper[4857]: I1128 13:31:21.228480 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:31:21 crc kubenswrapper[4857]: I1128 13:31:21.228484 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:31:21 crc kubenswrapper[4857]: E1128 13:31:21.229543 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:31:21 crc kubenswrapper[4857]: E1128 13:31:21.229237 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:31:22 crc kubenswrapper[4857]: I1128 13:31:22.228407 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:31:22 crc kubenswrapper[4857]: I1128 13:31:22.228431 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:31:22 crc kubenswrapper[4857]: E1128 13:31:22.228560 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:31:22 crc kubenswrapper[4857]: E1128 13:31:22.228771 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:31:23 crc kubenswrapper[4857]: I1128 13:31:23.228059 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:31:23 crc kubenswrapper[4857]: I1128 13:31:23.228074 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:31:23 crc kubenswrapper[4857]: E1128 13:31:23.228225 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:31:23 crc kubenswrapper[4857]: E1128 13:31:23.228328 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:31:24 crc kubenswrapper[4857]: I1128 13:31:24.228110 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:31:24 crc kubenswrapper[4857]: I1128 13:31:24.228110 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:31:24 crc kubenswrapper[4857]: E1128 13:31:24.228273 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 28 13:31:24 crc kubenswrapper[4857]: E1128 13:31:24.228361 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-26tq7" podUID="0cf43f51-b79b-49fc-85ca-a245a248f27a" Nov 28 13:31:25 crc kubenswrapper[4857]: I1128 13:31:25.228498 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:31:25 crc kubenswrapper[4857]: E1128 13:31:25.228624 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 28 13:31:25 crc kubenswrapper[4857]: I1128 13:31:25.228498 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:31:25 crc kubenswrapper[4857]: E1128 13:31:25.228739 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 28 13:31:25 crc kubenswrapper[4857]: I1128 13:31:25.535009 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.164646 4857 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.216223 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-cvb79"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.217021 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-cvb79" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.217598 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-xdww2"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.219272 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-xdww2" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.219095 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-qp4rc"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.220521 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.221150 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-mrg8q"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.221381 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-qp4rc" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.221982 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-92tgg"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.222180 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-mrg8q" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.222599 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-92tgg" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.223321 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-9sptz"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.224289 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9sptz" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.225051 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-nm6c5"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.225489 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.225803 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-nm6c5" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.240553 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-5c5lq"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.241406 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.242745 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.244146 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.244156 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.244223 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.244788 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.244798 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-5c5lq" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.245015 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.268779 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.269047 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.270131 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.270365 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.270483 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.270505 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.270534 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.270620 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.270652 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.270769 4857 reflector.go:368] Caches populated for *v1.Secret 
from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.270778 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.270807 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.270851 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.270873 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.270774 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.270918 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.270989 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.271029 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.271041 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.271093 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.271107 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.271123 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.271184 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.271202 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.271040 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.271290 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.271249 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.271334 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.271206 4857 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.271297 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.271403 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.271339 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.271292 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.271554 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.271768 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.271819 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.271983 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.272018 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.272143 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.272365 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.272382 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.274315 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-m9bfw"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.282575 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-prc44"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.283026 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-bp9rq"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.283410 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-m658x"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.283740 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.283969 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-prc44" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.283995 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-bp9rq" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.284086 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-m9bfw" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.284261 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-m658x" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.289837 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.290338 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-6gf7t"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.291039 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.291103 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-6gf7t" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.291123 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.292294 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.293059 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-jp48x"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.293667 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-jp48x" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.294545 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.294732 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.294840 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.294932 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.300876 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-76m2l"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.301920 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t48t9"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.302556 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-76m2l" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.303198 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.303414 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.303427 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t48t9" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.303654 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.303839 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.303928 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.303991 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.304024 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.304111 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.303992 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.304641 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.304775 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.304888 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.305222 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2gj2l"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.305351 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.305350 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.305834 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-6qvft"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.306208 4857 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-image-registry/image-registry-697d97f7c8-98dmw"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.306568 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2gj2l" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.306691 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.306865 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-6qvft" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.307407 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.307964 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.308022 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.308078 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.308137 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.308153 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.308220 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.314348 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.318984 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.322402 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.322531 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.322619 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.323668 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.323822 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.324124 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.324180 4857 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.324127 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.324367 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.324665 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.324871 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.325462 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.326590 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.329632 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.336081 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.338815 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-ghlnz"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.329895 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.340346 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.341100 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/4546bf69-bd62-41a8-ade2-31c4e7f198da-auth-proxy-config\") pod \"machine-approver-56656f9798-xdww2\" (UID: \"4546bf69-bd62-41a8-ade2-31c4e7f198da\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-xdww2" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.343297 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/4546bf69-bd62-41a8-ade2-31c4e7f198da-machine-approver-tls\") pod \"machine-approver-56656f9798-xdww2\" (UID: \"4546bf69-bd62-41a8-ade2-31c4e7f198da\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-xdww2" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.343340 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/951e34a8-00f5-479a-9de8-ee53ee32da75-available-featuregates\") pod \"openshift-config-operator-7777fb866f-9sptz\" (UID: \"951e34a8-00f5-479a-9de8-ee53ee32da75\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9sptz" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.341057 4857 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.343367 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/de71ba59-fb73-4f54-be20-690e4e94b446-config\") pod \"openshift-apiserver-operator-796bbdcf4f-92tgg\" (UID: \"de71ba59-fb73-4f54-be20-690e4e94b446\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-92tgg" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.343405 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558-serving-cert\") pod \"controller-manager-879f6c89f-cvb79\" (UID: \"7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558\") " pod="openshift-controller-manager/controller-manager-879f6c89f-cvb79" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.342356 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.343435 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5sv8\" (UniqueName: \"kubernetes.io/projected/4546bf69-bd62-41a8-ade2-31c4e7f198da-kube-api-access-g5sv8\") pod \"machine-approver-56656f9798-xdww2\" (UID: \"4546bf69-bd62-41a8-ade2-31c4e7f198da\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-xdww2" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.343537 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kzbgv\" (UniqueName: \"kubernetes.io/projected/a73f6e61-db8f-4ada-8825-201429e1f803-kube-api-access-kzbgv\") pod \"cluster-samples-operator-665b6dd947-mrg8q\" (UID: \"a73f6e61-db8f-4ada-8825-201429e1f803\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-mrg8q" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.343570 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-222kx\" (UniqueName: \"kubernetes.io/projected/951e34a8-00f5-479a-9de8-ee53ee32da75-kube-api-access-222kx\") pod \"openshift-config-operator-7777fb866f-9sptz\" (UID: \"951e34a8-00f5-479a-9de8-ee53ee32da75\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9sptz" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.343606 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/0723fd39-fd72-4aae-a8ac-4a69a9cea44e-images\") pod \"machine-api-operator-5694c8668f-nm6c5\" (UID: \"0723fd39-fd72-4aae-a8ac-4a69a9cea44e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-nm6c5" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.343637 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/951e34a8-00f5-479a-9de8-ee53ee32da75-serving-cert\") pod \"openshift-config-operator-7777fb866f-9sptz\" (UID: \"951e34a8-00f5-479a-9de8-ee53ee32da75\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9sptz" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.343662 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-cvb79\" (UID: \"7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558\") " pod="openshift-controller-manager/controller-manager-879f6c89f-cvb79" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.343702 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qt4xz\" (UniqueName: \"kubernetes.io/projected/de71ba59-fb73-4f54-be20-690e4e94b446-kube-api-access-qt4xz\") pod \"openshift-apiserver-operator-796bbdcf4f-92tgg\" (UID: \"de71ba59-fb73-4f54-be20-690e4e94b446\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-92tgg" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.343729 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/5ccc741a-98e5-420d-ad6e-4260ea2abb1f-audit\") pod \"apiserver-76f77b778f-qp4rc\" (UID: \"5ccc741a-98e5-420d-ad6e-4260ea2abb1f\") " pod="openshift-apiserver/apiserver-76f77b778f-qp4rc" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.343772 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/5ccc741a-98e5-420d-ad6e-4260ea2abb1f-node-pullsecrets\") pod \"apiserver-76f77b778f-qp4rc\" (UID: \"5ccc741a-98e5-420d-ad6e-4260ea2abb1f\") " pod="openshift-apiserver/apiserver-76f77b778f-qp4rc" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.343811 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5ccc741a-98e5-420d-ad6e-4260ea2abb1f-etcd-client\") pod \"apiserver-76f77b778f-qp4rc\" (UID: \"5ccc741a-98e5-420d-ad6e-4260ea2abb1f\") " pod="openshift-apiserver/apiserver-76f77b778f-qp4rc" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.343843 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0723fd39-fd72-4aae-a8ac-4a69a9cea44e-config\") pod \"machine-api-operator-5694c8668f-nm6c5\" (UID: \"0723fd39-fd72-4aae-a8ac-4a69a9cea44e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-nm6c5" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.343863 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5ccc741a-98e5-420d-ad6e-4260ea2abb1f-etcd-serving-ca\") pod \"apiserver-76f77b778f-qp4rc\" (UID: \"5ccc741a-98e5-420d-ad6e-4260ea2abb1f\") " pod="openshift-apiserver/apiserver-76f77b778f-qp4rc" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.343976 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/de71ba59-fb73-4f54-be20-690e4e94b446-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-92tgg\" (UID: \"de71ba59-fb73-4f54-be20-690e4e94b446\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-92tgg" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.344009 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5ccc741a-98e5-420d-ad6e-4260ea2abb1f-serving-cert\") pod 
\"apiserver-76f77b778f-qp4rc\" (UID: \"5ccc741a-98e5-420d-ad6e-4260ea2abb1f\") " pod="openshift-apiserver/apiserver-76f77b778f-qp4rc" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.344031 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a73f6e61-db8f-4ada-8825-201429e1f803-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-mrg8q\" (UID: \"a73f6e61-db8f-4ada-8825-201429e1f803\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-mrg8q" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.344055 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dmjnv\" (UniqueName: \"kubernetes.io/projected/7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558-kube-api-access-dmjnv\") pod \"controller-manager-879f6c89f-cvb79\" (UID: \"7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558\") " pod="openshift-controller-manager/controller-manager-879f6c89f-cvb79" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.344080 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4546bf69-bd62-41a8-ade2-31c4e7f198da-config\") pod \"machine-approver-56656f9798-xdww2\" (UID: \"4546bf69-bd62-41a8-ade2-31c4e7f198da\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-xdww2" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.344116 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-plnpj\" (UniqueName: \"kubernetes.io/projected/1cb29ded-850a-45ff-8201-a991fe779c01-kube-api-access-plnpj\") pod \"downloads-7954f5f757-5c5lq\" (UID: \"1cb29ded-850a-45ff-8201-a991fe779c01\") " pod="openshift-console/downloads-7954f5f757-5c5lq" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.344137 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/0723fd39-fd72-4aae-a8ac-4a69a9cea44e-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-nm6c5\" (UID: \"0723fd39-fd72-4aae-a8ac-4a69a9cea44e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-nm6c5" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.344158 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5ccc741a-98e5-420d-ad6e-4260ea2abb1f-encryption-config\") pod \"apiserver-76f77b778f-qp4rc\" (UID: \"5ccc741a-98e5-420d-ad6e-4260ea2abb1f\") " pod="openshift-apiserver/apiserver-76f77b778f-qp4rc" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.344181 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5ccc741a-98e5-420d-ad6e-4260ea2abb1f-trusted-ca-bundle\") pod \"apiserver-76f77b778f-qp4rc\" (UID: \"5ccc741a-98e5-420d-ad6e-4260ea2abb1f\") " pod="openshift-apiserver/apiserver-76f77b778f-qp4rc" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.344216 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nrrrb\" (UniqueName: \"kubernetes.io/projected/0723fd39-fd72-4aae-a8ac-4a69a9cea44e-kube-api-access-nrrrb\") pod \"machine-api-operator-5694c8668f-nm6c5\" (UID: 
\"0723fd39-fd72-4aae-a8ac-4a69a9cea44e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-nm6c5" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.344239 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558-config\") pod \"controller-manager-879f6c89f-cvb79\" (UID: \"7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558\") " pod="openshift-controller-manager/controller-manager-879f6c89f-cvb79" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.344267 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/5ccc741a-98e5-420d-ad6e-4260ea2abb1f-image-import-ca\") pod \"apiserver-76f77b778f-qp4rc\" (UID: \"5ccc741a-98e5-420d-ad6e-4260ea2abb1f\") " pod="openshift-apiserver/apiserver-76f77b778f-qp4rc" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.344292 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5ccc741a-98e5-420d-ad6e-4260ea2abb1f-audit-dir\") pod \"apiserver-76f77b778f-qp4rc\" (UID: \"5ccc741a-98e5-420d-ad6e-4260ea2abb1f\") " pod="openshift-apiserver/apiserver-76f77b778f-qp4rc" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.344316 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558-client-ca\") pod \"controller-manager-879f6c89f-cvb79\" (UID: \"7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558\") " pod="openshift-controller-manager/controller-manager-879f6c89f-cvb79" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.344342 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ccc741a-98e5-420d-ad6e-4260ea2abb1f-config\") pod \"apiserver-76f77b778f-qp4rc\" (UID: \"5ccc741a-98e5-420d-ad6e-4260ea2abb1f\") " pod="openshift-apiserver/apiserver-76f77b778f-qp4rc" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.344368 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qdslz\" (UniqueName: \"kubernetes.io/projected/5ccc741a-98e5-420d-ad6e-4260ea2abb1f-kube-api-access-qdslz\") pod \"apiserver-76f77b778f-qp4rc\" (UID: \"5ccc741a-98e5-420d-ad6e-4260ea2abb1f\") " pod="openshift-apiserver/apiserver-76f77b778f-qp4rc" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.345439 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gv6hw"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.345611 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-ghlnz" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.346438 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-wzgtn"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.347546 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gv6hw" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.347557 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-fcpdp"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.347652 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-wzgtn" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.347917 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.348331 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-v825d"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.348704 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ps26v"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.349131 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.349468 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fcpdp" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.352328 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-v825d" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.353654 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.354431 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.359119 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-hl6h5"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.359349 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ps26v" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.360332 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4xnj2"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.360626 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-n8z9p"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.360820 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-hl6h5" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.360917 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d8ghr"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.361158 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4xnj2" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.361286 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-gdkwk"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.361368 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-n8z9p" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.361373 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d8ghr" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.362170 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-78f6q"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.362663 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405610-z8jl4"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.363162 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-p7lbs"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.363796 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-gdkwk" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.364013 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-78f6q" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.364312 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-7ch8g"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.373296 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-p7lbs" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.375082 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5wxk5"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.375353 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-7ch8g" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.375467 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405610-z8jl4" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.379492 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.379907 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-ddlrq"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.380129 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.380442 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5wxk5" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.381357 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-plzgn"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.382237 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ddlrq" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.385897 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.385943 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-lq2rk"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.386309 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-cvb79"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.386380 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-lq2rk" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.386586 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-plzgn" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.388155 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-prc44"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.389998 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-qp4rc"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.390212 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-nm6c5"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.391488 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-g79kz"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.392337 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-g79kz" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.392837 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-mrg8q"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.394151 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-92tgg"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.395858 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-m9bfw"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.396826 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-fcpdp"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.397816 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-9sptz"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.399201 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-m658x"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.400409 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-6gf7t"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.400482 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.401782 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-bp9rq"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.403004 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t48t9"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.404158 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-p7lbs"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.405498 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-5c5lq"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.408021 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-v825d"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.409042 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-78f6q"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.410087 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-jp48x"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.411150 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4xnj2"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.412481 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-76m2l"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.413596 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-gdkwk"] Nov 28 13:31:26 crc kubenswrapper[4857]: 
I1128 13:31:26.414999 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-jjd26"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.416169 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-n8z9p"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.416256 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-jjd26" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.417714 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d8ghr"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.418469 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-7ch8g"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.419425 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.419929 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405610-z8jl4"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.421521 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-6qvft"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.429420 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ps26v"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.436510 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-wzgtn"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.438388 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-98dmw"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.440049 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gv6hw"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.440729 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.441285 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-hl6h5"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.443277 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2gj2l"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.444626 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-jjd26"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.445297 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5wxk5"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.445461 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a29f471d-4c21-4e9b-a479-3b8f0d1e0878-config\") pod \"console-operator-58897d9998-bp9rq\" (UID: 
\"a29f471d-4c21-4e9b-a479-3b8f0d1e0878\") " pod="openshift-console-operator/console-operator-58897d9998-bp9rq" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.445488 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/492c9136-a973-43ca-ad2c-4650392e6e38-webhook-cert\") pod \"packageserver-d55dfcdfc-d8ghr\" (UID: \"492c9136-a973-43ca-ad2c-4650392e6e38\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d8ghr" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.445513 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qt4xz\" (UniqueName: \"kubernetes.io/projected/de71ba59-fb73-4f54-be20-690e4e94b446-kube-api-access-qt4xz\") pod \"openshift-apiserver-operator-796bbdcf4f-92tgg\" (UID: \"de71ba59-fb73-4f54-be20-690e4e94b446\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-92tgg" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.445536 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/5ccc741a-98e5-420d-ad6e-4260ea2abb1f-audit\") pod \"apiserver-76f77b778f-qp4rc\" (UID: \"5ccc741a-98e5-420d-ad6e-4260ea2abb1f\") " pod="openshift-apiserver/apiserver-76f77b778f-qp4rc" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.445572 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/599a933f-fb51-4ba8-bf84-bd7f9ce63af5-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-76m2l\" (UID: \"599a933f-fb51-4ba8-bf84-bd7f9ce63af5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-76m2l" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.445588 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/599a933f-fb51-4ba8-bf84-bd7f9ce63af5-audit-dir\") pod \"apiserver-7bbb656c7d-76m2l\" (UID: \"599a933f-fb51-4ba8-bf84-bd7f9ce63af5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-76m2l" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.445604 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b88b052c-169c-4802-8705-bbd084630a7a-srv-cert\") pod \"olm-operator-6b444d44fb-ps26v\" (UID: \"b88b052c-169c-4802-8705-bbd084630a7a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ps26v" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.445643 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/5ccc741a-98e5-420d-ad6e-4260ea2abb1f-node-pullsecrets\") pod \"apiserver-76f77b778f-qp4rc\" (UID: \"5ccc741a-98e5-420d-ad6e-4260ea2abb1f\") " pod="openshift-apiserver/apiserver-76f77b778f-qp4rc" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.445661 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5ccc741a-98e5-420d-ad6e-4260ea2abb1f-etcd-client\") pod \"apiserver-76f77b778f-qp4rc\" (UID: \"5ccc741a-98e5-420d-ad6e-4260ea2abb1f\") " pod="openshift-apiserver/apiserver-76f77b778f-qp4rc" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.445679 4857 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/599a933f-fb51-4ba8-bf84-bd7f9ce63af5-encryption-config\") pod \"apiserver-7bbb656c7d-76m2l\" (UID: \"599a933f-fb51-4ba8-bf84-bd7f9ce63af5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-76m2l" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.445696 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0723fd39-fd72-4aae-a8ac-4a69a9cea44e-config\") pod \"machine-api-operator-5694c8668f-nm6c5\" (UID: \"0723fd39-fd72-4aae-a8ac-4a69a9cea44e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-nm6c5" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.445711 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5ccc741a-98e5-420d-ad6e-4260ea2abb1f-etcd-serving-ca\") pod \"apiserver-76f77b778f-qp4rc\" (UID: \"5ccc741a-98e5-420d-ad6e-4260ea2abb1f\") " pod="openshift-apiserver/apiserver-76f77b778f-qp4rc" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.445728 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v7hsj\" (UniqueName: \"kubernetes.io/projected/a29f471d-4c21-4e9b-a479-3b8f0d1e0878-kube-api-access-v7hsj\") pod \"console-operator-58897d9998-bp9rq\" (UID: \"a29f471d-4c21-4e9b-a479-3b8f0d1e0878\") " pod="openshift-console-operator/console-operator-58897d9998-bp9rq" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.445742 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9f8f4\" (UniqueName: \"kubernetes.io/projected/599a933f-fb51-4ba8-bf84-bd7f9ce63af5-kube-api-access-9f8f4\") pod \"apiserver-7bbb656c7d-76m2l\" (UID: \"599a933f-fb51-4ba8-bf84-bd7f9ce63af5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-76m2l" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.445766 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ef70370a-9dc0-4378-b4f5-ae6b279330b4-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-v825d\" (UID: \"ef70370a-9dc0-4378-b4f5-ae6b279330b4\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-v825d" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.445782 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gt4rk\" (UniqueName: \"kubernetes.io/projected/49fd1de8-3f87-4d33-b209-df83dd1096a8-kube-api-access-gt4rk\") pod \"collect-profiles-29405610-z8jl4\" (UID: \"49fd1de8-3f87-4d33-b209-df83dd1096a8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405610-z8jl4" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.445809 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/de71ba59-fb73-4f54-be20-690e4e94b446-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-92tgg\" (UID: \"de71ba59-fb73-4f54-be20-690e4e94b446\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-92tgg" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.445827 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dpjwv\" (UniqueName: 
\"kubernetes.io/projected/ef70370a-9dc0-4378-b4f5-ae6b279330b4-kube-api-access-dpjwv\") pod \"multus-admission-controller-857f4d67dd-v825d\" (UID: \"ef70370a-9dc0-4378-b4f5-ae6b279330b4\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-v825d" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.445846 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a29f471d-4c21-4e9b-a479-3b8f0d1e0878-serving-cert\") pod \"console-operator-58897d9998-bp9rq\" (UID: \"a29f471d-4c21-4e9b-a479-3b8f0d1e0878\") " pod="openshift-console-operator/console-operator-58897d9998-bp9rq" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.445884 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xnphq\" (UniqueName: \"kubernetes.io/projected/1d2f2183-42ac-4260-88cd-9379d25bf71d-kube-api-access-xnphq\") pod \"service-ca-operator-777779d784-hl6h5\" (UID: \"1d2f2183-42ac-4260-88cd-9379d25bf71d\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-hl6h5" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.445900 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5ccc741a-98e5-420d-ad6e-4260ea2abb1f-serving-cert\") pod \"apiserver-76f77b778f-qp4rc\" (UID: \"5ccc741a-98e5-420d-ad6e-4260ea2abb1f\") " pod="openshift-apiserver/apiserver-76f77b778f-qp4rc" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.445900 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/5ccc741a-98e5-420d-ad6e-4260ea2abb1f-node-pullsecrets\") pod \"apiserver-76f77b778f-qp4rc\" (UID: \"5ccc741a-98e5-420d-ad6e-4260ea2abb1f\") " pod="openshift-apiserver/apiserver-76f77b778f-qp4rc" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.445919 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4427cefb-2293-4022-afed-686499999cef-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-p7lbs\" (UID: \"4427cefb-2293-4022-afed-686499999cef\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-p7lbs" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.445937 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/599a933f-fb51-4ba8-bf84-bd7f9ce63af5-audit-policies\") pod \"apiserver-7bbb656c7d-76m2l\" (UID: \"599a933f-fb51-4ba8-bf84-bd7f9ce63af5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-76m2l" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.445982 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d6bc1489-81cc-49df-a936-c0c19721b8c3-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-gv6hw\" (UID: \"d6bc1489-81cc-49df-a936-c0c19721b8c3\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gv6hw" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446005 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dmjnv\" (UniqueName: \"kubernetes.io/projected/7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558-kube-api-access-dmjnv\") pod 
\"controller-manager-879f6c89f-cvb79\" (UID: \"7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558\") " pod="openshift-controller-manager/controller-manager-879f6c89f-cvb79" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446023 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a73f6e61-db8f-4ada-8825-201429e1f803-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-mrg8q\" (UID: \"a73f6e61-db8f-4ada-8825-201429e1f803\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-mrg8q" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446040 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fd021cd9-72f4-40a3-87f9-cb253db8ebdc-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-t48t9\" (UID: \"fd021cd9-72f4-40a3-87f9-cb253db8ebdc\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t48t9" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446107 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4546bf69-bd62-41a8-ade2-31c4e7f198da-config\") pod \"machine-approver-56656f9798-xdww2\" (UID: \"4546bf69-bd62-41a8-ade2-31c4e7f198da\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-xdww2" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446125 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/599a933f-fb51-4ba8-bf84-bd7f9ce63af5-etcd-client\") pod \"apiserver-7bbb656c7d-76m2l\" (UID: \"599a933f-fb51-4ba8-bf84-bd7f9ce63af5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-76m2l" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446158 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5ccc741a-98e5-420d-ad6e-4260ea2abb1f-encryption-config\") pod \"apiserver-76f77b778f-qp4rc\" (UID: \"5ccc741a-98e5-420d-ad6e-4260ea2abb1f\") " pod="openshift-apiserver/apiserver-76f77b778f-qp4rc" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446177 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-plnpj\" (UniqueName: \"kubernetes.io/projected/1cb29ded-850a-45ff-8201-a991fe779c01-kube-api-access-plnpj\") pod \"downloads-7954f5f757-5c5lq\" (UID: \"1cb29ded-850a-45ff-8201-a991fe779c01\") " pod="openshift-console/downloads-7954f5f757-5c5lq" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446193 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/0723fd39-fd72-4aae-a8ac-4a69a9cea44e-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-nm6c5\" (UID: \"0723fd39-fd72-4aae-a8ac-4a69a9cea44e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-nm6c5" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446210 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1d2f2183-42ac-4260-88cd-9379d25bf71d-serving-cert\") pod \"service-ca-operator-777779d784-hl6h5\" (UID: \"1d2f2183-42ac-4260-88cd-9379d25bf71d\") " 
pod="openshift-service-ca-operator/service-ca-operator-777779d784-hl6h5" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446228 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5ccc741a-98e5-420d-ad6e-4260ea2abb1f-trusted-ca-bundle\") pod \"apiserver-76f77b778f-qp4rc\" (UID: \"5ccc741a-98e5-420d-ad6e-4260ea2abb1f\") " pod="openshift-apiserver/apiserver-76f77b778f-qp4rc" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446250 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nrrrb\" (UniqueName: \"kubernetes.io/projected/0723fd39-fd72-4aae-a8ac-4a69a9cea44e-kube-api-access-nrrrb\") pod \"machine-api-operator-5694c8668f-nm6c5\" (UID: \"0723fd39-fd72-4aae-a8ac-4a69a9cea44e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-nm6c5" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446277 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558-config\") pod \"controller-manager-879f6c89f-cvb79\" (UID: \"7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558\") " pod="openshift-controller-manager/controller-manager-879f6c89f-cvb79" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446353 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/5ccc741a-98e5-420d-ad6e-4260ea2abb1f-image-import-ca\") pod \"apiserver-76f77b778f-qp4rc\" (UID: \"5ccc741a-98e5-420d-ad6e-4260ea2abb1f\") " pod="openshift-apiserver/apiserver-76f77b778f-qp4rc" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446387 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5ccc741a-98e5-420d-ad6e-4260ea2abb1f-audit-dir\") pod \"apiserver-76f77b778f-qp4rc\" (UID: \"5ccc741a-98e5-420d-ad6e-4260ea2abb1f\") " pod="openshift-apiserver/apiserver-76f77b778f-qp4rc" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446393 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-ddlrq"] Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446410 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558-client-ca\") pod \"controller-manager-879f6c89f-cvb79\" (UID: \"7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558\") " pod="openshift-controller-manager/controller-manager-879f6c89f-cvb79" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446433 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/49fd1de8-3f87-4d33-b209-df83dd1096a8-config-volume\") pod \"collect-profiles-29405610-z8jl4\" (UID: \"49fd1de8-3f87-4d33-b209-df83dd1096a8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405610-z8jl4" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446452 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1d2f2183-42ac-4260-88cd-9379d25bf71d-config\") pod \"service-ca-operator-777779d784-hl6h5\" (UID: \"1d2f2183-42ac-4260-88cd-9379d25bf71d\") " 
pod="openshift-service-ca-operator/service-ca-operator-777779d784-hl6h5" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446471 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/34073888-721b-429f-81c5-e12d90b5b7e8-images\") pod \"machine-config-operator-74547568cd-78f6q\" (UID: \"34073888-721b-429f-81c5-e12d90b5b7e8\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-78f6q" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446487 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4427cefb-2293-4022-afed-686499999cef-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-p7lbs\" (UID: \"4427cefb-2293-4022-afed-686499999cef\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-p7lbs" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446506 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ccc741a-98e5-420d-ad6e-4260ea2abb1f-config\") pod \"apiserver-76f77b778f-qp4rc\" (UID: \"5ccc741a-98e5-420d-ad6e-4260ea2abb1f\") " pod="openshift-apiserver/apiserver-76f77b778f-qp4rc" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446523 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qdslz\" (UniqueName: \"kubernetes.io/projected/5ccc741a-98e5-420d-ad6e-4260ea2abb1f-kube-api-access-qdslz\") pod \"apiserver-76f77b778f-qp4rc\" (UID: \"5ccc741a-98e5-420d-ad6e-4260ea2abb1f\") " pod="openshift-apiserver/apiserver-76f77b778f-qp4rc" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446549 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/599a933f-fb51-4ba8-bf84-bd7f9ce63af5-serving-cert\") pod \"apiserver-7bbb656c7d-76m2l\" (UID: \"599a933f-fb51-4ba8-bf84-bd7f9ce63af5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-76m2l" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446564 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/599a933f-fb51-4ba8-bf84-bd7f9ce63af5-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-76m2l\" (UID: \"599a933f-fb51-4ba8-bf84-bd7f9ce63af5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-76m2l" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446582 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fd021cd9-72f4-40a3-87f9-cb253db8ebdc-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-t48t9\" (UID: \"fd021cd9-72f4-40a3-87f9-cb253db8ebdc\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t48t9" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446600 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pj7jg\" (UniqueName: \"kubernetes.io/projected/492c9136-a973-43ca-ad2c-4650392e6e38-kube-api-access-pj7jg\") pod \"packageserver-d55dfcdfc-d8ghr\" (UID: \"492c9136-a973-43ca-ad2c-4650392e6e38\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d8ghr" Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 
13:31:26.446616 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/4546bf69-bd62-41a8-ade2-31c4e7f198da-auth-proxy-config\") pod \"machine-approver-56656f9798-xdww2\" (UID: \"4546bf69-bd62-41a8-ade2-31c4e7f198da\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-xdww2"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446635 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/4546bf69-bd62-41a8-ade2-31c4e7f198da-machine-approver-tls\") pod \"machine-approver-56656f9798-xdww2\" (UID: \"4546bf69-bd62-41a8-ade2-31c4e7f198da\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-xdww2"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446677 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/fd021cd9-72f4-40a3-87f9-cb253db8ebdc-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-t48t9\" (UID: \"fd021cd9-72f4-40a3-87f9-cb253db8ebdc\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t48t9"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446698 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/492c9136-a973-43ca-ad2c-4650392e6e38-tmpfs\") pod \"packageserver-d55dfcdfc-d8ghr\" (UID: \"492c9136-a973-43ca-ad2c-4650392e6e38\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d8ghr"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446713 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/492c9136-a973-43ca-ad2c-4650392e6e38-apiservice-cert\") pod \"packageserver-d55dfcdfc-d8ghr\" (UID: \"492c9136-a973-43ca-ad2c-4650392e6e38\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d8ghr"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446730 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d6bc1489-81cc-49df-a936-c0c19721b8c3-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-gv6hw\" (UID: \"d6bc1489-81cc-49df-a936-c0c19721b8c3\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gv6hw"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446747 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/34073888-721b-429f-81c5-e12d90b5b7e8-auth-proxy-config\") pod \"machine-config-operator-74547568cd-78f6q\" (UID: \"34073888-721b-429f-81c5-e12d90b5b7e8\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-78f6q"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446762 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fvgsg\" (UniqueName: \"kubernetes.io/projected/34073888-721b-429f-81c5-e12d90b5b7e8-kube-api-access-fvgsg\") pod \"machine-config-operator-74547568cd-78f6q\" (UID: \"34073888-721b-429f-81c5-e12d90b5b7e8\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-78f6q"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446780 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b88b052c-169c-4802-8705-bbd084630a7a-profile-collector-cert\") pod \"olm-operator-6b444d44fb-ps26v\" (UID: \"b88b052c-169c-4802-8705-bbd084630a7a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ps26v"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446795 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4427cefb-2293-4022-afed-686499999cef-config\") pod \"kube-apiserver-operator-766d6c64bb-p7lbs\" (UID: \"4427cefb-2293-4022-afed-686499999cef\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-p7lbs"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446814 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/de71ba59-fb73-4f54-be20-690e4e94b446-config\") pod \"openshift-apiserver-operator-796bbdcf4f-92tgg\" (UID: \"de71ba59-fb73-4f54-be20-690e4e94b446\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-92tgg"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446830 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/951e34a8-00f5-479a-9de8-ee53ee32da75-available-featuregates\") pod \"openshift-config-operator-7777fb866f-9sptz\" (UID: \"951e34a8-00f5-479a-9de8-ee53ee32da75\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9sptz"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446847 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558-serving-cert\") pod \"controller-manager-879f6c89f-cvb79\" (UID: \"7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558\") " pod="openshift-controller-manager/controller-manager-879f6c89f-cvb79"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446870 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5sv8\" (UniqueName: \"kubernetes.io/projected/4546bf69-bd62-41a8-ade2-31c4e7f198da-kube-api-access-g5sv8\") pod \"machine-approver-56656f9798-xdww2\" (UID: \"4546bf69-bd62-41a8-ade2-31c4e7f198da\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-xdww2"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446887 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gkbv8\" (UniqueName: \"kubernetes.io/projected/b88b052c-169c-4802-8705-bbd084630a7a-kube-api-access-gkbv8\") pod \"olm-operator-6b444d44fb-ps26v\" (UID: \"b88b052c-169c-4802-8705-bbd084630a7a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ps26v"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446890 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/5ccc741a-98e5-420d-ad6e-4260ea2abb1f-audit\") pod \"apiserver-76f77b778f-qp4rc\" (UID: \"5ccc741a-98e5-420d-ad6e-4260ea2abb1f\") " pod="openshift-apiserver/apiserver-76f77b778f-qp4rc"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446902 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a29f471d-4c21-4e9b-a479-3b8f0d1e0878-trusted-ca\") pod \"console-operator-58897d9998-bp9rq\" (UID: \"a29f471d-4c21-4e9b-a479-3b8f0d1e0878\") " pod="openshift-console-operator/console-operator-58897d9998-bp9rq"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446960 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5ccc741a-98e5-420d-ad6e-4260ea2abb1f-audit-dir\") pod \"apiserver-76f77b778f-qp4rc\" (UID: \"5ccc741a-98e5-420d-ad6e-4260ea2abb1f\") " pod="openshift-apiserver/apiserver-76f77b778f-qp4rc"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.446985 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kzbgv\" (UniqueName: \"kubernetes.io/projected/a73f6e61-db8f-4ada-8825-201429e1f803-kube-api-access-kzbgv\") pod \"cluster-samples-operator-665b6dd947-mrg8q\" (UID: \"a73f6e61-db8f-4ada-8825-201429e1f803\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-mrg8q"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.447020 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/49fd1de8-3f87-4d33-b209-df83dd1096a8-secret-volume\") pod \"collect-profiles-29405610-z8jl4\" (UID: \"49fd1de8-3f87-4d33-b209-df83dd1096a8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405610-z8jl4"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.447071 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/0723fd39-fd72-4aae-a8ac-4a69a9cea44e-images\") pod \"machine-api-operator-5694c8668f-nm6c5\" (UID: \"0723fd39-fd72-4aae-a8ac-4a69a9cea44e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-nm6c5"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.447074 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/5ccc741a-98e5-420d-ad6e-4260ea2abb1f-etcd-serving-ca\") pod \"apiserver-76f77b778f-qp4rc\" (UID: \"5ccc741a-98e5-420d-ad6e-4260ea2abb1f\") " pod="openshift-apiserver/apiserver-76f77b778f-qp4rc"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.447099 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-222kx\" (UniqueName: \"kubernetes.io/projected/951e34a8-00f5-479a-9de8-ee53ee32da75-kube-api-access-222kx\") pod \"openshift-config-operator-7777fb866f-9sptz\" (UID: \"951e34a8-00f5-479a-9de8-ee53ee32da75\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9sptz"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.447512 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/5ccc741a-98e5-420d-ad6e-4260ea2abb1f-image-import-ca\") pod \"apiserver-76f77b778f-qp4rc\" (UID: \"5ccc741a-98e5-420d-ad6e-4260ea2abb1f\") " pod="openshift-apiserver/apiserver-76f77b778f-qp4rc"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.447130 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nqc7h\" (UniqueName: \"kubernetes.io/projected/fd021cd9-72f4-40a3-87f9-cb253db8ebdc-kube-api-access-nqc7h\") pod \"cluster-image-registry-operator-dc59b4c8b-t48t9\" (UID: \"fd021cd9-72f4-40a3-87f9-cb253db8ebdc\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t48t9"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.447742 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-cvb79\" (UID: \"7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558\") " pod="openshift-controller-manager/controller-manager-879f6c89f-cvb79"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.447770 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/34073888-721b-429f-81c5-e12d90b5b7e8-proxy-tls\") pod \"machine-config-operator-74547568cd-78f6q\" (UID: \"34073888-721b-429f-81c5-e12d90b5b7e8\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-78f6q"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.447796 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d6bc1489-81cc-49df-a936-c0c19721b8c3-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-gv6hw\" (UID: \"d6bc1489-81cc-49df-a936-c0c19721b8c3\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gv6hw"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.447926 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4546bf69-bd62-41a8-ade2-31c4e7f198da-config\") pod \"machine-approver-56656f9798-xdww2\" (UID: \"4546bf69-bd62-41a8-ade2-31c4e7f198da\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-xdww2"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.448203 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-lq2rk"]
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.448358 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558-client-ca\") pod \"controller-manager-879f6c89f-cvb79\" (UID: \"7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558\") " pod="openshift-controller-manager/controller-manager-879f6c89f-cvb79"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.448695 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0723fd39-fd72-4aae-a8ac-4a69a9cea44e-config\") pod \"machine-api-operator-5694c8668f-nm6c5\" (UID: \"0723fd39-fd72-4aae-a8ac-4a69a9cea44e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-nm6c5"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.451664 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-cvb79\" (UID: \"7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558\") " pod="openshift-controller-manager/controller-manager-879f6c89f-cvb79"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.452528 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-plzgn"]
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.452581 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-g79kz"]
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.452596 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-qnqmv"]
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.452809 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ccc741a-98e5-420d-ad6e-4260ea2abb1f-config\") pod \"apiserver-76f77b778f-qp4rc\" (UID: \"5ccc741a-98e5-420d-ad6e-4260ea2abb1f\") " pod="openshift-apiserver/apiserver-76f77b778f-qp4rc"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.452576 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/4546bf69-bd62-41a8-ade2-31c4e7f198da-machine-approver-tls\") pod \"machine-approver-56656f9798-xdww2\" (UID: \"4546bf69-bd62-41a8-ade2-31c4e7f198da\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-xdww2"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.455063 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/4546bf69-bd62-41a8-ade2-31c4e7f198da-auth-proxy-config\") pod \"machine-approver-56656f9798-xdww2\" (UID: \"4546bf69-bd62-41a8-ade2-31c4e7f198da\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-xdww2"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.457200 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/951e34a8-00f5-479a-9de8-ee53ee32da75-serving-cert\") pod \"openshift-config-operator-7777fb866f-9sptz\" (UID: \"951e34a8-00f5-479a-9de8-ee53ee32da75\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9sptz"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.458874 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/951e34a8-00f5-479a-9de8-ee53ee32da75-available-featuregates\") pod \"openshift-config-operator-7777fb866f-9sptz\" (UID: \"951e34a8-00f5-479a-9de8-ee53ee32da75\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9sptz"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.459346 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5ccc741a-98e5-420d-ad6e-4260ea2abb1f-trusted-ca-bundle\") pod \"apiserver-76f77b778f-qp4rc\" (UID: \"5ccc741a-98e5-420d-ad6e-4260ea2abb1f\") " pod="openshift-apiserver/apiserver-76f77b778f-qp4rc"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.460032 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558-config\") pod \"controller-manager-879f6c89f-cvb79\" (UID: \"7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558\") " pod="openshift-controller-manager/controller-manager-879f6c89f-cvb79"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.460654 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/0723fd39-fd72-4aae-a8ac-4a69a9cea44e-images\") pod \"machine-api-operator-5694c8668f-nm6c5\" (UID: \"0723fd39-fd72-4aae-a8ac-4a69a9cea44e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-nm6c5"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.460816 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/5ccc741a-98e5-420d-ad6e-4260ea2abb1f-encryption-config\") pod \"apiserver-76f77b778f-qp4rc\" (UID: \"5ccc741a-98e5-420d-ad6e-4260ea2abb1f\") " pod="openshift-apiserver/apiserver-76f77b778f-qp4rc"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.461141 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5ccc741a-98e5-420d-ad6e-4260ea2abb1f-serving-cert\") pod \"apiserver-76f77b778f-qp4rc\" (UID: \"5ccc741a-98e5-420d-ad6e-4260ea2abb1f\") " pod="openshift-apiserver/apiserver-76f77b778f-qp4rc"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.461465 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/de71ba59-fb73-4f54-be20-690e4e94b446-config\") pod \"openshift-apiserver-operator-796bbdcf4f-92tgg\" (UID: \"de71ba59-fb73-4f54-be20-690e4e94b446\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-92tgg"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.454204 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/951e34a8-00f5-479a-9de8-ee53ee32da75-serving-cert\") pod \"openshift-config-operator-7777fb866f-9sptz\" (UID: \"951e34a8-00f5-479a-9de8-ee53ee32da75\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9sptz"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.462035 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/0723fd39-fd72-4aae-a8ac-4a69a9cea44e-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-nm6c5\" (UID: \"0723fd39-fd72-4aae-a8ac-4a69a9cea44e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-nm6c5"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.462957 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a73f6e61-db8f-4ada-8825-201429e1f803-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-mrg8q\" (UID: \"a73f6e61-db8f-4ada-8825-201429e1f803\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-mrg8q"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.463121 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.463162 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-5zxrw"]
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.464498 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-qnqmv"]
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.464669 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-5zxrw"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.464854 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/de71ba59-fb73-4f54-be20-690e4e94b446-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-92tgg\" (UID: \"de71ba59-fb73-4f54-be20-690e4e94b446\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-92tgg"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.464669 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/5ccc741a-98e5-420d-ad6e-4260ea2abb1f-etcd-client\") pod \"apiserver-76f77b778f-qp4rc\" (UID: \"5ccc741a-98e5-420d-ad6e-4260ea2abb1f\") " pod="openshift-apiserver/apiserver-76f77b778f-qp4rc"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.465195 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-qnqmv"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.465794 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558-serving-cert\") pod \"controller-manager-879f6c89f-cvb79\" (UID: \"7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558\") " pod="openshift-controller-manager/controller-manager-879f6c89f-cvb79"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.479526 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.498643 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.518591 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.539743 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.559335 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.562232 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a29f471d-4c21-4e9b-a479-3b8f0d1e0878-serving-cert\") pod \"console-operator-58897d9998-bp9rq\" (UID: \"a29f471d-4c21-4e9b-a479-3b8f0d1e0878\") " pod="openshift-console-operator/console-operator-58897d9998-bp9rq"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.562275 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xnphq\" (UniqueName: \"kubernetes.io/projected/1d2f2183-42ac-4260-88cd-9379d25bf71d-kube-api-access-xnphq\") pod \"service-ca-operator-777779d784-hl6h5\" (UID: \"1d2f2183-42ac-4260-88cd-9379d25bf71d\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-hl6h5"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.562295 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4427cefb-2293-4022-afed-686499999cef-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-p7lbs\" (UID: \"4427cefb-2293-4022-afed-686499999cef\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-p7lbs"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.562331 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/599a933f-fb51-4ba8-bf84-bd7f9ce63af5-audit-policies\") pod \"apiserver-7bbb656c7d-76m2l\" (UID: \"599a933f-fb51-4ba8-bf84-bd7f9ce63af5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-76m2l"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.562356 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fd021cd9-72f4-40a3-87f9-cb253db8ebdc-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-t48t9\" (UID: \"fd021cd9-72f4-40a3-87f9-cb253db8ebdc\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t48t9"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.562376 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d6bc1489-81cc-49df-a936-c0c19721b8c3-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-gv6hw\" (UID: \"d6bc1489-81cc-49df-a936-c0c19721b8c3\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gv6hw"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.562393 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/599a933f-fb51-4ba8-bf84-bd7f9ce63af5-etcd-client\") pod \"apiserver-7bbb656c7d-76m2l\" (UID: \"599a933f-fb51-4ba8-bf84-bd7f9ce63af5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-76m2l"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.562424 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1d2f2183-42ac-4260-88cd-9379d25bf71d-serving-cert\") pod \"service-ca-operator-777779d784-hl6h5\" (UID: \"1d2f2183-42ac-4260-88cd-9379d25bf71d\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-hl6h5"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.562448 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/49fd1de8-3f87-4d33-b209-df83dd1096a8-config-volume\") pod \"collect-profiles-29405610-z8jl4\" (UID: \"49fd1de8-3f87-4d33-b209-df83dd1096a8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405610-z8jl4"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.562468 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1d2f2183-42ac-4260-88cd-9379d25bf71d-config\") pod \"service-ca-operator-777779d784-hl6h5\" (UID: \"1d2f2183-42ac-4260-88cd-9379d25bf71d\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-hl6h5"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.562489 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/34073888-721b-429f-81c5-e12d90b5b7e8-images\") pod \"machine-config-operator-74547568cd-78f6q\" (UID: \"34073888-721b-429f-81c5-e12d90b5b7e8\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-78f6q"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.562506 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4427cefb-2293-4022-afed-686499999cef-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-p7lbs\" (UID: \"4427cefb-2293-4022-afed-686499999cef\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-p7lbs"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.562525 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/599a933f-fb51-4ba8-bf84-bd7f9ce63af5-serving-cert\") pod \"apiserver-7bbb656c7d-76m2l\" (UID: \"599a933f-fb51-4ba8-bf84-bd7f9ce63af5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-76m2l"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.562540 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/599a933f-fb51-4ba8-bf84-bd7f9ce63af5-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-76m2l\" (UID: \"599a933f-fb51-4ba8-bf84-bd7f9ce63af5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-76m2l"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.562557 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pj7jg\" (UniqueName: \"kubernetes.io/projected/492c9136-a973-43ca-ad2c-4650392e6e38-kube-api-access-pj7jg\") pod \"packageserver-d55dfcdfc-d8ghr\" (UID: \"492c9136-a973-43ca-ad2c-4650392e6e38\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d8ghr"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.562572 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fd021cd9-72f4-40a3-87f9-cb253db8ebdc-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-t48t9\" (UID: \"fd021cd9-72f4-40a3-87f9-cb253db8ebdc\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t48t9"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.562590 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/fd021cd9-72f4-40a3-87f9-cb253db8ebdc-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-t48t9\" (UID: \"fd021cd9-72f4-40a3-87f9-cb253db8ebdc\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t48t9"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.562609 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d6bc1489-81cc-49df-a936-c0c19721b8c3-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-gv6hw\" (UID: \"d6bc1489-81cc-49df-a936-c0c19721b8c3\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gv6hw"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.562625 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/492c9136-a973-43ca-ad2c-4650392e6e38-tmpfs\") pod \"packageserver-d55dfcdfc-d8ghr\" (UID: \"492c9136-a973-43ca-ad2c-4650392e6e38\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d8ghr"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.562642 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/492c9136-a973-43ca-ad2c-4650392e6e38-apiservice-cert\") pod \"packageserver-d55dfcdfc-d8ghr\" (UID: \"492c9136-a973-43ca-ad2c-4650392e6e38\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d8ghr"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.562659 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b88b052c-169c-4802-8705-bbd084630a7a-profile-collector-cert\") pod \"olm-operator-6b444d44fb-ps26v\" (UID: \"b88b052c-169c-4802-8705-bbd084630a7a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ps26v"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.562674 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4427cefb-2293-4022-afed-686499999cef-config\") pod \"kube-apiserver-operator-766d6c64bb-p7lbs\" (UID: \"4427cefb-2293-4022-afed-686499999cef\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-p7lbs"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.562691 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/34073888-721b-429f-81c5-e12d90b5b7e8-auth-proxy-config\") pod \"machine-config-operator-74547568cd-78f6q\" (UID: \"34073888-721b-429f-81c5-e12d90b5b7e8\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-78f6q"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.562709 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fvgsg\" (UniqueName: \"kubernetes.io/projected/34073888-721b-429f-81c5-e12d90b5b7e8-kube-api-access-fvgsg\") pod \"machine-config-operator-74547568cd-78f6q\" (UID: \"34073888-721b-429f-81c5-e12d90b5b7e8\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-78f6q"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.562741 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gkbv8\" (UniqueName: \"kubernetes.io/projected/b88b052c-169c-4802-8705-bbd084630a7a-kube-api-access-gkbv8\") pod \"olm-operator-6b444d44fb-ps26v\" (UID: \"b88b052c-169c-4802-8705-bbd084630a7a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ps26v"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.562757 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a29f471d-4c21-4e9b-a479-3b8f0d1e0878-trusted-ca\") pod \"console-operator-58897d9998-bp9rq\" (UID: \"a29f471d-4c21-4e9b-a479-3b8f0d1e0878\") " pod="openshift-console-operator/console-operator-58897d9998-bp9rq"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.562778 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/49fd1de8-3f87-4d33-b209-df83dd1096a8-secret-volume\") pod \"collect-profiles-29405610-z8jl4\" (UID: \"49fd1de8-3f87-4d33-b209-df83dd1096a8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405610-z8jl4"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.562798 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nqc7h\" (UniqueName: \"kubernetes.io/projected/fd021cd9-72f4-40a3-87f9-cb253db8ebdc-kube-api-access-nqc7h\") pod \"cluster-image-registry-operator-dc59b4c8b-t48t9\" (UID: \"fd021cd9-72f4-40a3-87f9-cb253db8ebdc\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t48t9"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.562823 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/34073888-721b-429f-81c5-e12d90b5b7e8-proxy-tls\") pod \"machine-config-operator-74547568cd-78f6q\" (UID: \"34073888-721b-429f-81c5-e12d90b5b7e8\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-78f6q"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.562839 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d6bc1489-81cc-49df-a936-c0c19721b8c3-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-gv6hw\" (UID: \"d6bc1489-81cc-49df-a936-c0c19721b8c3\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gv6hw"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.562856 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a29f471d-4c21-4e9b-a479-3b8f0d1e0878-config\") pod \"console-operator-58897d9998-bp9rq\" (UID: \"a29f471d-4c21-4e9b-a479-3b8f0d1e0878\") " pod="openshift-console-operator/console-operator-58897d9998-bp9rq"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.562872 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/492c9136-a973-43ca-ad2c-4650392e6e38-webhook-cert\") pod \"packageserver-d55dfcdfc-d8ghr\" (UID: \"492c9136-a973-43ca-ad2c-4650392e6e38\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d8ghr"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.562889 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/599a933f-fb51-4ba8-bf84-bd7f9ce63af5-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-76m2l\" (UID: \"599a933f-fb51-4ba8-bf84-bd7f9ce63af5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-76m2l"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.562904 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/599a933f-fb51-4ba8-bf84-bd7f9ce63af5-audit-dir\") pod \"apiserver-7bbb656c7d-76m2l\" (UID: \"599a933f-fb51-4ba8-bf84-bd7f9ce63af5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-76m2l"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.562925 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b88b052c-169c-4802-8705-bbd084630a7a-srv-cert\") pod \"olm-operator-6b444d44fb-ps26v\" (UID: \"b88b052c-169c-4802-8705-bbd084630a7a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ps26v"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.562964 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/599a933f-fb51-4ba8-bf84-bd7f9ce63af5-encryption-config\") pod \"apiserver-7bbb656c7d-76m2l\" (UID: \"599a933f-fb51-4ba8-bf84-bd7f9ce63af5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-76m2l"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.562991 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v7hsj\" (UniqueName: \"kubernetes.io/projected/a29f471d-4c21-4e9b-a479-3b8f0d1e0878-kube-api-access-v7hsj\") pod \"console-operator-58897d9998-bp9rq\" (UID: \"a29f471d-4c21-4e9b-a479-3b8f0d1e0878\") " pod="openshift-console-operator/console-operator-58897d9998-bp9rq"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.563013 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9f8f4\" (UniqueName: \"kubernetes.io/projected/599a933f-fb51-4ba8-bf84-bd7f9ce63af5-kube-api-access-9f8f4\") pod \"apiserver-7bbb656c7d-76m2l\" (UID: \"599a933f-fb51-4ba8-bf84-bd7f9ce63af5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-76m2l"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.563053 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ef70370a-9dc0-4378-b4f5-ae6b279330b4-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-v825d\" (UID: \"ef70370a-9dc0-4378-b4f5-ae6b279330b4\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-v825d"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.563074 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gt4rk\" (UniqueName: \"kubernetes.io/projected/49fd1de8-3f87-4d33-b209-df83dd1096a8-kube-api-access-gt4rk\") pod \"collect-profiles-29405610-z8jl4\" (UID: \"49fd1de8-3f87-4d33-b209-df83dd1096a8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405610-z8jl4"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.563099 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dpjwv\" (UniqueName: \"kubernetes.io/projected/ef70370a-9dc0-4378-b4f5-ae6b279330b4-kube-api-access-dpjwv\") pod \"multus-admission-controller-857f4d67dd-v825d\" (UID: \"ef70370a-9dc0-4378-b4f5-ae6b279330b4\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-v825d"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.563252 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/599a933f-fb51-4ba8-bf84-bd7f9ce63af5-audit-policies\") pod \"apiserver-7bbb656c7d-76m2l\" (UID: \"599a933f-fb51-4ba8-bf84-bd7f9ce63af5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-76m2l"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.564025 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/34073888-721b-429f-81c5-e12d90b5b7e8-auth-proxy-config\") pod \"machine-config-operator-74547568cd-78f6q\" (UID: \"34073888-721b-429f-81c5-e12d90b5b7e8\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-78f6q"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.564047 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/492c9136-a973-43ca-ad2c-4650392e6e38-tmpfs\") pod \"packageserver-d55dfcdfc-d8ghr\" (UID: \"492c9136-a973-43ca-ad2c-4650392e6e38\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d8ghr"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.564189 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/599a933f-fb51-4ba8-bf84-bd7f9ce63af5-audit-dir\") pod \"apiserver-7bbb656c7d-76m2l\" (UID: \"599a933f-fb51-4ba8-bf84-bd7f9ce63af5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-76m2l"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.564271 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/599a933f-fb51-4ba8-bf84-bd7f9ce63af5-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-76m2l\" (UID: \"599a933f-fb51-4ba8-bf84-bd7f9ce63af5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-76m2l"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.564210 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/599a933f-fb51-4ba8-bf84-bd7f9ce63af5-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-76m2l\" (UID: \"599a933f-fb51-4ba8-bf84-bd7f9ce63af5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-76m2l"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.564907 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a29f471d-4c21-4e9b-a479-3b8f0d1e0878-trusted-ca\") pod \"console-operator-58897d9998-bp9rq\" (UID: \"a29f471d-4c21-4e9b-a479-3b8f0d1e0878\") " pod="openshift-console-operator/console-operator-58897d9998-bp9rq"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.565024 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a29f471d-4c21-4e9b-a479-3b8f0d1e0878-config\") pod \"console-operator-58897d9998-bp9rq\" (UID: \"a29f471d-4c21-4e9b-a479-3b8f0d1e0878\") " pod="openshift-console-operator/console-operator-58897d9998-bp9rq"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.565557 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a29f471d-4c21-4e9b-a479-3b8f0d1e0878-serving-cert\") pod \"console-operator-58897d9998-bp9rq\" (UID: \"a29f471d-4c21-4e9b-a479-3b8f0d1e0878\") " pod="openshift-console-operator/console-operator-58897d9998-bp9rq"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.566639 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/599a933f-fb51-4ba8-bf84-bd7f9ce63af5-etcd-client\") pod \"apiserver-7bbb656c7d-76m2l\" (UID: \"599a933f-fb51-4ba8-bf84-bd7f9ce63af5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-76m2l"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.567342 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/599a933f-fb51-4ba8-bf84-bd7f9ce63af5-encryption-config\") pod \"apiserver-7bbb656c7d-76m2l\" (UID: \"599a933f-fb51-4ba8-bf84-bd7f9ce63af5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-76m2l"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.567861 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/fd021cd9-72f4-40a3-87f9-cb253db8ebdc-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-t48t9\" (UID: \"fd021cd9-72f4-40a3-87f9-cb253db8ebdc\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t48t9"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.567960 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/599a933f-fb51-4ba8-bf84-bd7f9ce63af5-serving-cert\") pod \"apiserver-7bbb656c7d-76m2l\" (UID: \"599a933f-fb51-4ba8-bf84-bd7f9ce63af5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-76m2l"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.572312 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fd021cd9-72f4-40a3-87f9-cb253db8ebdc-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-t48t9\" (UID: \"fd021cd9-72f4-40a3-87f9-cb253db8ebdc\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t48t9"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.579131 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.599724 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.619262 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.640161 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.660175 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.680347 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.700638 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.719622 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.759766 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.780231 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.799131 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.819609 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.840053 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.860100 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.879650 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.900784 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.919467 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.939625 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.947450 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d6bc1489-81cc-49df-a936-c0c19721b8c3-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-gv6hw\" (UID: \"d6bc1489-81cc-49df-a936-c0c19721b8c3\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gv6hw"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.959636 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.965470 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d6bc1489-81cc-49df-a936-c0c19721b8c3-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-gv6hw\" (UID: \"d6bc1489-81cc-49df-a936-c0c19721b8c3\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gv6hw"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.979850 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Nov 28 13:31:26 crc kubenswrapper[4857]: I1128 13:31:26.999130 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.019893 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.039789 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.059982 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.079795 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.088442 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/ef70370a-9dc0-4378-b4f5-ae6b279330b4-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-v825d\" (UID: \"ef70370a-9dc0-4378-b4f5-ae6b279330b4\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-v825d"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.099523 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.120140 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.139999 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.147554 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b88b052c-169c-4802-8705-bbd084630a7a-profile-collector-cert\") pod \"olm-operator-6b444d44fb-ps26v\" (UID: \"b88b052c-169c-4802-8705-bbd084630a7a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ps26v"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.147991 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/49fd1de8-3f87-4d33-b209-df83dd1096a8-secret-volume\") pod \"collect-profiles-29405610-z8jl4\" (UID: \"49fd1de8-3f87-4d33-b209-df83dd1096a8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405610-z8jl4"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.160548 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.167862 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b88b052c-169c-4802-8705-bbd084630a7a-srv-cert\") pod \"olm-operator-6b444d44fb-ps26v\" (UID: \"b88b052c-169c-4802-8705-bbd084630a7a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ps26v"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.180130 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.200280 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.219614 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.225869 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1d2f2183-42ac-4260-88cd-9379d25bf71d-serving-cert\") pod \"service-ca-operator-777779d784-hl6h5\" (UID: \"1d2f2183-42ac-4260-88cd-9379d25bf71d\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-hl6h5"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.227672 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.227763 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.239726 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.260731 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.279470 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.300138 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.319985 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.324605 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1d2f2183-42ac-4260-88cd-9379d25bf71d-config\") pod \"service-ca-operator-777779d784-hl6h5\" (UID: \"1d2f2183-42ac-4260-88cd-9379d25bf71d\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-hl6h5"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.340084 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.361174 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.377722 4857 request.go:700] Waited for 1.01600047s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-operator-lifecycle-manager/secrets?fieldSelector=metadata.name%3Dpackageserver-service-cert&limit=500&resourceVersion=0
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.380046 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.387270 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/492c9136-a973-43ca-ad2c-4650392e6e38-apiservice-cert\") pod \"packageserver-d55dfcdfc-d8ghr\" (UID: \"492c9136-a973-43ca-ad2c-4650392e6e38\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d8ghr"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.387799 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/492c9136-a973-43ca-ad2c-4650392e6e38-webhook-cert\") pod \"packageserver-d55dfcdfc-d8ghr\" (UID: \"492c9136-a973-43ca-ad2c-4650392e6e38\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d8ghr"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.400710 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.420091 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.440352 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.460011 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.466113 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4427cefb-2293-4022-afed-686499999cef-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-p7lbs\" (UID: \"4427cefb-2293-4022-afed-686499999cef\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-p7lbs"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.480469 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.500128 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.520079 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.540545 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.545543 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4427cefb-2293-4022-afed-686499999cef-config\") pod \"kube-apiserver-operator-766d6c64bb-p7lbs\" (UID: \"4427cefb-2293-4022-afed-686499999cef\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-p7lbs"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.560282 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images"
Nov 28 13:31:27 crc kubenswrapper[4857]: E1128 13:31:27.564145 4857 configmap.go:193] Couldn't get configMap openshift-operator-lifecycle-manager/collect-profiles-config: failed to sync configmap cache: timed out waiting for the condition
Nov 28 13:31:27 crc kubenswrapper[4857]: E1128 13:31:27.564246 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/49fd1de8-3f87-4d33-b209-df83dd1096a8-config-volume podName:49fd1de8-3f87-4d33-b209-df83dd1096a8 nodeName:}" failed. No retries permitted until 2025-11-28 13:31:28.06422464 +0000 UTC m=+138.188166077 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-volume" (UniqueName: "kubernetes.io/configmap/49fd1de8-3f87-4d33-b209-df83dd1096a8-config-volume") pod "collect-profiles-29405610-z8jl4" (UID: "49fd1de8-3f87-4d33-b209-df83dd1096a8") : failed to sync configmap cache: timed out waiting for the condition
Nov 28 13:31:27 crc kubenswrapper[4857]: E1128 13:31:27.564611 4857 secret.go:188] Couldn't get secret openshift-machine-config-operator/mco-proxy-tls: failed to sync secret cache: timed out waiting for the condition
Nov 28 13:31:27 crc kubenswrapper[4857]: E1128 13:31:27.564660 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/34073888-721b-429f-81c5-e12d90b5b7e8-proxy-tls podName:34073888-721b-429f-81c5-e12d90b5b7e8 nodeName:}" failed. No retries permitted until 2025-11-28 13:31:28.064648612 +0000 UTC m=+138.188590049 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/34073888-721b-429f-81c5-e12d90b5b7e8-proxy-tls") pod "machine-config-operator-74547568cd-78f6q" (UID: "34073888-721b-429f-81c5-e12d90b5b7e8") : failed to sync secret cache: timed out waiting for the condition
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.564716 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/34073888-721b-429f-81c5-e12d90b5b7e8-images\") pod \"machine-config-operator-74547568cd-78f6q\" (UID: \"34073888-721b-429f-81c5-e12d90b5b7e8\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-78f6q"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.579517 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.600003 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.639644 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.659213 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.680432 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.700707 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.720182 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.740523 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.760107 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.779306 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.799754 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.820414 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.840649 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.859457 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.879355 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.899575 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.929769 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.939593 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.960384 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert"
Nov 28 13:31:27 crc kubenswrapper[4857]: I1128 13:31:27.980546 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt"
Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.000072 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.020550 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg"
Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.046914 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca"
Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.060172 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt"
Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.080056 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt"
Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.083180 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/34073888-721b-429f-81c5-e12d90b5b7e8-proxy-tls\") pod \"machine-config-operator-74547568cd-78f6q\" (UID: \"34073888-721b-429f-81c5-e12d90b5b7e8\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-78f6q"
Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.084023 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/49fd1de8-3f87-4d33-b209-df83dd1096a8-config-volume\") pod \"collect-profiles-29405610-z8jl4\" (UID: \"49fd1de8-3f87-4d33-b209-df83dd1096a8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405610-z8jl4"
Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.084897 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/49fd1de8-3f87-4d33-b209-df83dd1096a8-config-volume\") pod \"collect-profiles-29405610-z8jl4\" (UID: \"49fd1de8-3f87-4d33-b209-df83dd1096a8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405610-z8jl4"
Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.086505 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/34073888-721b-429f-81c5-e12d90b5b7e8-proxy-tls\") pod \"machine-config-operator-74547568cd-78f6q\" (UID: \"34073888-721b-429f-81c5-e12d90b5b7e8\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-78f6q"
Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.100719 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx"
Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.120650 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert"
Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.139972 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt"
Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.160583 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt"
Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.180576 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default"
Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.200367 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh"
Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.220185 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls"
Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.254522 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qt4xz\" (UniqueName: \"kubernetes.io/projected/de71ba59-fb73-4f54-be20-690e4e94b446-kube-api-access-qt4xz\") pod \"openshift-apiserver-operator-796bbdcf4f-92tgg\" (UID: \"de71ba59-fb73-4f54-be20-690e4e94b446\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-92tgg"
Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.275172 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dmjnv\" (UniqueName: \"kubernetes.io/projected/7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558-kube-api-access-dmjnv\") pod \"controller-manager-879f6c89f-cvb79\" (UID: \"7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558\") " pod="openshift-controller-manager/controller-manager-879f6c89f-cvb79"
Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.293932 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kzbgv\" (UniqueName: \"kubernetes.io/projected/a73f6e61-db8f-4ada-8825-201429e1f803-kube-api-access-kzbgv\") pod \"cluster-samples-operator-665b6dd947-mrg8q\" (UID: \"a73f6e61-db8f-4ada-8825-201429e1f803\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-mrg8q"
Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.313345
4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qdslz\" (UniqueName: \"kubernetes.io/projected/5ccc741a-98e5-420d-ad6e-4260ea2abb1f-kube-api-access-qdslz\") pod \"apiserver-76f77b778f-qp4rc\" (UID: \"5ccc741a-98e5-420d-ad6e-4260ea2abb1f\") " pod="openshift-apiserver/apiserver-76f77b778f-qp4rc" Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.334557 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-plnpj\" (UniqueName: \"kubernetes.io/projected/1cb29ded-850a-45ff-8201-a991fe779c01-kube-api-access-plnpj\") pod \"downloads-7954f5f757-5c5lq\" (UID: \"1cb29ded-850a-45ff-8201-a991fe779c01\") " pod="openshift-console/downloads-7954f5f757-5c5lq" Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.357638 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-222kx\" (UniqueName: \"kubernetes.io/projected/951e34a8-00f5-479a-9de8-ee53ee32da75-kube-api-access-222kx\") pod \"openshift-config-operator-7777fb866f-9sptz\" (UID: \"951e34a8-00f5-479a-9de8-ee53ee32da75\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9sptz" Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.372863 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nrrrb\" (UniqueName: \"kubernetes.io/projected/0723fd39-fd72-4aae-a8ac-4a69a9cea44e-kube-api-access-nrrrb\") pod \"machine-api-operator-5694c8668f-nm6c5\" (UID: \"0723fd39-fd72-4aae-a8ac-4a69a9cea44e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-nm6c5" Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.378322 4857 request.go:700] Waited for 1.917299466s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-cluster-machine-approver/serviceaccounts/machine-approver-sa/token Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.381800 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-cvb79" Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.394583 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5sv8\" (UniqueName: \"kubernetes.io/projected/4546bf69-bd62-41a8-ade2-31c4e7f198da-kube-api-access-g5sv8\") pod \"machine-approver-56656f9798-xdww2\" (UID: \"4546bf69-bd62-41a8-ade2-31c4e7f198da\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-xdww2" Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.400283 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.418613 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-xdww2" Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.421286 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.428185 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-qp4rc" Nov 28 13:31:28 crc kubenswrapper[4857]: W1128 13:31:28.435787 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4546bf69_bd62_41a8_ade2_31c4e7f198da.slice/crio-4c9fa9b312b8b74a9878cf47ee1ec30bea068c0bfd9eff22e43343e2d810ca25 WatchSource:0}: Error finding container 4c9fa9b312b8b74a9878cf47ee1ec30bea068c0bfd9eff22e43343e2d810ca25: Status 404 returned error can't find the container with id 4c9fa9b312b8b74a9878cf47ee1ec30bea068c0bfd9eff22e43343e2d810ca25 Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.439629 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.454241 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-mrg8q" Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.463832 4857 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.479012 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-92tgg" Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.479605 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.493733 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9sptz" Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.500124 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.503285 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-nm6c5" Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.542362 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-5c5lq" Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.548911 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xnphq\" (UniqueName: \"kubernetes.io/projected/1d2f2183-42ac-4260-88cd-9379d25bf71d-kube-api-access-xnphq\") pod \"service-ca-operator-777779d784-hl6h5\" (UID: \"1d2f2183-42ac-4260-88cd-9379d25bf71d\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-hl6h5" Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.573668 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pj7jg\" (UniqueName: \"kubernetes.io/projected/492c9136-a973-43ca-ad2c-4650392e6e38-kube-api-access-pj7jg\") pod \"packageserver-d55dfcdfc-d8ghr\" (UID: \"492c9136-a973-43ca-ad2c-4650392e6e38\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d8ghr" Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.576131 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fd021cd9-72f4-40a3-87f9-cb253db8ebdc-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-t48t9\" (UID: \"fd021cd9-72f4-40a3-87f9-cb253db8ebdc\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t48t9" Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.594829 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dpjwv\" (UniqueName: \"kubernetes.io/projected/ef70370a-9dc0-4378-b4f5-ae6b279330b4-kube-api-access-dpjwv\") pod \"multus-admission-controller-857f4d67dd-v825d\" (UID: \"ef70370a-9dc0-4378-b4f5-ae6b279330b4\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-v825d" Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.629701 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4427cefb-2293-4022-afed-686499999cef-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-p7lbs\" (UID: \"4427cefb-2293-4022-afed-686499999cef\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-p7lbs" Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.642028 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fvgsg\" (UniqueName: \"kubernetes.io/projected/34073888-721b-429f-81c5-e12d90b5b7e8-kube-api-access-fvgsg\") pod \"machine-config-operator-74547568cd-78f6q\" (UID: \"34073888-721b-429f-81c5-e12d90b5b7e8\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-78f6q" Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.662973 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gkbv8\" (UniqueName: \"kubernetes.io/projected/b88b052c-169c-4802-8705-bbd084630a7a-kube-api-access-gkbv8\") pod \"olm-operator-6b444d44fb-ps26v\" (UID: \"b88b052c-169c-4802-8705-bbd084630a7a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ps26v" Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.676908 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d6bc1489-81cc-49df-a936-c0c19721b8c3-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-gv6hw\" (UID: \"d6bc1489-81cc-49df-a936-c0c19721b8c3\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gv6hw" Nov 28 
13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.699414 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v7hsj\" (UniqueName: \"kubernetes.io/projected/a29f471d-4c21-4e9b-a479-3b8f0d1e0878-kube-api-access-v7hsj\") pod \"console-operator-58897d9998-bp9rq\" (UID: \"a29f471d-4c21-4e9b-a479-3b8f0d1e0878\") " pod="openshift-console-operator/console-operator-58897d9998-bp9rq" Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.700875 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-v825d" Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.710307 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ps26v" Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.717247 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nqc7h\" (UniqueName: \"kubernetes.io/projected/fd021cd9-72f4-40a3-87f9-cb253db8ebdc-kube-api-access-nqc7h\") pod \"cluster-image-registry-operator-dc59b4c8b-t48t9\" (UID: \"fd021cd9-72f4-40a3-87f9-cb253db8ebdc\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t48t9" Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.729990 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-hl6h5" Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.738783 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9f8f4\" (UniqueName: \"kubernetes.io/projected/599a933f-fb51-4ba8-bf84-bd7f9ce63af5-kube-api-access-9f8f4\") pod \"apiserver-7bbb656c7d-76m2l\" (UID: \"599a933f-fb51-4ba8-bf84-bd7f9ce63af5\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-76m2l" Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.748548 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d8ghr" Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.755036 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gt4rk\" (UniqueName: \"kubernetes.io/projected/49fd1de8-3f87-4d33-b209-df83dd1096a8-kube-api-access-gt4rk\") pod \"collect-profiles-29405610-z8jl4\" (UID: \"49fd1de8-3f87-4d33-b209-df83dd1096a8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405610-z8jl4" Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.773271 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-p7lbs" Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.781040 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-78f6q" Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.781083 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.799941 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.804547 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-xdww2" event={"ID":"4546bf69-bd62-41a8-ade2-31c4e7f198da","Type":"ContainerStarted","Data":"4c9fa9b312b8b74a9878cf47ee1ec30bea068c0bfd9eff22e43343e2d810ca25"} Nov 28 13:31:28 crc kubenswrapper[4857]: I1128 13:31:28.810498 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405610-z8jl4" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.578491 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-bp9rq" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.579102 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t48t9" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.579842 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-76m2l" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.580800 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gv6hw" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.588306 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-registry-certificates\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.588351 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-registry-tls\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.588391 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.588416 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-ca-trust-extracted\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:30 crc kubenswrapper[4857]: E1128 13:31:30.590756 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:31.090727467 +0000 UTC m=+141.214668904 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.676004 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-cvb79"] Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.678871 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-v825d"] Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.685364 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-9sptz"] Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.690332 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-mrg8q"] Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.692327 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.692545 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/39eab4a3-7a84-4550-bd46-1fbdc8f8e087-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-prc44\" (UID: \"39eab4a3-7a84-4550-bd46-1fbdc8f8e087\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-prc44" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.692567 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/5f40ecda-b519-4cfe-8b7b-6854e018fe24-oauth-serving-cert\") pod \"console-f9d7485db-6gf7t\" (UID: \"5f40ecda-b519-4cfe-8b7b-6854e018fe24\") " pod="openshift-console/console-f9d7485db-6gf7t" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.692590 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/5f40ecda-b519-4cfe-8b7b-6854e018fe24-service-ca\") pod \"console-f9d7485db-6gf7t\" (UID: \"5f40ecda-b519-4cfe-8b7b-6854e018fe24\") " pod="openshift-console/console-f9d7485db-6gf7t" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.692606 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.692623 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a-serving-cert\") pod \"etcd-operator-b45778765-6qvft\" (UID: \"454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6qvft" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.692642 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-trusted-ca\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.692667 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wcxq9\" (UniqueName: \"kubernetes.io/projected/00b39dd2-3b38-4fec-9162-e06fcb862af2-kube-api-access-wcxq9\") pod \"migrator-59844c95c7-wzgtn\" (UID: \"00b39dd2-3b38-4fec-9162-e06fcb862af2\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-wzgtn" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.692683 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3150e4a1-9ef4-4a79-a40c-8eea82b5c19a-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-2gj2l\" (UID: \"3150e4a1-9ef4-4a79-a40c-8eea82b5c19a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2gj2l" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.692698 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3150e4a1-9ef4-4a79-a40c-8eea82b5c19a-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-2gj2l\" (UID: \"3150e4a1-9ef4-4a79-a40c-8eea82b5c19a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2gj2l" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.692713 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rz7rh\" (UniqueName: \"kubernetes.io/projected/39eab4a3-7a84-4550-bd46-1fbdc8f8e087-kube-api-access-rz7rh\") pod \"authentication-operator-69f744f599-prc44\" (UID: \"39eab4a3-7a84-4550-bd46-1fbdc8f8e087\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-prc44" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.692737 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/194b3b2b-5c56-44e9-8800-fd8fafbcd3d3-signing-cabundle\") pod \"service-ca-9c57cc56f-7ch8g\" (UID: \"194b3b2b-5c56-44e9-8800-fd8fafbcd3d3\") " pod="openshift-service-ca/service-ca-9c57cc56f-7ch8g" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.692769 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7c609810-eec1-4f73-ad29-24fc190b1ffa-audit-dir\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.692791 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" 
(UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.692815 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6e62429f-c841-417c-b2cd-32130d5cf05f-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-fcpdp\" (UID: \"6e62429f-c841-417c-b2cd-32130d5cf05f\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fcpdp" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.692840 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8mk6s\" (UniqueName: \"kubernetes.io/projected/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-kube-api-access-8mk6s\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.692854 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/844ce19a-9c82-4a37-8170-db724fabc85c-serving-cert\") pod \"route-controller-manager-6576b87f9c-m9bfw\" (UID: \"844ce19a-9c82-4a37-8170-db724fabc85c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-m9bfw" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.692869 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6c5bcd39-8067-47c3-bc3b-5aa1aed7b516-service-ca-bundle\") pod \"router-default-5444994796-ghlnz\" (UID: \"6c5bcd39-8067-47c3-bc3b-5aa1aed7b516\") " pod="openshift-ingress/router-default-5444994796-ghlnz" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.692885 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.692900 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/39eab4a3-7a84-4550-bd46-1fbdc8f8e087-service-ca-bundle\") pod \"authentication-operator-69f744f599-prc44\" (UID: \"39eab4a3-7a84-4550-bd46-1fbdc8f8e087\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-prc44" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.692935 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/844ce19a-9c82-4a37-8170-db724fabc85c-client-ca\") pod \"route-controller-manager-6576b87f9c-m9bfw\" (UID: \"844ce19a-9c82-4a37-8170-db724fabc85c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-m9bfw" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.692952 4857 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/6c5bcd39-8067-47c3-bc3b-5aa1aed7b516-stats-auth\") pod \"router-default-5444994796-ghlnz\" (UID: \"6c5bcd39-8067-47c3-bc3b-5aa1aed7b516\") " pod="openshift-ingress/router-default-5444994796-ghlnz" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.693059 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-installation-pull-secrets\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.693092 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-ca-trust-extracted\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.693108 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5bbd\" (UniqueName: \"kubernetes.io/projected/844ce19a-9c82-4a37-8170-db724fabc85c-kube-api-access-g5bbd\") pod \"route-controller-manager-6576b87f9c-m9bfw\" (UID: \"844ce19a-9c82-4a37-8170-db724fabc85c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-m9bfw" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.693123 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-bound-sa-token\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.693138 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.693156 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6c5bcd39-8067-47c3-bc3b-5aa1aed7b516-metrics-certs\") pod \"router-default-5444994796-ghlnz\" (UID: \"6c5bcd39-8067-47c3-bc3b-5aa1aed7b516\") " pod="openshift-ingress/router-default-5444994796-ghlnz" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.693173 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/6c5bcd39-8067-47c3-bc3b-5aa1aed7b516-default-certificate\") pod \"router-default-5444994796-ghlnz\" (UID: \"6c5bcd39-8067-47c3-bc3b-5aa1aed7b516\") " pod="openshift-ingress/router-default-5444994796-ghlnz" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.693198 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access\" (UniqueName: \"kubernetes.io/projected/924df434-aae1-4b09-adc3-01b3e079fb3f-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-n8z9p\" (UID: \"924df434-aae1-4b09-adc3-01b3e079fb3f\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-n8z9p" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.693252 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/7c609810-eec1-4f73-ad29-24fc190b1ffa-audit-policies\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.693268 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.693286 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-52tg9\" (UniqueName: \"kubernetes.io/projected/94ffd41a-3797-4314-a270-c2b1bd143b04-kube-api-access-52tg9\") pod \"package-server-manager-789f6589d5-gdkwk\" (UID: \"94ffd41a-3797-4314-a270-c2b1bd143b04\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-gdkwk" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.693304 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/5f40ecda-b519-4cfe-8b7b-6854e018fe24-console-oauth-config\") pod \"console-f9d7485db-6gf7t\" (UID: \"5f40ecda-b519-4cfe-8b7b-6854e018fe24\") " pod="openshift-console/console-f9d7485db-6gf7t" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.693328 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.693342 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a-etcd-ca\") pod \"etcd-operator-b45778765-6qvft\" (UID: \"454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6qvft" Nov 28 13:31:30 crc kubenswrapper[4857]: E1128 13:31:30.693373 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:31.193350537 +0000 UTC m=+141.317291974 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.693426 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/94ffd41a-3797-4314-a270-c2b1bd143b04-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-gdkwk\" (UID: \"94ffd41a-3797-4314-a270-c2b1bd143b04\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-gdkwk" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.693452 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.693518 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t9ffz\" (UniqueName: \"kubernetes.io/projected/3150e4a1-9ef4-4a79-a40c-8eea82b5c19a-kube-api-access-t9ffz\") pod \"openshift-controller-manager-operator-756b6f6bc6-2gj2l\" (UID: \"3150e4a1-9ef4-4a79-a40c-8eea82b5c19a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2gj2l" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.693536 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a-config\") pod \"etcd-operator-b45778765-6qvft\" (UID: \"454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6qvft" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.693572 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5f40ecda-b519-4cfe-8b7b-6854e018fe24-trusted-ca-bundle\") pod \"console-f9d7485db-6gf7t\" (UID: \"5f40ecda-b519-4cfe-8b7b-6854e018fe24\") " pod="openshift-console/console-f9d7485db-6gf7t" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.693599 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/924df434-aae1-4b09-adc3-01b3e079fb3f-config\") pod \"kube-controller-manager-operator-78b949d7b-n8z9p\" (UID: \"924df434-aae1-4b09-adc3-01b3e079fb3f\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-n8z9p" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.693613 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a-etcd-service-ca\") pod \"etcd-operator-b45778765-6qvft\" (UID: \"454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a\") 
" pod="openshift-etcd-operator/etcd-operator-b45778765-6qvft" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.693629 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39eab4a3-7a84-4550-bd46-1fbdc8f8e087-config\") pod \"authentication-operator-69f744f599-prc44\" (UID: \"39eab4a3-7a84-4550-bd46-1fbdc8f8e087\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-prc44" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.693649 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2v8n5\" (UniqueName: \"kubernetes.io/projected/93adcd15-ad3e-4583-81b9-1aa85b5d7ada-kube-api-access-2v8n5\") pod \"dns-operator-744455d44c-jp48x\" (UID: \"93adcd15-ad3e-4583-81b9-1aa85b5d7ada\") " pod="openshift-dns-operator/dns-operator-744455d44c-jp48x" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.693667 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/194b3b2b-5c56-44e9-8800-fd8fafbcd3d3-signing-key\") pod \"service-ca-9c57cc56f-7ch8g\" (UID: \"194b3b2b-5c56-44e9-8800-fd8fafbcd3d3\") " pod="openshift-service-ca/service-ca-9c57cc56f-7ch8g" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.693688 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tv98r\" (UniqueName: \"kubernetes.io/projected/5f40ecda-b519-4cfe-8b7b-6854e018fe24-kube-api-access-tv98r\") pod \"console-f9d7485db-6gf7t\" (UID: \"5f40ecda-b519-4cfe-8b7b-6854e018fe24\") " pod="openshift-console/console-f9d7485db-6gf7t" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.693719 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/6e62429f-c841-417c-b2cd-32130d5cf05f-proxy-tls\") pod \"machine-config-controller-84d6567774-fcpdp\" (UID: \"6e62429f-c841-417c-b2cd-32130d5cf05f\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fcpdp" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.693750 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.693772 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.693802 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/924df434-aae1-4b09-adc3-01b3e079fb3f-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-n8z9p\" (UID: \"924df434-aae1-4b09-adc3-01b3e079fb3f\") " 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-n8z9p" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.693824 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-registry-certificates\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.693852 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-registry-tls\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.693888 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.693908 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pv27p\" (UniqueName: \"kubernetes.io/projected/6c5bcd39-8067-47c3-bc3b-5aa1aed7b516-kube-api-access-pv27p\") pod \"router-default-5444994796-ghlnz\" (UID: \"6c5bcd39-8067-47c3-bc3b-5aa1aed7b516\") " pod="openshift-ingress/router-default-5444994796-ghlnz" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.694186 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a-etcd-client\") pod \"etcd-operator-b45778765-6qvft\" (UID: \"454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6qvft" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.694212 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nflf2\" (UniqueName: \"kubernetes.io/projected/6e62429f-c841-417c-b2cd-32130d5cf05f-kube-api-access-nflf2\") pod \"machine-config-controller-84d6567774-fcpdp\" (UID: \"6e62429f-c841-417c-b2cd-32130d5cf05f\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fcpdp" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.694236 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xz75j\" (UniqueName: \"kubernetes.io/projected/194b3b2b-5c56-44e9-8800-fd8fafbcd3d3-kube-api-access-xz75j\") pod \"service-ca-9c57cc56f-7ch8g\" (UID: \"194b3b2b-5c56-44e9-8800-fd8fafbcd3d3\") " pod="openshift-service-ca/service-ca-9c57cc56f-7ch8g" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.695514 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-ca-trust-extracted\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 
13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.695779 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-5c5lq"]
Nov 28 13:31:30 crc kubenswrapper[4857]: E1128 13:31:30.697898 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:31.197879587 +0000 UTC m=+141.321821104 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.698129 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/844ce19a-9c82-4a37-8170-db724fabc85c-config\") pod \"route-controller-manager-6576b87f9c-m9bfw\" (UID: \"844ce19a-9c82-4a37-8170-db724fabc85c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-m9bfw"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.698583 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.698737 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/5f40ecda-b519-4cfe-8b7b-6854e018fe24-console-config\") pod \"console-f9d7485db-6gf7t\" (UID: \"5f40ecda-b519-4cfe-8b7b-6854e018fe24\") " pod="openshift-console/console-f9d7485db-6gf7t"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.698771 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/93adcd15-ad3e-4583-81b9-1aa85b5d7ada-metrics-tls\") pod \"dns-operator-744455d44c-jp48x\" (UID: \"93adcd15-ad3e-4583-81b9-1aa85b5d7ada\") " pod="openshift-dns-operator/dns-operator-744455d44c-jp48x"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.698793 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.698864 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7mbsm\" (UniqueName: \"kubernetes.io/projected/7c609810-eec1-4f73-ad29-24fc190b1ffa-kube-api-access-7mbsm\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.698878 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-qp4rc"]
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.698927 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/39eab4a3-7a84-4550-bd46-1fbdc8f8e087-serving-cert\") pod \"authentication-operator-69f744f599-prc44\" (UID: \"39eab4a3-7a84-4550-bd46-1fbdc8f8e087\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-prc44"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.698969 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/d39d20be-e63a-4fef-bde2-6f4e76051828-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-4xnj2\" (UID: \"d39d20be-e63a-4fef-bde2-6f4e76051828\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4xnj2"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.699032 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/5f40ecda-b519-4cfe-8b7b-6854e018fe24-console-serving-cert\") pod \"console-f9d7485db-6gf7t\" (UID: \"5f40ecda-b519-4cfe-8b7b-6854e018fe24\") " pod="openshift-console/console-f9d7485db-6gf7t"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.699051 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qp549\" (UniqueName: \"kubernetes.io/projected/454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a-kube-api-access-qp549\") pod \"etcd-operator-b45778765-6qvft\" (UID: \"454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6qvft"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.699070 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-96chg\" (UniqueName: \"kubernetes.io/projected/d39d20be-e63a-4fef-bde2-6f4e76051828-kube-api-access-96chg\") pod \"control-plane-machine-set-operator-78cbb6b69f-4xnj2\" (UID: \"d39d20be-e63a-4fef-bde2-6f4e76051828\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4xnj2"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.701850 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-registry-certificates\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.707432 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-92tgg"]
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.707491 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-nm6c5"]
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.711609 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-registry-tls\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw"
Nov 28 13:31:30 crc kubenswrapper[4857]: W1128 13:31:30.759370 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7cab1b93_9ab5_4fdb_b6f6_d86a1a50b558.slice/crio-efbb52fbe69a9f85b7e950dee6a37a78c9bd15fa14fcc36abcec73c2c9a8da17 WatchSource:0}: Error finding container efbb52fbe69a9f85b7e950dee6a37a78c9bd15fa14fcc36abcec73c2c9a8da17: Status 404 returned error can't find the container with id efbb52fbe69a9f85b7e950dee6a37a78c9bd15fa14fcc36abcec73c2c9a8da17
Nov 28 13:31:30 crc kubenswrapper[4857]: W1128 13:31:30.764319 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5ccc741a_98e5_420d_ad6e_4260ea2abb1f.slice/crio-376ff6f1c464237a9cf192716ce52f574dc9967112987bb40519dae7a6487924 WatchSource:0}: Error finding container 376ff6f1c464237a9cf192716ce52f574dc9967112987bb40519dae7a6487924: Status 404 returned error can't find the container with id 376ff6f1c464237a9cf192716ce52f574dc9967112987bb40519dae7a6487924
Nov 28 13:31:30 crc kubenswrapper[4857]: W1128 13:31:30.769540 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0723fd39_fd72_4aae_a8ac_4a69a9cea44e.slice/crio-e3bba44b5183a01cc2e71d3b55167aa75018c068b3970b6d47f7d15ddc27f6de WatchSource:0}: Error finding container e3bba44b5183a01cc2e71d3b55167aa75018c068b3970b6d47f7d15ddc27f6de: Status 404 returned error can't find the container with id e3bba44b5183a01cc2e71d3b55167aa75018c068b3970b6d47f7d15ddc27f6de
Nov 28 13:31:30 crc kubenswrapper[4857]: W1128 13:31:30.774802 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podde71ba59_fb73_4f54_be20_690e4e94b446.slice/crio-8d542de7fb175afeab6f61ef7731649b8009bb03f81a732af8530c8aafc429f4 WatchSource:0}: Error finding container 8d542de7fb175afeab6f61ef7731649b8009bb03f81a732af8530c8aafc429f4: Status 404 returned error can't find the container with id 8d542de7fb175afeab6f61ef7731649b8009bb03f81a732af8530c8aafc429f4
Nov 28 13:31:30 crc kubenswrapper[4857]: W1128 13:31:30.791089 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1cb29ded_850a_45ff_8201_a991fe779c01.slice/crio-99c1f1f5c07faf38ec80299cff5701e3093a3872dc262e7cf243077232fda39c WatchSource:0}: Error finding container 99c1f1f5c07faf38ec80299cff5701e3093a3872dc262e7cf243077232fda39c: Status 404 returned error can't find the container with id 99c1f1f5c07faf38ec80299cff5701e3093a3872dc262e7cf243077232fda39c
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.800526 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.800760 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a-config\") pod \"etcd-operator-b45778765-6qvft\" (UID: \"454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6qvft"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.800790 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/fff47bbd-bac2-436d-b331-23f8aa856bd0-cert\") pod \"ingress-canary-g79kz\" (UID: \"fff47bbd-bac2-436d-b331-23f8aa856bd0\") " pod="openshift-ingress-canary/ingress-canary-g79kz"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.800825 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5f40ecda-b519-4cfe-8b7b-6854e018fe24-trusted-ca-bundle\") pod \"console-f9d7485db-6gf7t\" (UID: \"5f40ecda-b519-4cfe-8b7b-6854e018fe24\") " pod="openshift-console/console-f9d7485db-6gf7t"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.800847 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/924df434-aae1-4b09-adc3-01b3e079fb3f-config\") pod \"kube-controller-manager-operator-78b949d7b-n8z9p\" (UID: \"924df434-aae1-4b09-adc3-01b3e079fb3f\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-n8z9p"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.800862 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a-etcd-service-ca\") pod \"etcd-operator-b45778765-6qvft\" (UID: \"454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6qvft"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.800885 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39eab4a3-7a84-4550-bd46-1fbdc8f8e087-config\") pod \"authentication-operator-69f744f599-prc44\" (UID: \"39eab4a3-7a84-4550-bd46-1fbdc8f8e087\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-prc44"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.800913 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vfm8r\" (UniqueName: \"kubernetes.io/projected/3b437875-4ae6-4455-a281-84281a93fad0-kube-api-access-vfm8r\") pod \"csi-hostpathplugin-qnqmv\" (UID: \"3b437875-4ae6-4455-a281-84281a93fad0\") " pod="hostpath-provisioner/csi-hostpathplugin-qnqmv"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.800950 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2v8n5\" (UniqueName: \"kubernetes.io/projected/93adcd15-ad3e-4583-81b9-1aa85b5d7ada-kube-api-access-2v8n5\") pod \"dns-operator-744455d44c-jp48x\" (UID: \"93adcd15-ad3e-4583-81b9-1aa85b5d7ada\") " pod="openshift-dns-operator/dns-operator-744455d44c-jp48x"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.800972 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/194b3b2b-5c56-44e9-8800-fd8fafbcd3d3-signing-key\") pod \"service-ca-9c57cc56f-7ch8g\" (UID: \"194b3b2b-5c56-44e9-8800-fd8fafbcd3d3\") " pod="openshift-service-ca/service-ca-9c57cc56f-7ch8g"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.801082 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tv98r\" (UniqueName: \"kubernetes.io/projected/5f40ecda-b519-4cfe-8b7b-6854e018fe24-kube-api-access-tv98r\") pod \"console-f9d7485db-6gf7t\" (UID: \"5f40ecda-b519-4cfe-8b7b-6854e018fe24\") " pod="openshift-console/console-f9d7485db-6gf7t"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.801114 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/6e62429f-c841-417c-b2cd-32130d5cf05f-proxy-tls\") pod \"machine-config-controller-84d6567774-fcpdp\" (UID: \"6e62429f-c841-417c-b2cd-32130d5cf05f\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fcpdp"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.801133 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8mvbr\" (UniqueName: \"kubernetes.io/projected/51677670-528c-40ba-acf4-e9b506e48a84-kube-api-access-8mvbr\") pod \"marketplace-operator-79b997595-plzgn\" (UID: \"51677670-528c-40ba-acf4-e9b506e48a84\") " pod="openshift-marketplace/marketplace-operator-79b997595-plzgn"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.801162 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.801182 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.801213 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/924df434-aae1-4b09-adc3-01b3e079fb3f-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-n8z9p\" (UID: \"924df434-aae1-4b09-adc3-01b3e079fb3f\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-n8z9p"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.801231 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/3b437875-4ae6-4455-a281-84281a93fad0-socket-dir\") pod \"csi-hostpathplugin-qnqmv\" (UID: \"3b437875-4ae6-4455-a281-84281a93fad0\") " pod="hostpath-provisioner/csi-hostpathplugin-qnqmv"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.801275 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/beba01d0-40fd-49b1-ba9c-99dd121db738-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-5wxk5\" (UID: \"beba01d0-40fd-49b1-ba9c-99dd121db738\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5wxk5"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.801305 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pv27p\" (UniqueName: \"kubernetes.io/projected/6c5bcd39-8067-47c3-bc3b-5aa1aed7b516-kube-api-access-pv27p\") pod \"router-default-5444994796-ghlnz\" (UID: \"6c5bcd39-8067-47c3-bc3b-5aa1aed7b516\") " pod="openshift-ingress/router-default-5444994796-ghlnz"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.801352 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a-etcd-client\") pod \"etcd-operator-b45778765-6qvft\" (UID: \"454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6qvft"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.801372 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nflf2\" (UniqueName: \"kubernetes.io/projected/6e62429f-c841-417c-b2cd-32130d5cf05f-kube-api-access-nflf2\") pod \"machine-config-controller-84d6567774-fcpdp\" (UID: \"6e62429f-c841-417c-b2cd-32130d5cf05f\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fcpdp"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.801458 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xz75j\" (UniqueName: \"kubernetes.io/projected/194b3b2b-5c56-44e9-8800-fd8fafbcd3d3-kube-api-access-xz75j\") pod \"service-ca-9c57cc56f-7ch8g\" (UID: \"194b3b2b-5c56-44e9-8800-fd8fafbcd3d3\") " pod="openshift-service-ca/service-ca-9c57cc56f-7ch8g"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.801482 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/844ce19a-9c82-4a37-8170-db724fabc85c-config\") pod \"route-controller-manager-6576b87f9c-m9bfw\" (UID: \"844ce19a-9c82-4a37-8170-db724fabc85c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-m9bfw"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.801507 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/3b437875-4ae6-4455-a281-84281a93fad0-csi-data-dir\") pod \"csi-hostpathplugin-qnqmv\" (UID: \"3b437875-4ae6-4455-a281-84281a93fad0\") " pod="hostpath-provisioner/csi-hostpathplugin-qnqmv"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.801528 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.801557 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/5f40ecda-b519-4cfe-8b7b-6854e018fe24-console-config\") pod \"console-f9d7485db-6gf7t\" (UID: \"5f40ecda-b519-4cfe-8b7b-6854e018fe24\") " pod="openshift-console/console-f9d7485db-6gf7t"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.801580 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/13499e68-b11e-4914-8a02-7c63458afd38-bound-sa-token\") pod \"ingress-operator-5b745b69d9-ddlrq\" (UID: \"13499e68-b11e-4914-8a02-7c63458afd38\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ddlrq"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.801600 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/93adcd15-ad3e-4583-81b9-1aa85b5d7ada-metrics-tls\") pod \"dns-operator-744455d44c-jp48x\" (UID: \"93adcd15-ad3e-4583-81b9-1aa85b5d7ada\") " pod="openshift-dns-operator/dns-operator-744455d44c-jp48x"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.801611 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/924df434-aae1-4b09-adc3-01b3e079fb3f-config\") pod \"kube-controller-manager-operator-78b949d7b-n8z9p\" (UID: \"924df434-aae1-4b09-adc3-01b3e079fb3f\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-n8z9p"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.801625 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.801680 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7mbsm\" (UniqueName: \"kubernetes.io/projected/7c609810-eec1-4f73-ad29-24fc190b1ffa-kube-api-access-7mbsm\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.801704 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/beba01d0-40fd-49b1-ba9c-99dd121db738-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-5wxk5\" (UID: \"beba01d0-40fd-49b1-ba9c-99dd121db738\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5wxk5"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.801724 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/39eab4a3-7a84-4550-bd46-1fbdc8f8e087-serving-cert\") pod \"authentication-operator-69f744f599-prc44\" (UID: \"39eab4a3-7a84-4550-bd46-1fbdc8f8e087\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-prc44"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.801742 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/d39d20be-e63a-4fef-bde2-6f4e76051828-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-4xnj2\" (UID: \"d39d20be-e63a-4fef-bde2-6f4e76051828\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4xnj2"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.801763 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/3b437875-4ae6-4455-a281-84281a93fad0-plugins-dir\") pod \"csi-hostpathplugin-qnqmv\" (UID: \"3b437875-4ae6-4455-a281-84281a93fad0\") " pod="hostpath-provisioner/csi-hostpathplugin-qnqmv"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.801782 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/5f40ecda-b519-4cfe-8b7b-6854e018fe24-console-serving-cert\") pod \"console-f9d7485db-6gf7t\" (UID: \"5f40ecda-b519-4cfe-8b7b-6854e018fe24\") " pod="openshift-console/console-f9d7485db-6gf7t"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.801799 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qp549\" (UniqueName: \"kubernetes.io/projected/454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a-kube-api-access-qp549\") pod \"etcd-operator-b45778765-6qvft\" (UID: \"454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6qvft"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.801817 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-96chg\" (UniqueName: \"kubernetes.io/projected/d39d20be-e63a-4fef-bde2-6f4e76051828-kube-api-access-96chg\") pod \"control-plane-machine-set-operator-78cbb6b69f-4xnj2\" (UID: \"d39d20be-e63a-4fef-bde2-6f4e76051828\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4xnj2"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.801863 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/39eab4a3-7a84-4550-bd46-1fbdc8f8e087-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-prc44\" (UID: \"39eab4a3-7a84-4550-bd46-1fbdc8f8e087\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-prc44"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.801888 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/5f40ecda-b519-4cfe-8b7b-6854e018fe24-oauth-serving-cert\") pod \"console-f9d7485db-6gf7t\" (UID: \"5f40ecda-b519-4cfe-8b7b-6854e018fe24\") " pod="openshift-console/console-f9d7485db-6gf7t"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.801908 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/7ee007cb-bc82-48e8-a1ff-34f37f55ba19-profile-collector-cert\") pod \"catalog-operator-68c6474976-lq2rk\" (UID: \"7ee007cb-bc82-48e8-a1ff-34f37f55ba19\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-lq2rk"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.801926 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39eab4a3-7a84-4550-bd46-1fbdc8f8e087-config\") pod \"authentication-operator-69f744f599-prc44\" (UID: \"39eab4a3-7a84-4550-bd46-1fbdc8f8e087\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-prc44"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.801953 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/5f40ecda-b519-4cfe-8b7b-6854e018fe24-service-ca\") pod \"console-f9d7485db-6gf7t\" (UID: \"5f40ecda-b519-4cfe-8b7b-6854e018fe24\") " pod="openshift-console/console-f9d7485db-6gf7t"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.801972 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802031 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/13499e68-b11e-4914-8a02-7c63458afd38-trusted-ca\") pod \"ingress-operator-5b745b69d9-ddlrq\" (UID: \"13499e68-b11e-4914-8a02-7c63458afd38\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ddlrq"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802051 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a-serving-cert\") pod \"etcd-operator-b45778765-6qvft\" (UID: \"454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6qvft"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802082 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-trusted-ca\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802100 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wcxq9\" (UniqueName: \"kubernetes.io/projected/00b39dd2-3b38-4fec-9162-e06fcb862af2-kube-api-access-wcxq9\") pod \"migrator-59844c95c7-wzgtn\" (UID: \"00b39dd2-3b38-4fec-9162-e06fcb862af2\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-wzgtn"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802116 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3150e4a1-9ef4-4a79-a40c-8eea82b5c19a-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-2gj2l\" (UID: \"3150e4a1-9ef4-4a79-a40c-8eea82b5c19a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2gj2l"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802141 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3150e4a1-9ef4-4a79-a40c-8eea82b5c19a-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-2gj2l\" (UID: \"3150e4a1-9ef4-4a79-a40c-8eea82b5c19a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2gj2l"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802133 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a-etcd-service-ca\") pod \"etcd-operator-b45778765-6qvft\" (UID: \"454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6qvft"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802161 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rz7rh\" (UniqueName: \"kubernetes.io/projected/39eab4a3-7a84-4550-bd46-1fbdc8f8e087-kube-api-access-rz7rh\") pod \"authentication-operator-69f744f599-prc44\" (UID: \"39eab4a3-7a84-4550-bd46-1fbdc8f8e087\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-prc44"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802189 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/194b3b2b-5c56-44e9-8800-fd8fafbcd3d3-signing-cabundle\") pod \"service-ca-9c57cc56f-7ch8g\" (UID: \"194b3b2b-5c56-44e9-8800-fd8fafbcd3d3\") " pod="openshift-service-ca/service-ca-9c57cc56f-7ch8g"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802231 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qsskt\" (UniqueName: \"kubernetes.io/projected/1bc08a92-d2bf-41ce-84b7-675c9c6a24de-kube-api-access-qsskt\") pod \"dns-default-jjd26\" (UID: \"1bc08a92-d2bf-41ce-84b7-675c9c6a24de\") " pod="openshift-dns/dns-default-jjd26"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802248 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7c609810-eec1-4f73-ad29-24fc190b1ffa-audit-dir\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802266 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802283 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6e62429f-c841-417c-b2cd-32130d5cf05f-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-fcpdp\" (UID: \"6e62429f-c841-417c-b2cd-32130d5cf05f\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fcpdp"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802321 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8mk6s\" (UniqueName: \"kubernetes.io/projected/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-kube-api-access-8mk6s\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802345 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/844ce19a-9c82-4a37-8170-db724fabc85c-serving-cert\") pod \"route-controller-manager-6576b87f9c-m9bfw\" (UID: \"844ce19a-9c82-4a37-8170-db724fabc85c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-m9bfw"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802377 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/7ee007cb-bc82-48e8-a1ff-34f37f55ba19-srv-cert\") pod \"catalog-operator-68c6474976-lq2rk\" (UID: \"7ee007cb-bc82-48e8-a1ff-34f37f55ba19\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-lq2rk"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802395 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6c5bcd39-8067-47c3-bc3b-5aa1aed7b516-service-ca-bundle\") pod \"router-default-5444994796-ghlnz\" (UID: \"6c5bcd39-8067-47c3-bc3b-5aa1aed7b516\") " pod="openshift-ingress/router-default-5444994796-ghlnz"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802416 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wqvnv\" (UniqueName: \"kubernetes.io/projected/ece8c985-18db-4433-8f21-503a4718c5bf-kube-api-access-wqvnv\") pod \"machine-config-server-5zxrw\" (UID: \"ece8c985-18db-4433-8f21-503a4718c5bf\") " pod="openshift-machine-config-operator/machine-config-server-5zxrw"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802437 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802458 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c8s4w\" (UniqueName: \"kubernetes.io/projected/fff47bbd-bac2-436d-b331-23f8aa856bd0-kube-api-access-c8s4w\") pod \"ingress-canary-g79kz\" (UID: \"fff47bbd-bac2-436d-b331-23f8aa856bd0\") " pod="openshift-ingress-canary/ingress-canary-g79kz"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802474 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/39eab4a3-7a84-4550-bd46-1fbdc8f8e087-service-ca-bundle\") pod \"authentication-operator-69f744f599-prc44\" (UID: \"39eab4a3-7a84-4550-bd46-1fbdc8f8e087\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-prc44"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802494 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4xldx\" (UniqueName: \"kubernetes.io/projected/7ee007cb-bc82-48e8-a1ff-34f37f55ba19-kube-api-access-4xldx\") pod \"catalog-operator-68c6474976-lq2rk\" (UID: \"7ee007cb-bc82-48e8-a1ff-34f37f55ba19\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-lq2rk"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802513 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/844ce19a-9c82-4a37-8170-db724fabc85c-client-ca\") pod \"route-controller-manager-6576b87f9c-m9bfw\" (UID: \"844ce19a-9c82-4a37-8170-db724fabc85c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-m9bfw"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802528 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/6c5bcd39-8067-47c3-bc3b-5aa1aed7b516-stats-auth\") pod \"router-default-5444994796-ghlnz\" (UID: \"6c5bcd39-8067-47c3-bc3b-5aa1aed7b516\") " pod="openshift-ingress/router-default-5444994796-ghlnz"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802566 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-installation-pull-secrets\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802583 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jrt2x\" (UniqueName: \"kubernetes.io/projected/beba01d0-40fd-49b1-ba9c-99dd121db738-kube-api-access-jrt2x\") pod \"kube-storage-version-migrator-operator-b67b599dd-5wxk5\" (UID: \"beba01d0-40fd-49b1-ba9c-99dd121db738\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5wxk5"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802598 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/1bc08a92-d2bf-41ce-84b7-675c9c6a24de-metrics-tls\") pod \"dns-default-jjd26\" (UID: \"1bc08a92-d2bf-41ce-84b7-675c9c6a24de\") " pod="openshift-dns/dns-default-jjd26"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802628 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5bbd\" (UniqueName: \"kubernetes.io/projected/844ce19a-9c82-4a37-8170-db724fabc85c-kube-api-access-g5bbd\") pod \"route-controller-manager-6576b87f9c-m9bfw\" (UID: \"844ce19a-9c82-4a37-8170-db724fabc85c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-m9bfw"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802652 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-bound-sa-token\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802672 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802690 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/51677670-528c-40ba-acf4-e9b506e48a84-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-plzgn\" (UID: \"51677670-528c-40ba-acf4-e9b506e48a84\") " pod="openshift-marketplace/marketplace-operator-79b997595-plzgn"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802739 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6c5bcd39-8067-47c3-bc3b-5aa1aed7b516-metrics-certs\") pod \"router-default-5444994796-ghlnz\" (UID: \"6c5bcd39-8067-47c3-bc3b-5aa1aed7b516\") " pod="openshift-ingress/router-default-5444994796-ghlnz"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802766 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/ece8c985-18db-4433-8f21-503a4718c5bf-node-bootstrap-token\") pod \"machine-config-server-5zxrw\" (UID: \"ece8c985-18db-4433-8f21-503a4718c5bf\") " pod="openshift-machine-config-operator/machine-config-server-5zxrw"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802783 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/ece8c985-18db-4433-8f21-503a4718c5bf-certs\") pod \"machine-config-server-5zxrw\" (UID: \"ece8c985-18db-4433-8f21-503a4718c5bf\") " pod="openshift-machine-config-operator/machine-config-server-5zxrw"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802799 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/6c5bcd39-8067-47c3-bc3b-5aa1aed7b516-default-certificate\") pod \"router-default-5444994796-ghlnz\" (UID: \"6c5bcd39-8067-47c3-bc3b-5aa1aed7b516\") " pod="openshift-ingress/router-default-5444994796-ghlnz"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802818 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/924df434-aae1-4b09-adc3-01b3e079fb3f-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-n8z9p\" (UID: \"924df434-aae1-4b09-adc3-01b3e079fb3f\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-n8z9p"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802833 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1bc08a92-d2bf-41ce-84b7-675c9c6a24de-config-volume\") pod \"dns-default-jjd26\" (UID: \"1bc08a92-d2bf-41ce-84b7-675c9c6a24de\") " pod="openshift-dns/dns-default-jjd26"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802924 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/7c609810-eec1-4f73-ad29-24fc190b1ffa-audit-policies\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802944 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.802969 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-52tg9\" (UniqueName: \"kubernetes.io/projected/94ffd41a-3797-4314-a270-c2b1bd143b04-kube-api-access-52tg9\") pod \"package-server-manager-789f6589d5-gdkwk\" (UID: \"94ffd41a-3797-4314-a270-c2b1bd143b04\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-gdkwk"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.803015 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/3b437875-4ae6-4455-a281-84281a93fad0-registration-dir\") pod \"csi-hostpathplugin-qnqmv\" (UID: \"3b437875-4ae6-4455-a281-84281a93fad0\") " pod="hostpath-provisioner/csi-hostpathplugin-qnqmv"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.803046 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/5f40ecda-b519-4cfe-8b7b-6854e018fe24-console-oauth-config\") pod \"console-f9d7485db-6gf7t\" (UID: \"5f40ecda-b519-4cfe-8b7b-6854e018fe24\") " pod="openshift-console/console-f9d7485db-6gf7t"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.803063 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/51677670-528c-40ba-acf4-e9b506e48a84-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-plzgn\" (UID: \"51677670-528c-40ba-acf4-e9b506e48a84\") " pod="openshift-marketplace/marketplace-operator-79b997595-plzgn"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.803092 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26kcd\" (UniqueName: \"kubernetes.io/projected/13499e68-b11e-4914-8a02-7c63458afd38-kube-api-access-26kcd\") pod \"ingress-operator-5b745b69d9-ddlrq\" (UID: \"13499e68-b11e-4914-8a02-7c63458afd38\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ddlrq"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.803112 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.803129 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a-etcd-ca\") pod \"etcd-operator-b45778765-6qvft\" (UID: \"454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6qvft"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.803146 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/3b437875-4ae6-4455-a281-84281a93fad0-mountpoint-dir\") pod \"csi-hostpathplugin-qnqmv\" (UID: \"3b437875-4ae6-4455-a281-84281a93fad0\") " pod="hostpath-provisioner/csi-hostpathplugin-qnqmv"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.803186 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/94ffd41a-3797-4314-a270-c2b1bd143b04-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-gdkwk\" (UID: \"94ffd41a-3797-4314-a270-c2b1bd143b04\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-gdkwk"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.803225 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.803241 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/13499e68-b11e-4914-8a02-7c63458afd38-metrics-tls\") pod \"ingress-operator-5b745b69d9-ddlrq\" (UID: \"13499e68-b11e-4914-8a02-7c63458afd38\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ddlrq"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.803281 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t9ffz\" (UniqueName: \"kubernetes.io/projected/3150e4a1-9ef4-4a79-a40c-8eea82b5c19a-kube-api-access-t9ffz\") pod \"openshift-controller-manager-operator-756b6f6bc6-2gj2l\" (UID: \"3150e4a1-9ef4-4a79-a40c-8eea82b5c19a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2gj2l"
Nov 28 13:31:30 crc kubenswrapper[4857]: E1128 13:31:30.803389 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:31.303375599 +0000 UTC m=+141.427317037 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.803814 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a-config\") pod \"etcd-operator-b45778765-6qvft\" (UID: \"454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6qvft"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.805044 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/5f40ecda-b519-4cfe-8b7b-6854e018fe24-console-config\") pod \"console-f9d7485db-6gf7t\" (UID: \"5f40ecda-b519-4cfe-8b7b-6854e018fe24\") " pod="openshift-console/console-f9d7485db-6gf7t"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.805128 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/844ce19a-9c82-4a37-8170-db724fabc85c-config\") pod \"route-controller-manager-6576b87f9c-m9bfw\" (UID: \"844ce19a-9c82-4a37-8170-db724fabc85c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-m9bfw"
Nov 28 13:31:30 crc kubenswrapper[4857]: W1128 13:31:30.805577 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod951e34a8_00f5_479a_9de8_ee53ee32da75.slice/crio-7949e119cbd0d8311f7bfa30a9b72cbda0b51db3197046ab1e8a3400780bac4a WatchSource:0}: Error finding container 7949e119cbd0d8311f7bfa30a9b72cbda0b51db3197046ab1e8a3400780bac4a: Status 404 returned error can't find the container with id 7949e119cbd0d8311f7bfa30a9b72cbda0b51db3197046ab1e8a3400780bac4a
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.808878 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/844ce19a-9c82-4a37-8170-db724fabc85c-client-ca\") pod \"route-controller-manager-6576b87f9c-m9bfw\" (UID: \"844ce19a-9c82-4a37-8170-db724fabc85c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-m9bfw"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.809161 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.809260 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/39eab4a3-7a84-4550-bd46-1fbdc8f8e087-serving-cert\") pod \"authentication-operator-69f744f599-prc44\" (UID: \"39eab4a3-7a84-4550-bd46-1fbdc8f8e087\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-prc44"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.810852 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/39eab4a3-7a84-4550-bd46-1fbdc8f8e087-service-ca-bundle\") pod \"authentication-operator-69f744f599-prc44\" (UID: \"39eab4a3-7a84-4550-bd46-1fbdc8f8e087\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-prc44"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.811297 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a-etcd-client\") pod \"etcd-operator-b45778765-6qvft\" (UID: \"454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6qvft"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.814113 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/6c5bcd39-8067-47c3-bc3b-5aa1aed7b516-stats-auth\") pod \"router-default-5444994796-ghlnz\" (UID: \"6c5bcd39-8067-47c3-bc3b-5aa1aed7b516\") " pod="openshift-ingress/router-default-5444994796-ghlnz"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.815659 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/d39d20be-e63a-4fef-bde2-6f4e76051828-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-4xnj2\" (UID: \"d39d20be-e63a-4fef-bde2-6f4e76051828\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4xnj2"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.816379 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6e62429f-c841-417c-b2cd-32130d5cf05f-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-fcpdp\" (UID: \"6e62429f-c841-417c-b2cd-32130d5cf05f\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fcpdp"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.817756 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-installation-pull-secrets\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.818419 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/194b3b2b-5c56-44e9-8800-fd8fafbcd3d3-signing-cabundle\") pod \"service-ca-9c57cc56f-7ch8g\" (UID: \"194b3b2b-5c56-44e9-8800-fd8fafbcd3d3\") " pod="openshift-service-ca/service-ca-9c57cc56f-7ch8g"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.818670 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7c609810-eec1-4f73-ad29-24fc190b1ffa-audit-dir\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.820249 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3150e4a1-9ef4-4a79-a40c-8eea82b5c19a-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-2gj2l\" (UID: \"3150e4a1-9ef4-4a79-a40c-8eea82b5c19a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2gj2l"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.823461 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/5f40ecda-b519-4cfe-8b7b-6854e018fe24-service-ca\") pod \"console-f9d7485db-6gf7t\" (UID: \"5f40ecda-b519-4cfe-8b7b-6854e018fe24\") " pod="openshift-console/console-f9d7485db-6gf7t"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.823747 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.831265 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/5f40ecda-b519-4cfe-8b7b-6854e018fe24-console-serving-cert\") pod \"console-f9d7485db-6gf7t\" (UID: \"5f40ecda-b519-4cfe-8b7b-6854e018fe24\") " pod="openshift-console/console-f9d7485db-6gf7t"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.832311 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/924df434-aae1-4b09-adc3-01b3e079fb3f-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-n8z9p\" (UID: \"924df434-aae1-4b09-adc3-01b3e079fb3f\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-n8z9p"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.832739 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.833128 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xz75j\" (UniqueName: \"kubernetes.io/projected/194b3b2b-5c56-44e9-8800-fd8fafbcd3d3-kube-api-access-xz75j\") pod \"service-ca-9c57cc56f-7ch8g\" (UID: \"194b3b2b-5c56-44e9-8800-fd8fafbcd3d3\") " pod="openshift-service-ca/service-ca-9c57cc56f-7ch8g"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.833745 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/844ce19a-9c82-4a37-8170-db724fabc85c-serving-cert\") pod \"route-controller-manager-6576b87f9c-m9bfw\" (UID: \"844ce19a-9c82-4a37-8170-db724fabc85c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-m9bfw"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.833895 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/6e62429f-c841-417c-b2cd-32130d5cf05f-proxy-tls\") pod \"machine-config-controller-84d6567774-fcpdp\" (UID: \"6e62429f-c841-417c-b2cd-32130d5cf05f\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fcpdp"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.834361 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/194b3b2b-5c56-44e9-8800-fd8fafbcd3d3-signing-key\") pod \"service-ca-9c57cc56f-7ch8g\" (UID: \"194b3b2b-5c56-44e9-8800-fd8fafbcd3d3\") " pod="openshift-service-ca/service-ca-9c57cc56f-7ch8g"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.835878 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.836425 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.837929 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5f40ecda-b519-4cfe-8b7b-6854e018fe24-trusted-ca-bundle\") pod \"console-f9d7485db-6gf7t\" (UID: \"5f40ecda-b519-4cfe-8b7b-6854e018fe24\") " pod="openshift-console/console-f9d7485db-6gf7t"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.838749 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/39eab4a3-7a84-4550-bd46-1fbdc8f8e087-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-prc44\" (UID: \"39eab4a3-7a84-4550-bd46-1fbdc8f8e087\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-prc44"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.839745 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-trusted-ca\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.843262 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a-serving-cert\") pod \"etcd-operator-b45778765-6qvft\" (UID: \"454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6qvft"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.843318 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/7c609810-eec1-4f73-ad29-24fc190b1ffa-audit-policies\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.844108 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.844154 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a-etcd-ca\") pod \"etcd-operator-b45778765-6qvft\" (UID: \"454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6qvft"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.844260 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.844684 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.845247 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.845518 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/5f40ecda-b519-4cfe-8b7b-6854e018fe24-oauth-serving-cert\") pod \"console-f9d7485db-6gf7t\" (UID: \"5f40ecda-b519-4cfe-8b7b-6854e018fe24\") " pod="openshift-console/console-f9d7485db-6gf7t"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.845837 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6c5bcd39-8067-47c3-bc3b-5aa1aed7b516-service-ca-bundle\") pod \"router-default-5444994796-ghlnz\" (UID: \"6c5bcd39-8067-47c3-bc3b-5aa1aed7b516\") " pod="openshift-ingress/router-default-5444994796-ghlnz"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.851736 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/93adcd15-ad3e-4583-81b9-1aa85b5d7ada-metrics-tls\") pod \"dns-operator-744455d44c-jp48x\" (UID: \"93adcd15-ad3e-4583-81b9-1aa85b5d7ada\") " pod="openshift-dns-operator/dns-operator-744455d44c-jp48x"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.852260 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/5f40ecda-b519-4cfe-8b7b-6854e018fe24-console-oauth-config\") pod \"console-f9d7485db-6gf7t\" (UID: \"5f40ecda-b519-4cfe-8b7b-6854e018fe24\") " pod="openshift-console/console-f9d7485db-6gf7t"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.856399 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3150e4a1-9ef4-4a79-a40c-8eea82b5c19a-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-2gj2l\" (UID: \"3150e4a1-9ef4-4a79-a40c-8eea82b5c19a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2gj2l"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.862177 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5bbd\" (UniqueName: \"kubernetes.io/projected/844ce19a-9c82-4a37-8170-db724fabc85c-kube-api-access-g5bbd\") pod \"route-controller-manager-6576b87f9c-m9bfw\" (UID: \"844ce19a-9c82-4a37-8170-db724fabc85c\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-m9bfw"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.864583 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.869602 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6c5bcd39-8067-47c3-bc3b-5aa1aed7b516-metrics-certs\") pod \"router-default-5444994796-ghlnz\" (UID: \"6c5bcd39-8067-47c3-bc3b-5aa1aed7b516\") " pod="openshift-ingress/router-default-5444994796-ghlnz"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.879534 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2v8n5\" (UniqueName: \"kubernetes.io/projected/93adcd15-ad3e-4583-81b9-1aa85b5d7ada-kube-api-access-2v8n5\") pod \"dns-operator-744455d44c-jp48x\" (UID: \"93adcd15-ad3e-4583-81b9-1aa85b5d7ada\") " pod="openshift-dns-operator/dns-operator-744455d44c-jp48x"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.880089 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/6c5bcd39-8067-47c3-bc3b-5aa1aed7b516-default-certificate\") pod \"router-default-5444994796-ghlnz\" (UID: \"6c5bcd39-8067-47c3-bc3b-5aa1aed7b516\") " pod="openshift-ingress/router-default-5444994796-ghlnz"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.890588 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/94ffd41a-3797-4314-a270-c2b1bd143b04-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-gdkwk\" (UID: \"94ffd41a-3797-4314-a270-c2b1bd143b04\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-gdkwk"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.891200 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wcxq9\" (UniqueName: \"kubernetes.io/projected/00b39dd2-3b38-4fec-9162-e06fcb862af2-kube-api-access-wcxq9\") pod \"migrator-59844c95c7-wzgtn\" (UID: \"00b39dd2-3b38-4fec-9162-e06fcb862af2\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-wzgtn"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.883008 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.918227 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t9ffz\" (UniqueName: \"kubernetes.io/projected/3150e4a1-9ef4-4a79-a40c-8eea82b5c19a-kube-api-access-t9ffz\") pod \"openshift-controller-manager-operator-756b6f6bc6-2gj2l\" (UID: \"3150e4a1-9ef4-4a79-a40c-8eea82b5c19a\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2gj2l"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.918675 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rz7rh\" (UniqueName: \"kubernetes.io/projected/39eab4a3-7a84-4550-bd46-1fbdc8f8e087-kube-api-access-rz7rh\") pod \"authentication-operator-69f744f599-prc44\" (UID: \"39eab4a3-7a84-4550-bd46-1fbdc8f8e087\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-prc44"
Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.920220 4857 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-7ch8g" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.922421 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-52tg9\" (UniqueName: \"kubernetes.io/projected/94ffd41a-3797-4314-a270-c2b1bd143b04-kube-api-access-52tg9\") pod \"package-server-manager-789f6589d5-gdkwk\" (UID: \"94ffd41a-3797-4314-a270-c2b1bd143b04\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-gdkwk" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.924531 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-bound-sa-token\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.925627 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8mvbr\" (UniqueName: \"kubernetes.io/projected/51677670-528c-40ba-acf4-e9b506e48a84-kube-api-access-8mvbr\") pod \"marketplace-operator-79b997595-plzgn\" (UID: \"51677670-528c-40ba-acf4-e9b506e48a84\") " pod="openshift-marketplace/marketplace-operator-79b997595-plzgn" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.925674 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/3b437875-4ae6-4455-a281-84281a93fad0-socket-dir\") pod \"csi-hostpathplugin-qnqmv\" (UID: \"3b437875-4ae6-4455-a281-84281a93fad0\") " pod="hostpath-provisioner/csi-hostpathplugin-qnqmv" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.925701 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/beba01d0-40fd-49b1-ba9c-99dd121db738-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-5wxk5\" (UID: \"beba01d0-40fd-49b1-ba9c-99dd121db738\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5wxk5" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.925732 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.925782 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/3b437875-4ae6-4455-a281-84281a93fad0-csi-data-dir\") pod \"csi-hostpathplugin-qnqmv\" (UID: \"3b437875-4ae6-4455-a281-84281a93fad0\") " pod="hostpath-provisioner/csi-hostpathplugin-qnqmv" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.925812 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qp549\" (UniqueName: \"kubernetes.io/projected/454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a-kube-api-access-qp549\") pod \"etcd-operator-b45778765-6qvft\" (UID: \"454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a\") " pod="openshift-etcd-operator/etcd-operator-b45778765-6qvft" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.925820 4857 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/13499e68-b11e-4914-8a02-7c63458afd38-bound-sa-token\") pod \"ingress-operator-5b745b69d9-ddlrq\" (UID: \"13499e68-b11e-4914-8a02-7c63458afd38\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ddlrq" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.925906 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/beba01d0-40fd-49b1-ba9c-99dd121db738-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-5wxk5\" (UID: \"beba01d0-40fd-49b1-ba9c-99dd121db738\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5wxk5" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.925941 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/3b437875-4ae6-4455-a281-84281a93fad0-plugins-dir\") pod \"csi-hostpathplugin-qnqmv\" (UID: \"3b437875-4ae6-4455-a281-84281a93fad0\") " pod="hostpath-provisioner/csi-hostpathplugin-qnqmv" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.926018 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/7ee007cb-bc82-48e8-a1ff-34f37f55ba19-profile-collector-cert\") pod \"catalog-operator-68c6474976-lq2rk\" (UID: \"7ee007cb-bc82-48e8-a1ff-34f37f55ba19\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-lq2rk" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.926059 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/13499e68-b11e-4914-8a02-7c63458afd38-trusted-ca\") pod \"ingress-operator-5b745b69d9-ddlrq\" (UID: \"13499e68-b11e-4914-8a02-7c63458afd38\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ddlrq" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.926102 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qsskt\" (UniqueName: \"kubernetes.io/projected/1bc08a92-d2bf-41ce-84b7-675c9c6a24de-kube-api-access-qsskt\") pod \"dns-default-jjd26\" (UID: \"1bc08a92-d2bf-41ce-84b7-675c9c6a24de\") " pod="openshift-dns/dns-default-jjd26" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.926149 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/7ee007cb-bc82-48e8-a1ff-34f37f55ba19-srv-cert\") pod \"catalog-operator-68c6474976-lq2rk\" (UID: \"7ee007cb-bc82-48e8-a1ff-34f37f55ba19\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-lq2rk" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.926173 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wqvnv\" (UniqueName: \"kubernetes.io/projected/ece8c985-18db-4433-8f21-503a4718c5bf-kube-api-access-wqvnv\") pod \"machine-config-server-5zxrw\" (UID: \"ece8c985-18db-4433-8f21-503a4718c5bf\") " pod="openshift-machine-config-operator/machine-config-server-5zxrw" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.926200 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c8s4w\" (UniqueName: \"kubernetes.io/projected/fff47bbd-bac2-436d-b331-23f8aa856bd0-kube-api-access-c8s4w\") pod \"ingress-canary-g79kz\" (UID: 
\"fff47bbd-bac2-436d-b331-23f8aa856bd0\") " pod="openshift-ingress-canary/ingress-canary-g79kz" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.926222 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4xldx\" (UniqueName: \"kubernetes.io/projected/7ee007cb-bc82-48e8-a1ff-34f37f55ba19-kube-api-access-4xldx\") pod \"catalog-operator-68c6474976-lq2rk\" (UID: \"7ee007cb-bc82-48e8-a1ff-34f37f55ba19\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-lq2rk" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.926257 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jrt2x\" (UniqueName: \"kubernetes.io/projected/beba01d0-40fd-49b1-ba9c-99dd121db738-kube-api-access-jrt2x\") pod \"kube-storage-version-migrator-operator-b67b599dd-5wxk5\" (UID: \"beba01d0-40fd-49b1-ba9c-99dd121db738\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5wxk5" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.926275 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/1bc08a92-d2bf-41ce-84b7-675c9c6a24de-metrics-tls\") pod \"dns-default-jjd26\" (UID: \"1bc08a92-d2bf-41ce-84b7-675c9c6a24de\") " pod="openshift-dns/dns-default-jjd26" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.926273 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pv27p\" (UniqueName: \"kubernetes.io/projected/6c5bcd39-8067-47c3-bc3b-5aa1aed7b516-kube-api-access-pv27p\") pod \"router-default-5444994796-ghlnz\" (UID: \"6c5bcd39-8067-47c3-bc3b-5aa1aed7b516\") " pod="openshift-ingress/router-default-5444994796-ghlnz" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.926309 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/ece8c985-18db-4433-8f21-503a4718c5bf-certs\") pod \"machine-config-server-5zxrw\" (UID: \"ece8c985-18db-4433-8f21-503a4718c5bf\") " pod="openshift-machine-config-operator/machine-config-server-5zxrw" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.926328 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8mk6s\" (UniqueName: \"kubernetes.io/projected/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-kube-api-access-8mk6s\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.926353 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/51677670-528c-40ba-acf4-e9b506e48a84-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-plzgn\" (UID: \"51677670-528c-40ba-acf4-e9b506e48a84\") " pod="openshift-marketplace/marketplace-operator-79b997595-plzgn" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.926387 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/ece8c985-18db-4433-8f21-503a4718c5bf-node-bootstrap-token\") pod \"machine-config-server-5zxrw\" (UID: \"ece8c985-18db-4433-8f21-503a4718c5bf\") " pod="openshift-machine-config-operator/machine-config-server-5zxrw" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.926419 4857 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1bc08a92-d2bf-41ce-84b7-675c9c6a24de-config-volume\") pod \"dns-default-jjd26\" (UID: \"1bc08a92-d2bf-41ce-84b7-675c9c6a24de\") " pod="openshift-dns/dns-default-jjd26" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.926472 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/3b437875-4ae6-4455-a281-84281a93fad0-registration-dir\") pod \"csi-hostpathplugin-qnqmv\" (UID: \"3b437875-4ae6-4455-a281-84281a93fad0\") " pod="hostpath-provisioner/csi-hostpathplugin-qnqmv" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.926507 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/51677670-528c-40ba-acf4-e9b506e48a84-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-plzgn\" (UID: \"51677670-528c-40ba-acf4-e9b506e48a84\") " pod="openshift-marketplace/marketplace-operator-79b997595-plzgn" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.926529 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/3b437875-4ae6-4455-a281-84281a93fad0-mountpoint-dir\") pod \"csi-hostpathplugin-qnqmv\" (UID: \"3b437875-4ae6-4455-a281-84281a93fad0\") " pod="hostpath-provisioner/csi-hostpathplugin-qnqmv" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.926551 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26kcd\" (UniqueName: \"kubernetes.io/projected/13499e68-b11e-4914-8a02-7c63458afd38-kube-api-access-26kcd\") pod \"ingress-operator-5b745b69d9-ddlrq\" (UID: \"13499e68-b11e-4914-8a02-7c63458afd38\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ddlrq" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.926594 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/13499e68-b11e-4914-8a02-7c63458afd38-metrics-tls\") pod \"ingress-operator-5b745b69d9-ddlrq\" (UID: \"13499e68-b11e-4914-8a02-7c63458afd38\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ddlrq" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.926633 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/fff47bbd-bac2-436d-b331-23f8aa856bd0-cert\") pod \"ingress-canary-g79kz\" (UID: \"fff47bbd-bac2-436d-b331-23f8aa856bd0\") " pod="openshift-ingress-canary/ingress-canary-g79kz" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.926665 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vfm8r\" (UniqueName: \"kubernetes.io/projected/3b437875-4ae6-4455-a281-84281a93fad0-kube-api-access-vfm8r\") pod \"csi-hostpathplugin-qnqmv\" (UID: \"3b437875-4ae6-4455-a281-84281a93fad0\") " pod="hostpath-provisioner/csi-hostpathplugin-qnqmv" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.927762 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nflf2\" (UniqueName: \"kubernetes.io/projected/6e62429f-c841-417c-b2cd-32130d5cf05f-kube-api-access-nflf2\") pod \"machine-config-controller-84d6567774-fcpdp\" (UID: \"6e62429f-c841-417c-b2cd-32130d5cf05f\") " 
pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fcpdp" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.928161 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/924df434-aae1-4b09-adc3-01b3e079fb3f-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-n8z9p\" (UID: \"924df434-aae1-4b09-adc3-01b3e079fb3f\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-n8z9p" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.928761 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7mbsm\" (UniqueName: \"kubernetes.io/projected/7c609810-eec1-4f73-ad29-24fc190b1ffa-kube-api-access-7mbsm\") pod \"oauth-openshift-558db77b4-m658x\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " pod="openshift-authentication/oauth-openshift-558db77b4-m658x" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.928924 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-96chg\" (UniqueName: \"kubernetes.io/projected/d39d20be-e63a-4fef-bde2-6f4e76051828-kube-api-access-96chg\") pod \"control-plane-machine-set-operator-78cbb6b69f-4xnj2\" (UID: \"d39d20be-e63a-4fef-bde2-6f4e76051828\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4xnj2" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.930360 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tv98r\" (UniqueName: \"kubernetes.io/projected/5f40ecda-b519-4cfe-8b7b-6854e018fe24-kube-api-access-tv98r\") pod \"console-f9d7485db-6gf7t\" (UID: \"5f40ecda-b519-4cfe-8b7b-6854e018fe24\") " pod="openshift-console/console-f9d7485db-6gf7t" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.931373 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/7ee007cb-bc82-48e8-a1ff-34f37f55ba19-srv-cert\") pod \"catalog-operator-68c6474976-lq2rk\" (UID: \"7ee007cb-bc82-48e8-a1ff-34f37f55ba19\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-lq2rk" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.931472 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/3b437875-4ae6-4455-a281-84281a93fad0-mountpoint-dir\") pod \"csi-hostpathplugin-qnqmv\" (UID: \"3b437875-4ae6-4455-a281-84281a93fad0\") " pod="hostpath-provisioner/csi-hostpathplugin-qnqmv" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.932334 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1bc08a92-d2bf-41ce-84b7-675c9c6a24de-config-volume\") pod \"dns-default-jjd26\" (UID: \"1bc08a92-d2bf-41ce-84b7-675c9c6a24de\") " pod="openshift-dns/dns-default-jjd26" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.932708 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/3b437875-4ae6-4455-a281-84281a93fad0-registration-dir\") pod \"csi-hostpathplugin-qnqmv\" (UID: \"3b437875-4ae6-4455-a281-84281a93fad0\") " pod="hostpath-provisioner/csi-hostpathplugin-qnqmv" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.934378 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: 
\"kubernetes.io/host-path/3b437875-4ae6-4455-a281-84281a93fad0-plugins-dir\") pod \"csi-hostpathplugin-qnqmv\" (UID: \"3b437875-4ae6-4455-a281-84281a93fad0\") " pod="hostpath-provisioner/csi-hostpathplugin-qnqmv" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.934781 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/beba01d0-40fd-49b1-ba9c-99dd121db738-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-5wxk5\" (UID: \"beba01d0-40fd-49b1-ba9c-99dd121db738\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5wxk5" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.935693 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/3b437875-4ae6-4455-a281-84281a93fad0-socket-dir\") pod \"csi-hostpathplugin-qnqmv\" (UID: \"3b437875-4ae6-4455-a281-84281a93fad0\") " pod="hostpath-provisioner/csi-hostpathplugin-qnqmv" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.937211 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/51677670-528c-40ba-acf4-e9b506e48a84-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-plzgn\" (UID: \"51677670-528c-40ba-acf4-e9b506e48a84\") " pod="openshift-marketplace/marketplace-operator-79b997595-plzgn" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.937568 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/51677670-528c-40ba-acf4-e9b506e48a84-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-plzgn\" (UID: \"51677670-528c-40ba-acf4-e9b506e48a84\") " pod="openshift-marketplace/marketplace-operator-79b997595-plzgn" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.937705 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/ece8c985-18db-4433-8f21-503a4718c5bf-certs\") pod \"machine-config-server-5zxrw\" (UID: \"ece8c985-18db-4433-8f21-503a4718c5bf\") " pod="openshift-machine-config-operator/machine-config-server-5zxrw" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.937786 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/3b437875-4ae6-4455-a281-84281a93fad0-csi-data-dir\") pod \"csi-hostpathplugin-qnqmv\" (UID: \"3b437875-4ae6-4455-a281-84281a93fad0\") " pod="hostpath-provisioner/csi-hostpathplugin-qnqmv" Nov 28 13:31:30 crc kubenswrapper[4857]: E1128 13:31:30.937968 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:31.437946417 +0000 UTC m=+141.561887854 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.940232 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/1bc08a92-d2bf-41ce-84b7-675c9c6a24de-metrics-tls\") pod \"dns-default-jjd26\" (UID: \"1bc08a92-d2bf-41ce-84b7-675c9c6a24de\") " pod="openshift-dns/dns-default-jjd26" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.943471 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/13499e68-b11e-4914-8a02-7c63458afd38-metrics-tls\") pod \"ingress-operator-5b745b69d9-ddlrq\" (UID: \"13499e68-b11e-4914-8a02-7c63458afd38\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ddlrq" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.945454 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-prc44" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.951352 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/7ee007cb-bc82-48e8-a1ff-34f37f55ba19-profile-collector-cert\") pod \"catalog-operator-68c6474976-lq2rk\" (UID: \"7ee007cb-bc82-48e8-a1ff-34f37f55ba19\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-lq2rk" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.952690 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/beba01d0-40fd-49b1-ba9c-99dd121db738-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-5wxk5\" (UID: \"beba01d0-40fd-49b1-ba9c-99dd121db738\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5wxk5" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.953403 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/ece8c985-18db-4433-8f21-503a4718c5bf-node-bootstrap-token\") pod \"machine-config-server-5zxrw\" (UID: \"ece8c985-18db-4433-8f21-503a4718c5bf\") " pod="openshift-machine-config-operator/machine-config-server-5zxrw" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.954309 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/fff47bbd-bac2-436d-b331-23f8aa856bd0-cert\") pod \"ingress-canary-g79kz\" (UID: \"fff47bbd-bac2-436d-b331-23f8aa856bd0\") " pod="openshift-ingress-canary/ingress-canary-g79kz" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.962049 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qsskt\" (UniqueName: \"kubernetes.io/projected/1bc08a92-d2bf-41ce-84b7-675c9c6a24de-kube-api-access-qsskt\") pod \"dns-default-jjd26\" (UID: \"1bc08a92-d2bf-41ce-84b7-675c9c6a24de\") " pod="openshift-dns/dns-default-jjd26" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.962685 4857 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/13499e68-b11e-4914-8a02-7c63458afd38-bound-sa-token\") pod \"ingress-operator-5b745b69d9-ddlrq\" (UID: \"13499e68-b11e-4914-8a02-7c63458afd38\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ddlrq" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.963561 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wqvnv\" (UniqueName: \"kubernetes.io/projected/ece8c985-18db-4433-8f21-503a4718c5bf-kube-api-access-wqvnv\") pod \"machine-config-server-5zxrw\" (UID: \"ece8c985-18db-4433-8f21-503a4718c5bf\") " pod="openshift-machine-config-operator/machine-config-server-5zxrw" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.967597 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-jjd26" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.968740 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jrt2x\" (UniqueName: \"kubernetes.io/projected/beba01d0-40fd-49b1-ba9c-99dd121db738-kube-api-access-jrt2x\") pod \"kube-storage-version-migrator-operator-b67b599dd-5wxk5\" (UID: \"beba01d0-40fd-49b1-ba9c-99dd121db738\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5wxk5" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.968804 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/13499e68-b11e-4914-8a02-7c63458afd38-trusted-ca\") pod \"ingress-operator-5b745b69d9-ddlrq\" (UID: \"13499e68-b11e-4914-8a02-7c63458afd38\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ddlrq" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.972070 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-5zxrw" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.972934 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vfm8r\" (UniqueName: \"kubernetes.io/projected/3b437875-4ae6-4455-a281-84281a93fad0-kube-api-access-vfm8r\") pod \"csi-hostpathplugin-qnqmv\" (UID: \"3b437875-4ae6-4455-a281-84281a93fad0\") " pod="hostpath-provisioner/csi-hostpathplugin-qnqmv" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.979522 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8mvbr\" (UniqueName: \"kubernetes.io/projected/51677670-528c-40ba-acf4-e9b506e48a84-kube-api-access-8mvbr\") pod \"marketplace-operator-79b997595-plzgn\" (UID: \"51677670-528c-40ba-acf4-e9b506e48a84\") " pod="openshift-marketplace/marketplace-operator-79b997595-plzgn" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.990460 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-p7lbs"] Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.990765 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-m9bfw" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.993488 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-m658x" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.994891 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c8s4w\" (UniqueName: \"kubernetes.io/projected/fff47bbd-bac2-436d-b331-23f8aa856bd0-kube-api-access-c8s4w\") pod \"ingress-canary-g79kz\" (UID: \"fff47bbd-bac2-436d-b331-23f8aa856bd0\") " pod="openshift-ingress-canary/ingress-canary-g79kz" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.995149 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-26kcd\" (UniqueName: \"kubernetes.io/projected/13499e68-b11e-4914-8a02-7c63458afd38-kube-api-access-26kcd\") pod \"ingress-operator-5b745b69d9-ddlrq\" (UID: \"13499e68-b11e-4914-8a02-7c63458afd38\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ddlrq" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.995156 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4xldx\" (UniqueName: \"kubernetes.io/projected/7ee007cb-bc82-48e8-a1ff-34f37f55ba19-kube-api-access-4xldx\") pod \"catalog-operator-68c6474976-lq2rk\" (UID: \"7ee007cb-bc82-48e8-a1ff-34f37f55ba19\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-lq2rk" Nov 28 13:31:30 crc kubenswrapper[4857]: I1128 13:31:30.995773 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-qnqmv" Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.007470 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-6gf7t" Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.013345 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-jp48x" Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.029101 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:31 crc kubenswrapper[4857]: E1128 13:31:31.029445 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:31.529429757 +0000 UTC m=+141.653371194 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.037142 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2gj2l" Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.038436 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d8ghr"] Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.049597 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-6qvft" Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.055184 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-ghlnz" Nov 28 13:31:31 crc kubenswrapper[4857]: W1128 13:31:31.058790 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4427cefb_2293_4022_afed_686499999cef.slice/crio-080a54bd38b356ab7bb17ce6951372a28f2984f849004bd58059a758c77a4ded WatchSource:0}: Error finding container 080a54bd38b356ab7bb17ce6951372a28f2984f849004bd58059a758c77a4ded: Status 404 returned error can't find the container with id 080a54bd38b356ab7bb17ce6951372a28f2984f849004bd58059a758c77a4ded Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.087750 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-wzgtn" Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.094231 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fcpdp" Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.103044 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-hl6h5"] Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.106103 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-78f6q"] Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.119688 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4xnj2" Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.130528 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:31 crc kubenswrapper[4857]: E1128 13:31:31.130822 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:31.630810021 +0000 UTC m=+141.754751458 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.147160 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-n8z9p" Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.153235 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t48t9"] Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.155813 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-76m2l"] Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.159356 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-gdkwk" Nov 28 13:31:31 crc kubenswrapper[4857]: W1128 13:31:31.204896 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod34073888_721b_429f_81c5_e12d90b5b7e8.slice/crio-dea5bb62ee1fb2bdefeb84b646d295b27ec7f44b8be3f88149725d26a810169c WatchSource:0}: Error finding container dea5bb62ee1fb2bdefeb84b646d295b27ec7f44b8be3f88149725d26a810169c: Status 404 returned error can't find the container with id dea5bb62ee1fb2bdefeb84b646d295b27ec7f44b8be3f88149725d26a810169c Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.219181 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ddlrq" Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.229046 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gv6hw"] Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.231187 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.232113 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ps26v"] Nov 28 13:31:31 crc kubenswrapper[4857]: E1128 13:31:31.234914 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:31.734881612 +0000 UTC m=+141.858823049 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.235044 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5wxk5" Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.240662 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-lq2rk" Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.260999 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-plzgn" Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.261711 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-g79kz" Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.268689 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405610-z8jl4"] Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.278374 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-bp9rq"] Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.337258 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:31 crc kubenswrapper[4857]: E1128 13:31:31.338750 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:31.838703866 +0000 UTC m=+141.962645303 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:31 crc kubenswrapper[4857]: W1128 13:31:31.353162 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb88b052c_169c_4802_8705_bbd084630a7a.slice/crio-39a3d0eba56ea263ca03817b17342ce75e6cd5604d0fc6560c9166176b8faab5 WatchSource:0}: Error finding container 39a3d0eba56ea263ca03817b17342ce75e6cd5604d0fc6560c9166176b8faab5: Status 404 returned error can't find the container with id 39a3d0eba56ea263ca03817b17342ce75e6cd5604d0fc6560c9166176b8faab5 Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.439681 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:31 crc kubenswrapper[4857]: E1128 13:31:31.440052 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:31.940036478 +0000 UTC m=+142.063977915 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:31 crc kubenswrapper[4857]: W1128 13:31:31.456962 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda29f471d_4c21_4e9b_a479_3b8f0d1e0878.slice/crio-513b76951a7ef4c8d3821a1408591e4613bd1a416d7cb7caee35e128bbc42661 WatchSource:0}: Error finding container 513b76951a7ef4c8d3821a1408591e4613bd1a416d7cb7caee35e128bbc42661: Status 404 returned error can't find the container with id 513b76951a7ef4c8d3821a1408591e4613bd1a416d7cb7caee35e128bbc42661 Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.475513 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-prc44"] Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.546057 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:31 crc kubenswrapper[4857]: E1128 13:31:31.549480 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:32.049459723 +0000 UTC m=+142.173401160 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.597023 4857 generic.go:334] "Generic (PLEG): container finished" podID="5ccc741a-98e5-420d-ad6e-4260ea2abb1f" containerID="3761c0c9ce5a87dc9e5dabadcb33f28453bf507a78be47ff6eea12cfa2313e8b" exitCode=0 Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.597088 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-qp4rc" event={"ID":"5ccc741a-98e5-420d-ad6e-4260ea2abb1f","Type":"ContainerDied","Data":"3761c0c9ce5a87dc9e5dabadcb33f28453bf507a78be47ff6eea12cfa2313e8b"} Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.597115 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-qp4rc" event={"ID":"5ccc741a-98e5-420d-ad6e-4260ea2abb1f","Type":"ContainerStarted","Data":"376ff6f1c464237a9cf192716ce52f574dc9967112987bb40519dae7a6487924"} Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.603894 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-xdww2" event={"ID":"4546bf69-bd62-41a8-ade2-31c4e7f198da","Type":"ContainerStarted","Data":"97370f3aaaa36ff01f19bf2542bca255fb2df82235c030ae23959b5ca161a3d2"} Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.603941 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-xdww2" event={"ID":"4546bf69-bd62-41a8-ade2-31c4e7f198da","Type":"ContainerStarted","Data":"06a1a2e9121407735587dd4c7f2597ea7b6396ce6fc008b7b5754aa43a2a42dc"} Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.619629 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ps26v" event={"ID":"b88b052c-169c-4802-8705-bbd084630a7a","Type":"ContainerStarted","Data":"39a3d0eba56ea263ca03817b17342ce75e6cd5604d0fc6560c9166176b8faab5"} Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.642231 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-5c5lq" event={"ID":"1cb29ded-850a-45ff-8201-a991fe779c01","Type":"ContainerStarted","Data":"91df0869db6696e344143c391199fa1ab3cd69636f4b4db6a9e6ca5b5211e027"} Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.642288 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-5c5lq" event={"ID":"1cb29ded-850a-45ff-8201-a991fe779c01","Type":"ContainerStarted","Data":"99c1f1f5c07faf38ec80299cff5701e3093a3872dc262e7cf243077232fda39c"} Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.643235 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-5c5lq" Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.648703 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod 
\"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:31 crc kubenswrapper[4857]: E1128 13:31:31.649142 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:32.149126678 +0000 UTC m=+142.273068115 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.662848 4857 patch_prober.go:28] interesting pod/downloads-7954f5f757-5c5lq container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body= Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.662929 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-5c5lq" podUID="1cb29ded-850a-45ff-8201-a991fe779c01" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.663392 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-cvb79" event={"ID":"7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558","Type":"ContainerStarted","Data":"c046b374876ab426bd4bdcf4f7a1af86bae6e3ccceb33ad14f3fc2d593fd2887"} Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.663424 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-cvb79" event={"ID":"7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558","Type":"ContainerStarted","Data":"efbb52fbe69a9f85b7e950dee6a37a78c9bd15fa14fcc36abcec73c2c9a8da17"} Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.664603 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-cvb79" Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.672678 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-5zxrw" event={"ID":"ece8c985-18db-4433-8f21-503a4718c5bf","Type":"ContainerStarted","Data":"6e21895e48c8401d6ad917cd060db3943aeeb3311e0e10ac9cdf3b8a06a2541c"} Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.673070 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-xdww2" podStartSLOduration=120.673046166 podStartE2EDuration="2m0.673046166s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:31.671986515 +0000 UTC m=+141.795927942" watchObservedRunningTime="2025-11-28 13:31:31.673046166 +0000 UTC m=+141.796987623" Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.699933 4857 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-v825d" event={"ID":"ef70370a-9dc0-4378-b4f5-ae6b279330b4","Type":"ContainerStarted","Data":"5737caa632c948db6b982286af07cd62e7c26b8206373e00e7d869eda2e58890"} Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.710180 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-mrg8q" event={"ID":"a73f6e61-db8f-4ada-8825-201429e1f803","Type":"ContainerStarted","Data":"e9d10eebc69e37eccab7b6a2297e74b5e1a56b7d7ff5052db3b1a2a1b799161e"} Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.732289 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-bp9rq" event={"ID":"a29f471d-4c21-4e9b-a479-3b8f0d1e0878","Type":"ContainerStarted","Data":"513b76951a7ef4c8d3821a1408591e4613bd1a416d7cb7caee35e128bbc42661"} Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.741450 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-cvb79" Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.750019 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:31 crc kubenswrapper[4857]: E1128 13:31:31.752809 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:32.252789528 +0000 UTC m=+142.376730965 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.757120 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-p7lbs" event={"ID":"4427cefb-2293-4022-afed-686499999cef","Type":"ContainerStarted","Data":"080a54bd38b356ab7bb17ce6951372a28f2984f849004bd58059a758c77a4ded"} Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.766649 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-5c5lq" podStartSLOduration=120.766627535 podStartE2EDuration="2m0.766627535s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:31.733115952 +0000 UTC m=+141.857057389" watchObservedRunningTime="2025-11-28 13:31:31.766627535 +0000 UTC m=+141.890568962" Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.769864 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-92tgg" event={"ID":"de71ba59-fb73-4f54-be20-690e4e94b446","Type":"ContainerStarted","Data":"4efcecb0e4333dc671beb8406e2b692e59e1750dbf2ede4b9244d67326f5dc31"} Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.769916 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-92tgg" event={"ID":"de71ba59-fb73-4f54-be20-690e4e94b446","Type":"ContainerStarted","Data":"8d542de7fb175afeab6f61ef7731649b8009bb03f81a732af8530c8aafc429f4"} Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.771624 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-cvb79" podStartSLOduration=120.771614509 podStartE2EDuration="2m0.771614509s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:31.768648783 +0000 UTC m=+141.892590240" watchObservedRunningTime="2025-11-28 13:31:31.771614509 +0000 UTC m=+141.895555946" Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.856012 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:31 crc kubenswrapper[4857]: E1128 13:31:31.857159 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:32.357142827 +0000 UTC m=+142.481084264 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.858160 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-78f6q" event={"ID":"34073888-721b-429f-81c5-e12d90b5b7e8","Type":"ContainerStarted","Data":"dea5bb62ee1fb2bdefeb84b646d295b27ec7f44b8be3f88149725d26a810169c"} Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.880912 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405610-z8jl4" event={"ID":"49fd1de8-3f87-4d33-b209-df83dd1096a8","Type":"ContainerStarted","Data":"79ca42dd73efffbc9cae44ae8cdff047fcc5b4ec57f30960fd67f42939a0b22b"} Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.885481 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-6qvft"] Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.886282 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-prc44" event={"ID":"39eab4a3-7a84-4550-bd46-1fbdc8f8e087","Type":"ContainerStarted","Data":"85418c19aa0b77b87607e4e20a6482af508043da810975dcf6b383f25e362c23"} Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.890558 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-m9bfw"] Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.891888 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t48t9" event={"ID":"fd021cd9-72f4-40a3-87f9-cb253db8ebdc","Type":"ContainerStarted","Data":"bf2124b31195c92a9d549490268c6a0015bdd33e932da567bea72c3d1ba070a0"} Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.891957 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-6gf7t"] Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.899674 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-92tgg" podStartSLOduration=120.899655849 podStartE2EDuration="2m0.899655849s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:31.899559556 +0000 UTC m=+142.023501003" watchObservedRunningTime="2025-11-28 13:31:31.899655849 +0000 UTC m=+142.023597296" Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.929098 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-jp48x"] Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.940506 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-nm6c5" event={"ID":"0723fd39-fd72-4aae-a8ac-4a69a9cea44e","Type":"ContainerStarted","Data":"9c0e737123fe8eb433fab63a8f20d62716be6d186d2a25fe799d384dcc1d63ce"} Nov 28 13:31:31 crc 
kubenswrapper[4857]: I1128 13:31:31.940559 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-nm6c5" event={"ID":"0723fd39-fd72-4aae-a8ac-4a69a9cea44e","Type":"ContainerStarted","Data":"e3bba44b5183a01cc2e71d3b55167aa75018c068b3970b6d47f7d15ddc27f6de"} Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.960798 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.961919 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-hl6h5" event={"ID":"1d2f2183-42ac-4260-88cd-9379d25bf71d","Type":"ContainerStarted","Data":"4106f593e9489a1bc3f72c4bc5a6ccdb384af1f6e6deec2b2cdc96b2ae71d16e"} Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.967618 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-76m2l" event={"ID":"599a933f-fb51-4ba8-bf84-bd7f9ce63af5","Type":"ContainerStarted","Data":"31862c5894c382501271b2d1960941d88587539ba4f942ebd4f8ee47286befab"} Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.970686 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d8ghr" event={"ID":"492c9136-a973-43ca-ad2c-4650392e6e38","Type":"ContainerStarted","Data":"5e6361450388ab01e46cd873670582a83529850edf7d5cacbf36eb1254d1dba6"} Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.971758 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9sptz" event={"ID":"951e34a8-00f5-479a-9de8-ee53ee32da75","Type":"ContainerStarted","Data":"7949e119cbd0d8311f7bfa30a9b72cbda0b51db3197046ab1e8a3400780bac4a"} Nov 28 13:31:31 crc kubenswrapper[4857]: E1128 13:31:31.971793 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:32.471770372 +0000 UTC m=+142.595711809 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:31 crc kubenswrapper[4857]: I1128 13:31:31.974666 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gv6hw" event={"ID":"d6bc1489-81cc-49df-a936-c0c19721b8c3","Type":"ContainerStarted","Data":"f7add22b8adea762344e391fa75ed0bb5084898829d8fee3aed829ab0f8873a0"} Nov 28 13:31:32 crc kubenswrapper[4857]: W1128 13:31:32.009566 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5f40ecda_b519_4cfe_8b7b_6854e018fe24.slice/crio-48727e21f4e7089b0ebf5bfd06470f6ac7b11000c5d6418336dcf275dabf402f WatchSource:0}: Error finding container 48727e21f4e7089b0ebf5bfd06470f6ac7b11000c5d6418336dcf275dabf402f: Status 404 returned error can't find the container with id 48727e21f4e7089b0ebf5bfd06470f6ac7b11000c5d6418336dcf275dabf402f Nov 28 13:31:32 crc kubenswrapper[4857]: I1128 13:31:32.015713 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-m658x"] Nov 28 13:31:32 crc kubenswrapper[4857]: I1128 13:31:32.024178 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-7ch8g"] Nov 28 13:31:32 crc kubenswrapper[4857]: I1128 13:31:32.063629 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:32 crc kubenswrapper[4857]: E1128 13:31:32.064144 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:32.564124766 +0000 UTC m=+142.688066203 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:32 crc kubenswrapper[4857]: I1128 13:31:32.091106 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-qnqmv"] Nov 28 13:31:32 crc kubenswrapper[4857]: W1128 13:31:32.160157 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7c609810_eec1_4f73_ad29_24fc190b1ffa.slice/crio-2aa97746e6549ba1a69644620130772b06b5346b1743e7a6f3eb72c8e7a2232d WatchSource:0}: Error finding container 2aa97746e6549ba1a69644620130772b06b5346b1743e7a6f3eb72c8e7a2232d: Status 404 returned error can't find the container with id 2aa97746e6549ba1a69644620130772b06b5346b1743e7a6f3eb72c8e7a2232d Nov 28 13:31:32 crc kubenswrapper[4857]: I1128 13:31:32.164799 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:32 crc kubenswrapper[4857]: E1128 13:31:32.165401 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:32.665385846 +0000 UTC m=+142.789327283 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:32 crc kubenswrapper[4857]: W1128 13:31:32.167110 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod194b3b2b_5c56_44e9_8800_fd8fafbcd3d3.slice/crio-74cc16f8b2da92c68781c7873cfbecce6f1f8831528336ee8bc350cc81e749e0 WatchSource:0}: Error finding container 74cc16f8b2da92c68781c7873cfbecce6f1f8831528336ee8bc350cc81e749e0: Status 404 returned error can't find the container with id 74cc16f8b2da92c68781c7873cfbecce6f1f8831528336ee8bc350cc81e749e0 Nov 28 13:31:32 crc kubenswrapper[4857]: I1128 13:31:32.266251 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:32 crc kubenswrapper[4857]: E1128 13:31:32.266818 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:32.766802141 +0000 UTC m=+142.890743578 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:32 crc kubenswrapper[4857]: I1128 13:31:32.373891 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:32 crc kubenswrapper[4857]: E1128 13:31:32.374226 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:32.874215039 +0000 UTC m=+142.998156476 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:32 crc kubenswrapper[4857]: I1128 13:31:32.474328 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:32 crc kubenswrapper[4857]: E1128 13:31:32.474590 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:32.974571703 +0000 UTC m=+143.098513140 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:32 crc kubenswrapper[4857]: I1128 13:31:32.476387 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-gdkwk"] Nov 28 13:31:32 crc kubenswrapper[4857]: I1128 13:31:32.514064 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-jjd26"] Nov 28 13:31:32 crc kubenswrapper[4857]: I1128 13:31:32.577356 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:32 crc kubenswrapper[4857]: E1128 13:31:32.577716 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:33.077696847 +0000 UTC m=+143.201638284 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:32 crc kubenswrapper[4857]: I1128 13:31:32.629584 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-ddlrq"] Nov 28 13:31:32 crc kubenswrapper[4857]: I1128 13:31:32.685576 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2gj2l"] Nov 28 13:31:32 crc kubenswrapper[4857]: I1128 13:31:32.686495 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:32 crc kubenswrapper[4857]: E1128 13:31:32.686939 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:33.186919126 +0000 UTC m=+143.310860573 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:32 crc kubenswrapper[4857]: I1128 13:31:32.712424 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-g79kz"] Nov 28 13:31:32 crc kubenswrapper[4857]: I1128 13:31:32.787953 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:32 crc kubenswrapper[4857]: E1128 13:31:32.788417 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:33.288401533 +0000 UTC m=+143.412342970 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:32 crc kubenswrapper[4857]: I1128 13:31:32.892574 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:32 crc kubenswrapper[4857]: E1128 13:31:32.893280 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:33.393263417 +0000 UTC m=+143.517204854 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.000395 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:33 crc kubenswrapper[4857]: E1128 13:31:33.000824 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:33.500809968 +0000 UTC m=+143.624751415 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.005550 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-ghlnz" event={"ID":"6c5bcd39-8067-47c3-bc3b-5aa1aed7b516","Type":"ContainerStarted","Data":"d5c3aa6dad5a8c33ccf7be3378713173fd18e7be673350bf20e836c7f2267928"} Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.008659 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-gdkwk" event={"ID":"94ffd41a-3797-4314-a270-c2b1bd143b04","Type":"ContainerStarted","Data":"a1d5237ea2d3f08a4fafe23c299fa10749f78246ea7ddfe694550d27308af303"} Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.024108 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ddlrq" event={"ID":"13499e68-b11e-4914-8a02-7c63458afd38","Type":"ContainerStarted","Data":"051b9398862401f238082f17e0dbe9ac2d3464eb1789e82295b113f49f479b6d"} Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.042188 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-m658x" event={"ID":"7c609810-eec1-4f73-ad29-24fc190b1ffa","Type":"ContainerStarted","Data":"2aa97746e6549ba1a69644620130772b06b5346b1743e7a6f3eb72c8e7a2232d"} Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.047628 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-7ch8g" event={"ID":"194b3b2b-5c56-44e9-8800-fd8fafbcd3d3","Type":"ContainerStarted","Data":"74cc16f8b2da92c68781c7873cfbecce6f1f8831528336ee8bc350cc81e749e0"} Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.067218 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-fcpdp"] Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.097151 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-6qvft" event={"ID":"454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a","Type":"ContainerStarted","Data":"7505079e84ee0be2d17b27df90b7737994bbe795de7c6b7071bfe0bd0b199467"} Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.101621 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:33 crc kubenswrapper[4857]: E1128 13:31:33.102135 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:33.60211665 +0000 UTC m=+143.726058087 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.106888 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-plzgn"] Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.108448 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-v825d" event={"ID":"ef70370a-9dc0-4378-b4f5-ae6b279330b4","Type":"ContainerStarted","Data":"265769e2fff815605794ab321d5250433f00a156c6508e3055ae022007f55bcf"} Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.121349 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5wxk5"] Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.152045 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-hl6h5" event={"ID":"1d2f2183-42ac-4260-88cd-9379d25bf71d","Type":"ContainerStarted","Data":"80ce68478b86c8c948ec3066ff7112496970ef411442847060b2a3c68608162b"} Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.155695 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-qnqmv" event={"ID":"3b437875-4ae6-4455-a281-84281a93fad0","Type":"ContainerStarted","Data":"3804861b042a0b841e98a032345dd038b5062cb3d78e5c6cd78909af4a735d78"} Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.159425 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-prc44" event={"ID":"39eab4a3-7a84-4550-bd46-1fbdc8f8e087","Type":"ContainerStarted","Data":"6cc2346720f114ceef9006fbf71514b95518c8031b6c73f8bd63b11f997da112"} Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.168795 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t48t9" event={"ID":"fd021cd9-72f4-40a3-87f9-cb253db8ebdc","Type":"ContainerStarted","Data":"0dd759268ac93f6403775440332278f9d09745b9209f2d767b6d17a6b3d4131a"} Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.180421 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-hl6h5" podStartSLOduration=121.18039494 podStartE2EDuration="2m1.18039494s" podCreationTimestamp="2025-11-28 13:29:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:33.177340492 +0000 UTC m=+143.301281929" watchObservedRunningTime="2025-11-28 13:31:33.18039494 +0000 UTC m=+143.304336377" Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.191543 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-78f6q" event={"ID":"34073888-721b-429f-81c5-e12d90b5b7e8","Type":"ContainerStarted","Data":"97e5678fc8d5235191c088f504574078c9dbfa451bb524aa65d27080f7eace95"} Nov 28 13:31:33 crc kubenswrapper[4857]: 
I1128 13:31:33.198216 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-nm6c5" event={"ID":"0723fd39-fd72-4aae-a8ac-4a69a9cea44e","Type":"ContainerStarted","Data":"330ea283a1d77387b8bfe07f29cf83481ba5d3e99170611134a414feb53b3d94"} Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.205821 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.206009 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-t48t9" podStartSLOduration=122.205989226 podStartE2EDuration="2m2.205989226s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:33.204508733 +0000 UTC m=+143.328450170" watchObservedRunningTime="2025-11-28 13:31:33.205989226 +0000 UTC m=+143.329930663" Nov 28 13:31:33 crc kubenswrapper[4857]: E1128 13:31:33.208033 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:33.707960392 +0000 UTC m=+143.831901829 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.225053 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-mrg8q" event={"ID":"a73f6e61-db8f-4ada-8825-201429e1f803","Type":"ContainerStarted","Data":"8f595084d5379b23dcf22428df1344ecd3c50a025caca5aaa3c6c33309b85831"} Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.239257 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-g79kz" event={"ID":"fff47bbd-bac2-436d-b331-23f8aa856bd0","Type":"ContainerStarted","Data":"3584865709d2bf5836536207cdd7bb6ee59ba9f8f744e34289bed51034c5b29e"} Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.247552 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-jjd26" event={"ID":"1bc08a92-d2bf-41ce-84b7-675c9c6a24de","Type":"ContainerStarted","Data":"6538113c5e15279f0291ed6c1960affd9caae9a7dcf36b67d84df1b65c31c368"} Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.253136 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-prc44" podStartSLOduration=122.25311533 podStartE2EDuration="2m2.25311533s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:33.240760975 +0000 UTC m=+143.364702422" watchObservedRunningTime="2025-11-28 13:31:33.25311533 +0000 UTC m=+143.377056767" Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.259024 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-n8z9p"] Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.259070 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-wzgtn"] Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.267495 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-lq2rk"] Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.267554 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4xnj2"] Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.268499 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-nm6c5" podStartSLOduration=122.268478122 podStartE2EDuration="2m2.268478122s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:33.26770866 +0000 UTC m=+143.391650097" watchObservedRunningTime="2025-11-28 13:31:33.268478122 +0000 UTC m=+143.392419559" Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.270186 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405610-z8jl4" event={"ID":"49fd1de8-3f87-4d33-b209-df83dd1096a8","Type":"ContainerStarted","Data":"039646d2cc0856a7cd5b98a3c85cd50c130f5b53d297446b8ca635aa7ff46c20"} Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.282221 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-m9bfw" event={"ID":"844ce19a-9c82-4a37-8170-db724fabc85c","Type":"ContainerStarted","Data":"28eb68cc2b1d3c120c5ffaae328293910313ad150c80bc7249163cef1a0f40c3"} Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.286623 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-5zxrw" event={"ID":"ece8c985-18db-4433-8f21-503a4718c5bf","Type":"ContainerStarted","Data":"33c2261e51ff9b8938d0da72b054521beb5f1f6e9deb3746091720ad41b4bf3a"} Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.294953 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-6gf7t" event={"ID":"5f40ecda-b519-4cfe-8b7b-6854e018fe24","Type":"ContainerStarted","Data":"48727e21f4e7089b0ebf5bfd06470f6ac7b11000c5d6418336dcf275dabf402f"} Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.297925 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d8ghr" event={"ID":"492c9136-a973-43ca-ad2c-4650392e6e38","Type":"ContainerStarted","Data":"1947349c4ef6f0798f478886d4d53de1558115baf805ee0e19f00932f292733b"} Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.298841 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d8ghr" Nov 28 13:31:33 crc kubenswrapper[4857]: 
I1128 13:31:33.302134 4857 generic.go:334] "Generic (PLEG): container finished" podID="951e34a8-00f5-479a-9de8-ee53ee32da75" containerID="0d50d377b2b2b0bcfe35c9515a0f830e49df54c4a1362b68a080e33bf46f023d" exitCode=0 Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.302252 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9sptz" event={"ID":"951e34a8-00f5-479a-9de8-ee53ee32da75","Type":"ContainerDied","Data":"0d50d377b2b2b0bcfe35c9515a0f830e49df54c4a1362b68a080e33bf46f023d"} Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.306404 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:33 crc kubenswrapper[4857]: E1128 13:31:33.307062 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:33.80703234 +0000 UTC m=+143.930973777 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.307066 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29405610-z8jl4" podStartSLOduration=93.30704333 podStartE2EDuration="1m33.30704333s" podCreationTimestamp="2025-11-28 13:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:33.305776384 +0000 UTC m=+143.429717821" watchObservedRunningTime="2025-11-28 13:31:33.30704333 +0000 UTC m=+143.430984767" Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.317307 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-jp48x" event={"ID":"93adcd15-ad3e-4583-81b9-1aa85b5d7ada","Type":"ContainerStarted","Data":"a9d827fd2038ba42ae4db85f3a40af3e94f293f73a41d33e7a56e880bcb98f07"} Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.331784 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2gj2l" event={"ID":"3150e4a1-9ef4-4a79-a40c-8eea82b5c19a","Type":"ContainerStarted","Data":"0c12db395c26b8d38d48347426b1a2d3151731ad3fa43565122f998c3f11fabd"} Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.331892 4857 patch_prober.go:28] interesting pod/downloads-7954f5f757-5c5lq container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body= Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.331922 4857 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-console/downloads-7954f5f757-5c5lq" podUID="1cb29ded-850a-45ff-8201-a991fe779c01" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.342924 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d8ghr" podStartSLOduration=121.342895921 podStartE2EDuration="2m1.342895921s" podCreationTimestamp="2025-11-28 13:29:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:33.340735249 +0000 UTC m=+143.464676686" watchObservedRunningTime="2025-11-28 13:31:33.342895921 +0000 UTC m=+143.466837358" Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.357573 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-5zxrw" podStartSLOduration=7.3575528519999995 podStartE2EDuration="7.357552852s" podCreationTimestamp="2025-11-28 13:31:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:33.35538509 +0000 UTC m=+143.479326527" watchObservedRunningTime="2025-11-28 13:31:33.357552852 +0000 UTC m=+143.481494289" Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.409498 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:33 crc kubenswrapper[4857]: E1128 13:31:33.411325 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:33.911308037 +0000 UTC m=+144.035249474 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:33 crc kubenswrapper[4857]: W1128 13:31:33.450390 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd39d20be_e63a_4fef_bde2_6f4e76051828.slice/crio-a0357fbeba1a97bce068bef3c178e83e2acd22f7b1b058e154faff1b6e264719 WatchSource:0}: Error finding container a0357fbeba1a97bce068bef3c178e83e2acd22f7b1b058e154faff1b6e264719: Status 404 returned error can't find the container with id a0357fbeba1a97bce068bef3c178e83e2acd22f7b1b058e154faff1b6e264719 Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.510749 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:33 crc kubenswrapper[4857]: E1128 13:31:33.511224 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:34.011206728 +0000 UTC m=+144.135148165 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.614410 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:33 crc kubenswrapper[4857]: E1128 13:31:33.614749 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:34.114733834 +0000 UTC m=+144.238675271 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.715064 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:33 crc kubenswrapper[4857]: E1128 13:31:33.715349 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:34.215285034 +0000 UTC m=+144.339226481 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.715629 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:33 crc kubenswrapper[4857]: E1128 13:31:33.716045 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:34.216031945 +0000 UTC m=+144.339973382 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.768220 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-d8ghr" Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.816812 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:33 crc kubenswrapper[4857]: E1128 13:31:33.817390 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:34.317370628 +0000 UTC m=+144.441312075 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:33 crc kubenswrapper[4857]: I1128 13:31:33.919297 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:33 crc kubenswrapper[4857]: E1128 13:31:33.919731 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:34.41971694 +0000 UTC m=+144.543658387 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.030558 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:34 crc kubenswrapper[4857]: E1128 13:31:34.030747 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:34.53071594 +0000 UTC m=+144.654657367 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.031393 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:34 crc kubenswrapper[4857]: E1128 13:31:34.031780 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:34.53177056 +0000 UTC m=+144.655711997 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.131860 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:34 crc kubenswrapper[4857]: E1128 13:31:34.132166 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:34.632149424 +0000 UTC m=+144.756090861 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.232582 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:34 crc kubenswrapper[4857]: E1128 13:31:34.232978 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:34.732944701 +0000 UTC m=+144.856886138 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.332922 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:34 crc kubenswrapper[4857]: E1128 13:31:34.333190 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:34.833173512 +0000 UTC m=+144.957114949 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.333568 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:34 crc kubenswrapper[4857]: E1128 13:31:34.333908 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:34.833897863 +0000 UTC m=+144.957839310 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.393489 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-v825d" event={"ID":"ef70370a-9dc0-4378-b4f5-ae6b279330b4","Type":"ContainerStarted","Data":"d7a6a3577417d104b44814e7dfb23cb862d8cb49422458c0ac3eb25a57e674d9"} Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.426386 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-ghlnz" event={"ID":"6c5bcd39-8067-47c3-bc3b-5aa1aed7b516","Type":"ContainerStarted","Data":"4ba7b63833f10350a95afb5eb28de7d8070a5b50bf520df40d56f8bc31b2d983"} Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.433911 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-6gf7t" event={"ID":"5f40ecda-b519-4cfe-8b7b-6854e018fe24","Type":"ContainerStarted","Data":"2f64c9ee64d60e1caeb48290dd26236c5311f2f0ffa2e6ad59c2433f2eaff2b0"} Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.434912 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ps26v" event={"ID":"b88b052c-169c-4802-8705-bbd084630a7a","Type":"ContainerStarted","Data":"416c5d84774add786579095020f446f27c7d97804f1c4bc6c0ef07806a8f6592"} Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.436307 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ps26v" Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.437120 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:34 crc kubenswrapper[4857]: E1128 13:31:34.437281 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:34.937261144 +0000 UTC m=+145.061202581 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.437566 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:34 crc kubenswrapper[4857]: E1128 13:31:34.437914 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:34.937898862 +0000 UTC m=+145.061840309 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.455850 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-wzgtn" event={"ID":"00b39dd2-3b38-4fec-9162-e06fcb862af2","Type":"ContainerStarted","Data":"075c2620e322285ec92befa08839b84df6e8d45e75b47ec46c5a86a688ec7e8e"} Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.462145 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-mrg8q" event={"ID":"a73f6e61-db8f-4ada-8825-201429e1f803","Type":"ContainerStarted","Data":"d0b4882a38c110e83bd0cbf42d3f468369a2b9cb08507a3d00c6b06810682ce1"} Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.463359 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ps26v" Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.463543 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4xnj2" event={"ID":"d39d20be-e63a-4fef-bde2-6f4e76051828","Type":"ContainerStarted","Data":"a0357fbeba1a97bce068bef3c178e83e2acd22f7b1b058e154faff1b6e264719"} Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.487570 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-m9bfw" event={"ID":"844ce19a-9c82-4a37-8170-db724fabc85c","Type":"ContainerStarted","Data":"78003f48e7796089770df9e21e3f3293c7b547bdb1246368e6b5f786178d3fee"} Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.488043 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-m9bfw" Nov 28 
13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.490333 4857 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-m9bfw container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body= Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.490383 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-m9bfw" podUID="844ce19a-9c82-4a37-8170-db724fabc85c" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.509310 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-7ch8g" event={"ID":"194b3b2b-5c56-44e9-8800-fd8fafbcd3d3","Type":"ContainerStarted","Data":"33dab02ef2990ee2f7367a0c40000fc9130e422c7b75b4baf1f821d1129d9f28"} Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.521631 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-ps26v" podStartSLOduration=122.521612598 podStartE2EDuration="2m2.521612598s" podCreationTimestamp="2025-11-28 13:29:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:34.520786595 +0000 UTC m=+144.644728032" watchObservedRunningTime="2025-11-28 13:31:34.521612598 +0000 UTC m=+144.645554025" Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.538054 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:34 crc kubenswrapper[4857]: E1128 13:31:34.538286 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:35.038230476 +0000 UTC m=+145.162171913 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.538560 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:34 crc kubenswrapper[4857]: E1128 13:31:34.539988 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:35.039955536 +0000 UTC m=+145.163896973 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.549270 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-plzgn" event={"ID":"51677670-528c-40ba-acf4-e9b506e48a84","Type":"ContainerStarted","Data":"121ea7cdf6912ff4fd343a3df81f8cc746720a393778452fd798eeea488b42ac"} Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.582191 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-m9bfw" podStartSLOduration=122.582172599 podStartE2EDuration="2m2.582172599s" podCreationTimestamp="2025-11-28 13:29:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:34.581465039 +0000 UTC m=+144.705406476" watchObservedRunningTime="2025-11-28 13:31:34.582172599 +0000 UTC m=+144.706114036" Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.597013 4857 generic.go:334] "Generic (PLEG): container finished" podID="599a933f-fb51-4ba8-bf84-bd7f9ce63af5" containerID="e49227aa57f3d246ad1c0c8585c098e4165b7147aba37620e1c5dbd77b9bfcbb" exitCode=0 Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.597126 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-76m2l" event={"ID":"599a933f-fb51-4ba8-bf84-bd7f9ce63af5","Type":"ContainerDied","Data":"e49227aa57f3d246ad1c0c8585c098e4165b7147aba37620e1c5dbd77b9bfcbb"} Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.615287 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5wxk5" 
event={"ID":"beba01d0-40fd-49b1-ba9c-99dd121db738","Type":"ContainerStarted","Data":"84170dcb1a0495735cbdeea6dcb9098a20043e940839a6f202978b885602caff"} Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.628754 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-mrg8q" podStartSLOduration=123.628710807 podStartE2EDuration="2m3.628710807s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:34.627655066 +0000 UTC m=+144.751596503" watchObservedRunningTime="2025-11-28 13:31:34.628710807 +0000 UTC m=+144.752652244" Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.650564 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:34 crc kubenswrapper[4857]: E1128 13:31:34.651481 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:35.151466061 +0000 UTC m=+145.275407498 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.676309 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-n8z9p" event={"ID":"924df434-aae1-4b09-adc3-01b3e079fb3f","Type":"ContainerStarted","Data":"6216b0591dc51bced7ae0d63ce79fa1692f92c4e673ef2571a367efb1e8ec021"} Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.693495 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-lq2rk" event={"ID":"7ee007cb-bc82-48e8-a1ff-34f37f55ba19","Type":"ContainerStarted","Data":"126e1c7202616a863ea33a79ed8d7af26b62d8bec68d14312c81352ac13dd817"} Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.709348 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-bp9rq" event={"ID":"a29f471d-4c21-4e9b-a479-3b8f0d1e0878","Type":"ContainerStarted","Data":"d1e4d67829ffa28f03617929b04789a34fc49599c0b678fe87d6a47eceac7773"} Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.710147 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-bp9rq" Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.712023 4857 patch_prober.go:28] interesting pod/console-operator-58897d9998-bp9rq container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.24:8443/readyz\": dial tcp 
10.217.0.24:8443: connect: connection refused" start-of-body= Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.712075 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-bp9rq" podUID="a29f471d-4c21-4e9b-a479-3b8f0d1e0878" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.24:8443/readyz\": dial tcp 10.217.0.24:8443: connect: connection refused" Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.718327 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-m658x" event={"ID":"7c609810-eec1-4f73-ad29-24fc190b1ffa","Type":"ContainerStarted","Data":"deef88f314cf372c9bcefc03c25b4fb70427d4c2fa82f99f43c98c2426bd9f7b"} Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.719140 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-m658x" Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.720891 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-p7lbs" event={"ID":"4427cefb-2293-4022-afed-686499999cef","Type":"ContainerStarted","Data":"a04a3e8bd7c7edf1a1d51ecb475f14c8a2ba9f0ff494ac16a351ff574dddae9d"} Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.723142 4857 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-m658x container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.12:6443/healthz\": dial tcp 10.217.0.12:6443: connect: connection refused" start-of-body= Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.723198 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-m658x" podUID="7c609810-eec1-4f73-ad29-24fc190b1ffa" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.12:6443/healthz\": dial tcp 10.217.0.12:6443: connect: connection refused" Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.731746 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-7ch8g" podStartSLOduration=122.731726578 podStartE2EDuration="2m2.731726578s" podCreationTimestamp="2025-11-28 13:29:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:34.726483767 +0000 UTC m=+144.850425204" watchObservedRunningTime="2025-11-28 13:31:34.731726578 +0000 UTC m=+144.855668015" Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.733916 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-qp4rc" event={"ID":"5ccc741a-98e5-420d-ad6e-4260ea2abb1f","Type":"ContainerStarted","Data":"2a1efe6572da5c9f4d2cc9ca919b4b32a6afc56396053ca61af7bcb93cb83b00"} Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.737979 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gv6hw" event={"ID":"d6bc1489-81cc-49df-a936-c0c19721b8c3","Type":"ContainerStarted","Data":"3a13a2b69bf9f7edbacabfe329172adbbcfbe64ed5d3e248d40b6f1df85877a5"} Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.773007 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fcpdp" 
event={"ID":"6e62429f-c841-417c-b2cd-32130d5cf05f","Type":"ContainerStarted","Data":"fe99a3b9f6f6e1fd78a6691ed563acea256b458f184712a6361ec4bcbff23822"} Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.789138 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-bp9rq" podStartSLOduration=123.789114927 podStartE2EDuration="2m3.789114927s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:34.767544767 +0000 UTC m=+144.891486204" watchObservedRunningTime="2025-11-28 13:31:34.789114927 +0000 UTC m=+144.913056364" Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.797933 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:34 crc kubenswrapper[4857]: E1128 13:31:34.798372 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:35.298356343 +0000 UTC m=+145.422297780 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.815515 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-gdkwk" event={"ID":"94ffd41a-3797-4314-a270-c2b1bd143b04","Type":"ContainerStarted","Data":"aa2e7ee0b81300539cd259d6ed8139ca40ea193c746cf635ca7caae3fd4511a2"} Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.816917 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-m658x" podStartSLOduration=123.816898316 podStartE2EDuration="2m3.816898316s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:34.794662676 +0000 UTC m=+144.918604133" watchObservedRunningTime="2025-11-28 13:31:34.816898316 +0000 UTC m=+144.940839753" Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.823222 4857 patch_prober.go:28] interesting pod/downloads-7954f5f757-5c5lq container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body= Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.824157 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-5c5lq" podUID="1cb29ded-850a-45ff-8201-a991fe779c01" containerName="download-server" 
probeResult="failure" output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.878903 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-p7lbs" podStartSLOduration=123.878872587 podStartE2EDuration="2m3.878872587s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:34.871958208 +0000 UTC m=+144.995899645" watchObservedRunningTime="2025-11-28 13:31:34.878872587 +0000 UTC m=+145.002814024" Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.898683 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:34 crc kubenswrapper[4857]: E1128 13:31:34.899070 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:35.399025956 +0000 UTC m=+145.522967393 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.899819 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:34 crc kubenswrapper[4857]: E1128 13:31:34.905130 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:35.405115511 +0000 UTC m=+145.529056948 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:34 crc kubenswrapper[4857]: I1128 13:31:34.908366 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-gv6hw" podStartSLOduration=123.908345494 podStartE2EDuration="2m3.908345494s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:34.907138719 +0000 UTC m=+145.031080156" watchObservedRunningTime="2025-11-28 13:31:34.908345494 +0000 UTC m=+145.032286941" Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.001332 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:35 crc kubenswrapper[4857]: E1128 13:31:35.001994 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:35.501978455 +0000 UTC m=+145.625919892 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.103044 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:35 crc kubenswrapper[4857]: E1128 13:31:35.103374 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:35.603362059 +0000 UTC m=+145.727303496 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.206584 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:35 crc kubenswrapper[4857]: E1128 13:31:35.206937 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:35.706922236 +0000 UTC m=+145.830863673 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.207133 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:35 crc kubenswrapper[4857]: E1128 13:31:35.207615 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:35.707465661 +0000 UTC m=+145.831407098 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.309000 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:35 crc kubenswrapper[4857]: E1128 13:31:35.309176 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:35.809144064 +0000 UTC m=+145.933085501 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.309518 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:35 crc kubenswrapper[4857]: E1128 13:31:35.310081 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:35.81004889 +0000 UTC m=+145.933990327 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.410839 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:35 crc kubenswrapper[4857]: E1128 13:31:35.411174 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:35.911151376 +0000 UTC m=+146.035092813 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.411208 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:35 crc kubenswrapper[4857]: E1128 13:31:35.411563 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:35.911549107 +0000 UTC m=+146.035490544 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.512843 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:35 crc kubenswrapper[4857]: E1128 13:31:35.513044 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:36.013021244 +0000 UTC m=+146.136962681 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.513584 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:35 crc kubenswrapper[4857]: E1128 13:31:35.513934 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:36.01392634 +0000 UTC m=+146.137867767 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.615110 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:35 crc kubenswrapper[4857]: E1128 13:31:35.615591 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:36.115575901 +0000 UTC m=+146.239517338 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.716753 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:35 crc kubenswrapper[4857]: E1128 13:31:35.717662 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:36.217642415 +0000 UTC m=+146.341583852 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.818708 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:35 crc kubenswrapper[4857]: E1128 13:31:35.819165 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:36.319130162 +0000 UTC m=+146.443071599 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.826666 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-gdkwk" event={"ID":"94ffd41a-3797-4314-a270-c2b1bd143b04","Type":"ContainerStarted","Data":"4c4e0317be87f7bfff56e2a1785c2b8e8bc7fbcebe27c8298b5694c98abaabc6"} Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.826745 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-gdkwk" Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.830804 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4xnj2" event={"ID":"d39d20be-e63a-4fef-bde2-6f4e76051828","Type":"ContainerStarted","Data":"91688aa5b731ef7656426e19850d67e690fb9166f80678aeea6a6e3497f9c462"} Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.845563 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fcpdp" event={"ID":"6e62429f-c841-417c-b2cd-32130d5cf05f","Type":"ContainerStarted","Data":"d46e135decad89ce6127715309d73722a92b92bff91b15bcc94081622f28a265"} Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.845626 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fcpdp" event={"ID":"6e62429f-c841-417c-b2cd-32130d5cf05f","Type":"ContainerStarted","Data":"d8be7dd47e531c100b04b9fa7b413b7c8824d27248df84cf2ad700ffb9906a81"} Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.859497 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-gdkwk" 
podStartSLOduration=123.859478762 podStartE2EDuration="2m3.859478762s" podCreationTimestamp="2025-11-28 13:29:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:35.85697968 +0000 UTC m=+145.980921127" watchObservedRunningTime="2025-11-28 13:31:35.859478762 +0000 UTC m=+145.983420199" Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.865757 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-76m2l" event={"ID":"599a933f-fb51-4ba8-bf84-bd7f9ce63af5","Type":"ContainerStarted","Data":"8b711f64bf07306272e8fbc21fce8725bb180da3bfd256e27392c98dc72768db"} Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.867185 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-lq2rk" event={"ID":"7ee007cb-bc82-48e8-a1ff-34f37f55ba19","Type":"ContainerStarted","Data":"f76e642eda6d388ac2f9619d60a9a46d0d504b42208e6843e12b0ba01619d3d3"} Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.867389 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-lq2rk" Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.869464 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ddlrq" event={"ID":"13499e68-b11e-4914-8a02-7c63458afd38","Type":"ContainerStarted","Data":"ff09be04d16d8c5a9bac9469bb6d527db9feb9656ce2136a5bcfc9f0577fce78"} Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.869500 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ddlrq" event={"ID":"13499e68-b11e-4914-8a02-7c63458afd38","Type":"ContainerStarted","Data":"d6dcdfc345b4ee97e80e3cf63810fc4e029da7d5f611874c2f2839ae41a18617"} Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.871169 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-jp48x" event={"ID":"93adcd15-ad3e-4583-81b9-1aa85b5d7ada","Type":"ContainerStarted","Data":"33b28f0e737a44c5eb386e189a82bb63743a73484c9fb22d9fb949d37fc30c13"} Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.871210 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-jp48x" event={"ID":"93adcd15-ad3e-4583-81b9-1aa85b5d7ada","Type":"ContainerStarted","Data":"c45675265c61e26dfc18d5124e9c765ab1633f9298c625a0206e5b17dee9bd4d"} Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.875314 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-lq2rk" Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.876835 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-n8z9p" event={"ID":"924df434-aae1-4b09-adc3-01b3e079fb3f","Type":"ContainerStarted","Data":"ec442168590406156153a18b757137a79ea51dde1c8e06111a38b289b0648087"} Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.879143 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-wzgtn" event={"ID":"00b39dd2-3b38-4fec-9162-e06fcb862af2","Type":"ContainerStarted","Data":"dae65383b1aca691cdd02e3469a2968c28fcd97afd1252db3aadeb37df962534"} Nov 28 13:31:35 crc 
kubenswrapper[4857]: I1128 13:31:35.879175 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-wzgtn" event={"ID":"00b39dd2-3b38-4fec-9162-e06fcb862af2","Type":"ContainerStarted","Data":"e8d973432062ac986c3d91b3dbc3f38c166e307e5309200cb584bab3a7b62f69"} Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.881474 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9sptz" event={"ID":"951e34a8-00f5-479a-9de8-ee53ee32da75","Type":"ContainerStarted","Data":"053cbcc151af7eb72a099c1ccf8ca7f2b3bd5bf12c76990219654d4bbc1a4e62"} Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.882033 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9sptz" Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.883231 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-qnqmv" event={"ID":"3b437875-4ae6-4455-a281-84281a93fad0","Type":"ContainerStarted","Data":"6599e0b4298ebdccdd21e807cebaa7e049910d726114ec14ba59191a43dfb9e6"} Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.884206 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-plzgn" event={"ID":"51677670-528c-40ba-acf4-e9b506e48a84","Type":"ContainerStarted","Data":"9bed02586a31ead7aba25f3670f316c1f4bbe268eadecef6bd0800dde0cbb02f"} Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.884921 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-plzgn" Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.887094 4857 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-plzgn container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.39:8080/healthz\": dial tcp 10.217.0.39:8080: connect: connection refused" start-of-body= Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.887138 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-plzgn" podUID="51677670-528c-40ba-acf4-e9b506e48a84" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.39:8080/healthz\": dial tcp 10.217.0.39:8080: connect: connection refused" Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.888594 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5wxk5" event={"ID":"beba01d0-40fd-49b1-ba9c-99dd121db738","Type":"ContainerStarted","Data":"793573f321d8db6a12b26cd1b4814d01f36b0d7e4a8414828a0b829cf67a52d0"} Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.891764 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-g79kz" event={"ID":"fff47bbd-bac2-436d-b331-23f8aa856bd0","Type":"ContainerStarted","Data":"37f792fea19d259597516f71c0cae8b5381af8aaac0327e3f8b29ad1b81988ec"} Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.893469 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-fcpdp" podStartSLOduration=124.893454048 podStartE2EDuration="2m4.893454048s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:35.892788679 +0000 UTC m=+146.016730116" watchObservedRunningTime="2025-11-28 13:31:35.893454048 +0000 UTC m=+146.017395485" Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.898166 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2gj2l" event={"ID":"3150e4a1-9ef4-4a79-a40c-8eea82b5c19a","Type":"ContainerStarted","Data":"1485785b3442fe62c2d8721c7e4cd8482f64dd2b10be1769dce6e507bb53d15f"} Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.914097 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-78f6q" event={"ID":"34073888-721b-429f-81c5-e12d90b5b7e8","Type":"ContainerStarted","Data":"1753748cd3ecd19ec2e74393bd11a3bd4abf3730bf0296f96558d5c71cee6230"} Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.917307 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-4xnj2" podStartSLOduration=124.917291573 podStartE2EDuration="2m4.917291573s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:35.915348467 +0000 UTC m=+146.039289904" watchObservedRunningTime="2025-11-28 13:31:35.917291573 +0000 UTC m=+146.041233010" Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.919922 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.928343 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-qp4rc" event={"ID":"5ccc741a-98e5-420d-ad6e-4260ea2abb1f","Type":"ContainerStarted","Data":"de9d8d9ed68e156b253e2e87c8e5eada9dc2f9c9eaf1978ddba8e5dcf0f7107f"} Nov 28 13:31:35 crc kubenswrapper[4857]: E1128 13:31:35.932229 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:36.432201502 +0000 UTC m=+146.556142939 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.950137 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-jjd26" event={"ID":"1bc08a92-d2bf-41ce-84b7-675c9c6a24de","Type":"ContainerStarted","Data":"30840a0735c54159c5c579d3df3e752a862f3085c080ef2a325d8d78af42abd1"} Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.950190 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-jjd26" event={"ID":"1bc08a92-d2bf-41ce-84b7-675c9c6a24de","Type":"ContainerStarted","Data":"10c8d12b2f58ffdcf761daaf059a6c22e43f7ac5f12ebd8ada659ef74404a5f1"} Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.950886 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-jjd26" Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.956619 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-78f6q" podStartSLOduration=124.956596163 podStartE2EDuration="2m4.956596163s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:35.955288935 +0000 UTC m=+146.079230382" watchObservedRunningTime="2025-11-28 13:31:35.956596163 +0000 UTC m=+146.080537600" Nov 28 13:31:35 crc kubenswrapper[4857]: I1128 13:31:35.989660 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-6qvft" event={"ID":"454dd4da-e6c5-43fb-b4d2-0cdc4121dc7a","Type":"ContainerStarted","Data":"d04940a94b45b1ab5f1254f79888d332df0eb9ab291fb4d70e600efc55e0a7bf"} Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.011761 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5wxk5" podStartSLOduration=125.011743178 podStartE2EDuration="2m5.011743178s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:36.011668296 +0000 UTC m=+146.135609733" watchObservedRunningTime="2025-11-28 13:31:36.011743178 +0000 UTC m=+146.135684615" Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.019188 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-m9bfw" Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.024509 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:36 crc kubenswrapper[4857]: E1128 13:31:36.026074 4857 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:36.526056339 +0000 UTC m=+146.649997776 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.066655 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-ghlnz" Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.068820 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9sptz" podStartSLOduration=125.068798318 podStartE2EDuration="2m5.068798318s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:36.055588768 +0000 UTC m=+146.179530205" watchObservedRunningTime="2025-11-28 13:31:36.068798318 +0000 UTC m=+146.192739745" Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.079574 4857 patch_prober.go:28] interesting pod/router-default-5444994796-ghlnz container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 13:31:36 crc kubenswrapper[4857]: [-]has-synced failed: reason withheld Nov 28 13:31:36 crc kubenswrapper[4857]: [+]process-running ok Nov 28 13:31:36 crc kubenswrapper[4857]: healthz check failed Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.079663 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ghlnz" podUID="6c5bcd39-8067-47c3-bc3b-5aa1aed7b516" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.127577 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:36 crc kubenswrapper[4857]: E1128 13:31:36.136467 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:36.636447502 +0000 UTC m=+146.760389009 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.144111 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-g79kz" podStartSLOduration=10.144083692 podStartE2EDuration="10.144083692s" podCreationTimestamp="2025-11-28 13:31:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:36.142004362 +0000 UTC m=+146.265945799" watchObservedRunningTime="2025-11-28 13:31:36.144083692 +0000 UTC m=+146.268025119" Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.146024 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2gj2l" podStartSLOduration=125.146006157 podStartE2EDuration="2m5.146006157s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:36.114338477 +0000 UTC m=+146.238279924" watchObservedRunningTime="2025-11-28 13:31:36.146006157 +0000 UTC m=+146.269947604" Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.166441 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-lq2rk" podStartSLOduration=124.166421204 podStartE2EDuration="2m4.166421204s" podCreationTimestamp="2025-11-28 13:29:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:36.165005633 +0000 UTC m=+146.288947110" watchObservedRunningTime="2025-11-28 13:31:36.166421204 +0000 UTC m=+146.290362651" Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.200853 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-plzgn" podStartSLOduration=124.200829863 podStartE2EDuration="2m4.200829863s" podCreationTimestamp="2025-11-28 13:29:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:36.199442503 +0000 UTC m=+146.323383950" watchObservedRunningTime="2025-11-28 13:31:36.200829863 +0000 UTC m=+146.324771300" Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.230556 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:36 crc kubenswrapper[4857]: E1128 13:31:36.231074 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
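
The mount and unmount failures repeating above have a single cause: the image-registry PVC is backed by the kubevirt.io.hostpath-provisioner CSI driver, which has not yet registered with this kubelet, so every attempt dies at the newCsiDriverClient lookup and is requeued with the fixed 500ms delay that nestedpendingoperations logs as durationBeforeRetry. A minimal Go sketch of that retry shape (the registry type and the 2-second registration delay are illustrative stand-ins, not kubelet's actual CSI driver store):

    package main

    import (
        "fmt"
        "sync"
        "time"
    )

    // Illustrative stand-in for the kubelet's registered-CSI-driver store;
    // the real store is populated by the plugin registration handler.
    type registry struct {
        mu      sync.RWMutex
        drivers map[string]bool
    }

    func (r *registry) has(name string) bool {
        r.mu.RLock()
        defer r.mu.RUnlock()
        return r.drivers[name]
    }

    // mountDevice fails the same way the log does until the driver appears.
    func mountDevice(r *registry, driver, volume string) error {
        if !r.has(driver) {
            return fmt.Errorf("driver name %s not found in the list of registered CSI drivers", driver)
        }
        fmt.Printf("MountDevice succeeded for volume %q\n", volume)
        return nil
    }

    func main() {
        r := &registry{drivers: map[string]bool{}}
        go func() {
            // Simulate the driver registering ~2s in, as happens at 13:31:37
            // when its registration socket is picked up.
            time.Sleep(2 * time.Second)
            r.mu.Lock()
            r.drivers["kubevirt.io.hostpath-provisioner"] = true
            r.mu.Unlock()
        }()

        const backoff = 500 * time.Millisecond // the durationBeforeRetry in the log
        for {
            err := mountDevice(r, "kubevirt.io.hostpath-provisioner", "pvc-657094db")
            if err == nil {
                return
            }
            fmt.Printf("operation failed, retrying in %v: %v\n", backoff, err)
            time.Sleep(backoff)
        }
    }
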
No retries permitted until 2025-11-28 13:31:36.731055071 +0000 UTC m=+146.854996508 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.263162 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-76m2l" podStartSLOduration=124.263140384 podStartE2EDuration="2m4.263140384s" podCreationTimestamp="2025-11-28 13:29:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:36.261997881 +0000 UTC m=+146.385939328" watchObservedRunningTime="2025-11-28 13:31:36.263140384 +0000 UTC m=+146.387081821" Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.303682 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-n8z9p" podStartSLOduration=125.303661118 podStartE2EDuration="2m5.303661118s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:36.295319169 +0000 UTC m=+146.419260616" watchObservedRunningTime="2025-11-28 13:31:36.303661118 +0000 UTC m=+146.427602555" Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.323474 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-jp48x" podStartSLOduration=125.323454977 podStartE2EDuration="2m5.323454977s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:36.322348745 +0000 UTC m=+146.446290182" watchObservedRunningTime="2025-11-28 13:31:36.323454977 +0000 UTC m=+146.447396414" Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.331660 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:36 crc kubenswrapper[4857]: E1128 13:31:36.332196 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:36.832171988 +0000 UTC m=+146.956113425 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.394603 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-wzgtn" podStartSLOduration=125.394581602 podStartE2EDuration="2m5.394581602s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:36.391078481 +0000 UTC m=+146.515019918" watchObservedRunningTime="2025-11-28 13:31:36.394581602 +0000 UTC m=+146.518523039" Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.398654 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-ddlrq" podStartSLOduration=125.398631548 podStartE2EDuration="2m5.398631548s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:36.368698578 +0000 UTC m=+146.492640025" watchObservedRunningTime="2025-11-28 13:31:36.398631548 +0000 UTC m=+146.522572985" Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.403080 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-bp9rq" Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.436565 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:36 crc kubenswrapper[4857]: E1128 13:31:36.436901 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:36.936885247 +0000 UTC m=+147.060826684 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.538378 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:36 crc kubenswrapper[4857]: E1128 13:31:36.538821 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:37.038799187 +0000 UTC m=+147.162740684 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.594130 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-jjd26" podStartSLOduration=10.594108686 podStartE2EDuration="10.594108686s" podCreationTimestamp="2025-11-28 13:31:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:36.514335154 +0000 UTC m=+146.638276591" watchObservedRunningTime="2025-11-28 13:31:36.594108686 +0000 UTC m=+146.718050123" Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.640147 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:36 crc kubenswrapper[4857]: E1128 13:31:36.640305 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:37.140281113 +0000 UTC m=+147.264222550 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.640429 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:36 crc kubenswrapper[4857]: E1128 13:31:36.640775 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:37.140764667 +0000 UTC m=+147.264706104 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.661369 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-6gf7t" podStartSLOduration=125.661346059 podStartE2EDuration="2m5.661346059s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:36.649741455 +0000 UTC m=+146.773682892" watchObservedRunningTime="2025-11-28 13:31:36.661346059 +0000 UTC m=+146.785287496" Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.662107 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-ghlnz" podStartSLOduration=125.662102661 podStartE2EDuration="2m5.662102661s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:36.599945434 +0000 UTC m=+146.723886871" watchObservedRunningTime="2025-11-28 13:31:36.662102661 +0000 UTC m=+146.786044098" Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.713020 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-qp4rc" podStartSLOduration=125.712999714 podStartE2EDuration="2m5.712999714s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:36.710761859 +0000 UTC m=+146.834703296" watchObservedRunningTime="2025-11-28 13:31:36.712999714 +0000 UTC m=+146.836941151" Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.741688 4857 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:36 crc kubenswrapper[4857]: E1128 13:31:36.741870 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:37.241844053 +0000 UTC m=+147.365785490 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.742020 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:36 crc kubenswrapper[4857]: E1128 13:31:36.742399 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:37.242392398 +0000 UTC m=+147.366333835 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.771781 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-m658x" Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.774716 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-6qvft" podStartSLOduration=125.774698557 podStartE2EDuration="2m5.774698557s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:36.773344818 +0000 UTC m=+146.897286275" watchObservedRunningTime="2025-11-28 13:31:36.774698557 +0000 UTC m=+146.898639994" Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.828057 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-v825d" podStartSLOduration=125.82803559 podStartE2EDuration="2m5.82803559s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:36.823025966 +0000 UTC m=+146.946967403" watchObservedRunningTime="2025-11-28 13:31:36.82803559 +0000 UTC m=+146.951977027" Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.843364 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:36 crc kubenswrapper[4857]: E1128 13:31:36.843544 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:37.343514585 +0000 UTC m=+147.467456032 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.843710 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:36 crc kubenswrapper[4857]: E1128 13:31:36.844096 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:37.344085361 +0000 UTC m=+147.468026848 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.944799 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:36 crc kubenswrapper[4857]: E1128 13:31:36.945233 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:37.445215498 +0000 UTC m=+147.569156935 (durationBeforeRetry 500ms). 
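
The pod_startup_latency_tracker records in this window are straight arithmetic: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp (13:31:36.011743178 - 13:29:31 = 2m5.011743178s for the storage-version-migrator operator), and the m=+146... suffixes are the monotonic offset since the kubelet process started. The zeroed firstStartedPulling/lastFinishedPulling timestamps appear to mean no image pull was recorded for these pods. Reproducing the subtraction in Go:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // Timestamps copied from the kube-storage-version-migrator-operator
        // record above; the layout matches Go's default time.Time formatting.
        const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
        created, err := time.Parse(layout, "2025-11-28 13:29:31 +0000 UTC")
        if err != nil {
            panic(err)
        }
        observed, err := time.Parse(layout, "2025-11-28 13:31:36.011743178 +0000 UTC")
        if err != nil {
            panic(err)
        }
        // Prints 2m5.011743178s, the logged podStartE2EDuration.
        fmt.Println(observed.Sub(created))
    }
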
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.994624 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-qnqmv" event={"ID":"3b437875-4ae6-4455-a281-84281a93fad0","Type":"ContainerStarted","Data":"964dcce1a4c86ee81d921f86f395b5048305c86523dbb42330234cf467e6d777"} Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.995670 4857 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-plzgn container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.39:8080/healthz\": dial tcp 10.217.0.39:8080: connect: connection refused" start-of-body= Nov 28 13:31:36 crc kubenswrapper[4857]: I1128 13:31:36.995715 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-plzgn" podUID="51677670-528c-40ba-acf4-e9b506e48a84" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.39:8080/healthz\": dial tcp 10.217.0.39:8080: connect: connection refused" Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.046338 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:37 crc kubenswrapper[4857]: E1128 13:31:37.046656 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:37.546645483 +0000 UTC m=+147.670586920 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.059695 4857 patch_prober.go:28] interesting pod/router-default-5444994796-ghlnz container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 13:31:37 crc kubenswrapper[4857]: [-]has-synced failed: reason withheld Nov 28 13:31:37 crc kubenswrapper[4857]: [+]process-running ok Nov 28 13:31:37 crc kubenswrapper[4857]: healthz check failed Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.059781 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ghlnz" podUID="6c5bcd39-8067-47c3-bc3b-5aa1aed7b516" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.147585 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:37 crc kubenswrapper[4857]: E1128 13:31:37.147725 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:37.647704728 +0000 UTC m=+147.771646165 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.149112 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:37 crc kubenswrapper[4857]: E1128 13:31:37.149661 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:37.649647294 +0000 UTC m=+147.773588821 (durationBeforeRetry 500ms). 
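
Two distinct probe-failure shapes recur here: marketplace-operator's readiness probe fails at the dial stage (connection refused, nothing is listening on 10.217.0.39:8080 yet), while router-default's startup probe connects but receives HTTP 500 with an aggregated check body, one [+]/[-] line per sub-check. A rough sketch of both sides, assuming a plain net/http implementation rather than the actual router or kubelet prober code:

    package main

    import (
        "fmt"
        "net/http"
        "net/http/httptest"
    )

    type check struct {
        name string
        err  error
    }

    // healthz renders aggregated output shaped like the router's startup-probe
    // failures: one [+]/[-] line per check, HTTP 500 when any check fails.
    func healthz(checks []check) http.HandlerFunc {
        return func(w http.ResponseWriter, r *http.Request) {
            body, failed := "", false
            for _, c := range checks {
                if c.err != nil {
                    failed = true
                    body += fmt.Sprintf("[-]%s failed: reason withheld\n", c.name)
                } else {
                    body += fmt.Sprintf("[+]%s ok\n", c.name)
                }
            }
            if failed {
                body += "healthz check failed\n"
                w.WriteHeader(http.StatusInternalServerError)
            }
            fmt.Fprint(w, body)
        }
    }

    // probe behaves like kubelet's HTTP prober: a refused connection or a
    // status outside 200-399 both count as failures.
    func probe(url string) error {
        resp, err := http.Get(url)
        if err != nil {
            return err // e.g. "dial tcp ...: connect: connection refused"
        }
        defer resp.Body.Close()
        if resp.StatusCode < 200 || resp.StatusCode >= 400 {
            return fmt.Errorf("HTTP probe failed with statuscode: %d", resp.StatusCode)
        }
        return nil
    }

    func main() {
        srv := httptest.NewServer(healthz([]check{
            {name: "backend-http", err: fmt.Errorf("not synced")},
            {name: "has-synced", err: fmt.Errorf("not synced")},
            {name: "process-running", err: nil},
        }))
        defer srv.Close()
        fmt.Println(probe(srv.URL)) // HTTP probe failed with statuscode: 500
    }
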
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.250545 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:37 crc kubenswrapper[4857]: E1128 13:31:37.250785 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:37.75074885 +0000 UTC m=+147.874690297 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.250902 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:37 crc kubenswrapper[4857]: E1128 13:31:37.251437 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:37.751427399 +0000 UTC m=+147.875368836 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.352565 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:37 crc kubenswrapper[4857]: E1128 13:31:37.352694 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:37.852675899 +0000 UTC m=+147.976617326 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.352782 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:37 crc kubenswrapper[4857]: E1128 13:31:37.353091 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:37.853083651 +0000 UTC m=+147.977025088 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.454588 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:37 crc kubenswrapper[4857]: E1128 13:31:37.454737 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:37.954715952 +0000 UTC m=+148.078657389 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.455174 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:37 crc kubenswrapper[4857]: E1128 13:31:37.455560 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:37.955547806 +0000 UTC m=+148.079489243 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.497910 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-77c6r"] Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.499247 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-77c6r" Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.501496 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.529496 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9sptz" Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.556124 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:37 crc kubenswrapper[4857]: E1128 13:31:37.556351 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:38.056315322 +0000 UTC m=+148.180256759 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.556501 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:37 crc kubenswrapper[4857]: E1128 13:31:37.556872 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:38.056858518 +0000 UTC m=+148.180799955 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.560469 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-77c6r"] Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.653554 4857 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.659708 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.659887 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d31f352c-2360-4d7e-bb8d-6bfa04257c06-catalog-content\") pod \"community-operators-77c6r\" (UID: \"d31f352c-2360-4d7e-bb8d-6bfa04257c06\") " pod="openshift-marketplace/community-operators-77c6r" Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.659929 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m4m4d\" (UniqueName: \"kubernetes.io/projected/d31f352c-2360-4d7e-bb8d-6bfa04257c06-kube-api-access-m4m4d\") pod \"community-operators-77c6r\" (UID: \"d31f352c-2360-4d7e-bb8d-6bfa04257c06\") " pod="openshift-marketplace/community-operators-77c6r" Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.660001 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d31f352c-2360-4d7e-bb8d-6bfa04257c06-utilities\") pod \"community-operators-77c6r\" (UID: \"d31f352c-2360-4d7e-bb8d-6bfa04257c06\") " pod="openshift-marketplace/community-operators-77c6r" Nov 28 13:31:37 crc kubenswrapper[4857]: E1128 13:31:37.660147 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:38.160130995 +0000 UTC m=+148.284072432 (durationBeforeRetry 500ms). 
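
The plugin_watcher record above is the turning point for the stuck PVC: the hostpath provisioner has finally dropped its registration socket under /var/lib/kubelet/plugins_registry, and once the kubelet completes the registration handshake the MountDevice retries can resolve the driver. The real watcher is fsnotify-based and then performs a gRPC GetInfo handshake over the socket; the sketch below only illustrates the discovery step, by polling:

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
        "strings"
        "time"
    )

    // watchRegistry reports each new *.sock file in dir exactly once.
    // Polling is a simplification; kubelet reacts to filesystem events.
    func watchRegistry(dir string, found chan<- string) {
        seen := map[string]bool{}
        for {
            entries, err := os.ReadDir(dir)
            if err == nil {
                for _, e := range entries {
                    name := e.Name()
                    if strings.HasSuffix(name, ".sock") && !seen[name] {
                        seen[name] = true
                        found <- filepath.Join(dir, name)
                    }
                }
            }
            time.Sleep(time.Second)
        }
    }

    func main() {
        found := make(chan string)
        go watchRegistry("/var/lib/kubelet/plugins_registry", found)
        for path := range found {
            fmt.Printf("Adding socket path or updating timestamp to desired state cache path=%q\n", path)
        }
    }
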
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.701026 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-j4qb5"] Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.702227 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-j4qb5" Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.714475 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.714609 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-j4qb5"] Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.772823 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d31f352c-2360-4d7e-bb8d-6bfa04257c06-catalog-content\") pod \"community-operators-77c6r\" (UID: \"d31f352c-2360-4d7e-bb8d-6bfa04257c06\") " pod="openshift-marketplace/community-operators-77c6r" Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.772916 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m4m4d\" (UniqueName: \"kubernetes.io/projected/d31f352c-2360-4d7e-bb8d-6bfa04257c06-kube-api-access-m4m4d\") pod \"community-operators-77c6r\" (UID: \"d31f352c-2360-4d7e-bb8d-6bfa04257c06\") " pod="openshift-marketplace/community-operators-77c6r" Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.772986 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.773041 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d31f352c-2360-4d7e-bb8d-6bfa04257c06-utilities\") pod \"community-operators-77c6r\" (UID: \"d31f352c-2360-4d7e-bb8d-6bfa04257c06\") " pod="openshift-marketplace/community-operators-77c6r" Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.776997 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d31f352c-2360-4d7e-bb8d-6bfa04257c06-utilities\") pod \"community-operators-77c6r\" (UID: \"d31f352c-2360-4d7e-bb8d-6bfa04257c06\") " pod="openshift-marketplace/community-operators-77c6r" Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.778543 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d31f352c-2360-4d7e-bb8d-6bfa04257c06-catalog-content\") pod \"community-operators-77c6r\" (UID: \"d31f352c-2360-4d7e-bb8d-6bfa04257c06\") " 
pod="openshift-marketplace/community-operators-77c6r" Nov 28 13:31:37 crc kubenswrapper[4857]: E1128 13:31:37.784184 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:38.28415821 +0000 UTC m=+148.408099647 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.814650 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m4m4d\" (UniqueName: \"kubernetes.io/projected/d31f352c-2360-4d7e-bb8d-6bfa04257c06-kube-api-access-m4m4d\") pod \"community-operators-77c6r\" (UID: \"d31f352c-2360-4d7e-bb8d-6bfa04257c06\") " pod="openshift-marketplace/community-operators-77c6r" Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.824733 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-77c6r" Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.873921 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.874213 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62de45bc-ae71-4802-858e-2a9ac94455ce-catalog-content\") pod \"certified-operators-j4qb5\" (UID: \"62de45bc-ae71-4802-858e-2a9ac94455ce\") " pod="openshift-marketplace/certified-operators-j4qb5" Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.874301 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wms8x\" (UniqueName: \"kubernetes.io/projected/62de45bc-ae71-4802-858e-2a9ac94455ce-kube-api-access-wms8x\") pod \"certified-operators-j4qb5\" (UID: \"62de45bc-ae71-4802-858e-2a9ac94455ce\") " pod="openshift-marketplace/certified-operators-j4qb5" Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.874327 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62de45bc-ae71-4802-858e-2a9ac94455ce-utilities\") pod \"certified-operators-j4qb5\" (UID: \"62de45bc-ae71-4802-858e-2a9ac94455ce\") " pod="openshift-marketplace/certified-operators-j4qb5" Nov 28 13:31:37 crc kubenswrapper[4857]: E1128 13:31:37.874431 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:38.374415354 +0000 UTC m=+148.498356791 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.896200 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-w8khp"] Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.899343 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-w8khp" Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.917083 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-w8khp"] Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.976160 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62de45bc-ae71-4802-858e-2a9ac94455ce-catalog-content\") pod \"certified-operators-j4qb5\" (UID: \"62de45bc-ae71-4802-858e-2a9ac94455ce\") " pod="openshift-marketplace/certified-operators-j4qb5" Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.976214 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/71a8af09-48a3-4be5-b08a-7e5381ecca76-catalog-content\") pod \"community-operators-w8khp\" (UID: \"71a8af09-48a3-4be5-b08a-7e5381ecca76\") " pod="openshift-marketplace/community-operators-w8khp" Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.976272 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kthj6\" (UniqueName: \"kubernetes.io/projected/71a8af09-48a3-4be5-b08a-7e5381ecca76-kube-api-access-kthj6\") pod \"community-operators-w8khp\" (UID: \"71a8af09-48a3-4be5-b08a-7e5381ecca76\") " pod="openshift-marketplace/community-operators-w8khp" Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.976318 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/71a8af09-48a3-4be5-b08a-7e5381ecca76-utilities\") pod \"community-operators-w8khp\" (UID: \"71a8af09-48a3-4be5-b08a-7e5381ecca76\") " pod="openshift-marketplace/community-operators-w8khp" Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.976406 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wms8x\" (UniqueName: \"kubernetes.io/projected/62de45bc-ae71-4802-858e-2a9ac94455ce-kube-api-access-wms8x\") pod \"certified-operators-j4qb5\" (UID: \"62de45bc-ae71-4802-858e-2a9ac94455ce\") " pod="openshift-marketplace/certified-operators-j4qb5" Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.976446 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62de45bc-ae71-4802-858e-2a9ac94455ce-utilities\") pod \"certified-operators-j4qb5\" (UID: \"62de45bc-ae71-4802-858e-2a9ac94455ce\") " pod="openshift-marketplace/certified-operators-j4qb5" Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.976474 4857 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:37 crc kubenswrapper[4857]: E1128 13:31:37.976846 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:38.476830958 +0000 UTC m=+148.600772395 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.977461 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62de45bc-ae71-4802-858e-2a9ac94455ce-catalog-content\") pod \"certified-operators-j4qb5\" (UID: \"62de45bc-ae71-4802-858e-2a9ac94455ce\") " pod="openshift-marketplace/certified-operators-j4qb5" Nov 28 13:31:37 crc kubenswrapper[4857]: I1128 13:31:37.977732 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62de45bc-ae71-4802-858e-2a9ac94455ce-utilities\") pod \"certified-operators-j4qb5\" (UID: \"62de45bc-ae71-4802-858e-2a9ac94455ce\") " pod="openshift-marketplace/certified-operators-j4qb5" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.012987 4857 generic.go:334] "Generic (PLEG): container finished" podID="49fd1de8-3f87-4d33-b209-df83dd1096a8" containerID="039646d2cc0856a7cd5b98a3c85cd50c130f5b53d297446b8ca635aa7ff46c20" exitCode=0 Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.013078 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405610-z8jl4" event={"ID":"49fd1de8-3f87-4d33-b209-df83dd1096a8","Type":"ContainerDied","Data":"039646d2cc0856a7cd5b98a3c85cd50c130f5b53d297446b8ca635aa7ff46c20"} Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.018204 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-qnqmv" event={"ID":"3b437875-4ae6-4455-a281-84281a93fad0","Type":"ContainerStarted","Data":"649658c53ea28780be791856c74d5bae4d73c927099e1ce4b10d50b1beb72987"} Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.021097 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wms8x\" (UniqueName: \"kubernetes.io/projected/62de45bc-ae71-4802-858e-2a9ac94455ce-kube-api-access-wms8x\") pod \"certified-operators-j4qb5\" (UID: \"62de45bc-ae71-4802-858e-2a9ac94455ce\") " pod="openshift-marketplace/certified-operators-j4qb5" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.036879 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-j4qb5" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.062291 4857 patch_prober.go:28] interesting pod/router-default-5444994796-ghlnz container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 13:31:38 crc kubenswrapper[4857]: [-]has-synced failed: reason withheld Nov 28 13:31:38 crc kubenswrapper[4857]: [+]process-running ok Nov 28 13:31:38 crc kubenswrapper[4857]: healthz check failed Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.062349 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ghlnz" podUID="6c5bcd39-8067-47c3-bc3b-5aa1aed7b516" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.079625 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:38 crc kubenswrapper[4857]: E1128 13:31:38.079742 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:38.579722365 +0000 UTC m=+148.703663802 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.079908 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.079966 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.079991 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.080041 4857 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.080096 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/71a8af09-48a3-4be5-b08a-7e5381ecca76-catalog-content\") pod \"community-operators-w8khp\" (UID: \"71a8af09-48a3-4be5-b08a-7e5381ecca76\") " pod="openshift-marketplace/community-operators-w8khp" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.080128 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kthj6\" (UniqueName: \"kubernetes.io/projected/71a8af09-48a3-4be5-b08a-7e5381ecca76-kube-api-access-kthj6\") pod \"community-operators-w8khp\" (UID: \"71a8af09-48a3-4be5-b08a-7e5381ecca76\") " pod="openshift-marketplace/community-operators-w8khp" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.080157 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.080183 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/71a8af09-48a3-4be5-b08a-7e5381ecca76-utilities\") pod \"community-operators-w8khp\" (UID: \"71a8af09-48a3-4be5-b08a-7e5381ecca76\") " pod="openshift-marketplace/community-operators-w8khp" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.080701 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/71a8af09-48a3-4be5-b08a-7e5381ecca76-utilities\") pod \"community-operators-w8khp\" (UID: \"71a8af09-48a3-4be5-b08a-7e5381ecca76\") " pod="openshift-marketplace/community-operators-w8khp" Nov 28 13:31:38 crc kubenswrapper[4857]: E1128 13:31:38.081831 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:38.581807155 +0000 UTC m=+148.705748662 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.085575 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.086646 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/71a8af09-48a3-4be5-b08a-7e5381ecca76-catalog-content\") pod \"community-operators-w8khp\" (UID: \"71a8af09-48a3-4be5-b08a-7e5381ecca76\") " pod="openshift-marketplace/community-operators-w8khp" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.091456 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.091718 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.092676 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.096393 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-plzgn" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.097278 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-2dpsf"] Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.098908 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2dpsf" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.115719 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.123229 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kthj6\" (UniqueName: \"kubernetes.io/projected/71a8af09-48a3-4be5-b08a-7e5381ecca76-kube-api-access-kthj6\") pod \"community-operators-w8khp\" (UID: \"71a8af09-48a3-4be5-b08a-7e5381ecca76\") " pod="openshift-marketplace/community-operators-w8khp" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.125362 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2dpsf"] Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.181011 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:38 crc kubenswrapper[4857]: E1128 13:31:38.181742 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:38.681448189 +0000 UTC m=+148.805389666 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.184399 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.184696 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44-catalog-content\") pod \"certified-operators-2dpsf\" (UID: \"d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44\") " pod="openshift-marketplace/certified-operators-2dpsf" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.184836 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-64ns6\" (UniqueName: \"kubernetes.io/projected/d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44-kube-api-access-64ns6\") pod \"certified-operators-2dpsf\" (UID: \"d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44\") " pod="openshift-marketplace/certified-operators-2dpsf" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.185038 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44-utilities\") pod \"certified-operators-2dpsf\" (UID: \"d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44\") " 
pod="openshift-marketplace/certified-operators-2dpsf" Nov 28 13:31:38 crc kubenswrapper[4857]: E1128 13:31:38.186986 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:38.686975168 +0000 UTC m=+148.810916595 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.219071 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-w8khp" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.291516 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.291807 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-64ns6\" (UniqueName: \"kubernetes.io/projected/d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44-kube-api-access-64ns6\") pod \"certified-operators-2dpsf\" (UID: \"d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44\") " pod="openshift-marketplace/certified-operators-2dpsf" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.291868 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44-utilities\") pod \"certified-operators-2dpsf\" (UID: \"d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44\") " pod="openshift-marketplace/certified-operators-2dpsf" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.291983 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44-catalog-content\") pod \"certified-operators-2dpsf\" (UID: \"d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44\") " pod="openshift-marketplace/certified-operators-2dpsf" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.292505 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44-catalog-content\") pod \"certified-operators-2dpsf\" (UID: \"d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44\") " pod="openshift-marketplace/certified-operators-2dpsf" Nov 28 13:31:38 crc kubenswrapper[4857]: E1128 13:31:38.292600 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:38.792581513 +0000 UTC m=+148.916522960 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.293171 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44-utilities\") pod \"certified-operators-2dpsf\" (UID: \"d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44\") " pod="openshift-marketplace/certified-operators-2dpsf" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.322213 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-64ns6\" (UniqueName: \"kubernetes.io/projected/d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44-kube-api-access-64ns6\") pod \"certified-operators-2dpsf\" (UID: \"d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44\") " pod="openshift-marketplace/certified-operators-2dpsf" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.346215 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.357305 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.396924 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:38 crc kubenswrapper[4857]: E1128 13:31:38.397555 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:38.8975423 +0000 UTC m=+149.021483737 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.418324 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-77c6r"] Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.429288 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-qp4rc" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.430253 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-qp4rc" Nov 28 13:31:38 crc kubenswrapper[4857]: W1128 13:31:38.442926 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd31f352c_2360_4d7e_bb8d_6bfa04257c06.slice/crio-73827bf44896e6298b3340a5cb25252982810257be064de53f20edb30bfd0993 WatchSource:0}: Error finding container 73827bf44896e6298b3340a5cb25252982810257be064de53f20edb30bfd0993: Status 404 returned error can't find the container with id 73827bf44896e6298b3340a5cb25252982810257be064de53f20edb30bfd0993 Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.447652 4857 patch_prober.go:28] interesting pod/apiserver-76f77b778f-qp4rc container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Nov 28 13:31:38 crc kubenswrapper[4857]: [+]log ok Nov 28 13:31:38 crc kubenswrapper[4857]: [+]etcd ok Nov 28 13:31:38 crc kubenswrapper[4857]: [+]poststarthook/start-apiserver-admission-initializer ok Nov 28 13:31:38 crc kubenswrapper[4857]: [+]poststarthook/generic-apiserver-start-informers ok Nov 28 13:31:38 crc kubenswrapper[4857]: [+]poststarthook/max-in-flight-filter ok Nov 28 13:31:38 crc kubenswrapper[4857]: [+]poststarthook/storage-object-count-tracker-hook ok Nov 28 13:31:38 crc kubenswrapper[4857]: [+]poststarthook/image.openshift.io-apiserver-caches ok Nov 28 13:31:38 crc kubenswrapper[4857]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Nov 28 13:31:38 crc kubenswrapper[4857]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld Nov 28 13:31:38 crc kubenswrapper[4857]: [+]poststarthook/project.openshift.io-projectcache ok Nov 28 13:31:38 crc kubenswrapper[4857]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Nov 28 13:31:38 crc kubenswrapper[4857]: [+]poststarthook/openshift.io-startinformers ok Nov 28 13:31:38 crc kubenswrapper[4857]: [+]poststarthook/openshift.io-restmapperupdater ok Nov 28 13:31:38 crc kubenswrapper[4857]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Nov 28 13:31:38 crc kubenswrapper[4857]: livez check failed Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.447715 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-qp4rc" podUID="5ccc741a-98e5-420d-ad6e-4260ea2abb1f" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 
13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.471886 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2dpsf" Nov 28 13:31:38 crc kubenswrapper[4857]: E1128 13:31:38.500581 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-28 13:31:39.000565461 +0000 UTC m=+149.124506898 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.500598 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.500870 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:38 crc kubenswrapper[4857]: E1128 13:31:38.501205 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-28 13:31:39.001198639 +0000 UTC m=+149.125140076 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-98dmw" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.544902 4857 patch_prober.go:28] interesting pod/downloads-7954f5f757-5c5lq container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body= Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.544931 4857 patch_prober.go:28] interesting pod/downloads-7954f5f757-5c5lq container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" start-of-body= Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.544990 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-5c5lq" podUID="1cb29ded-850a-45ff-8201-a991fe779c01" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.545077 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-5c5lq" podUID="1cb29ded-850a-45ff-8201-a991fe779c01" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.17:8080/\": dial tcp 10.217.0.17:8080: connect: connection refused" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.598975 4857 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-28T13:31:37.653819834Z","Handler":null,"Name":""} Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.601795 4857 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.601837 4857 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.602399 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.612871 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 28 13:31:38 crc kubenswrapper[4857]: W1128 13:31:38.684472 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b6479f0_333b_4a96_9adf_2099afdc2447.slice/crio-37e8e1a5860deb1de673e7c3c97e78cdb89f68b30709b7202921477c133f7542 WatchSource:0}: Error finding container 37e8e1a5860deb1de673e7c3c97e78cdb89f68b30709b7202921477c133f7542: Status 404 returned error can't find the container with id 37e8e1a5860deb1de673e7c3c97e78cdb89f68b30709b7202921477c133f7542 Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.708668 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.717626 4857 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.717729 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.779908 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-98dmw\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:38 crc kubenswrapper[4857]: I1128 13:31:38.841281 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.032929 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"dd10652ce0d49f4f67c817ea540d144922ef335b563cfe2a222e60ca7221531a"} Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.039717 4857 generic.go:334] "Generic (PLEG): container finished" podID="d31f352c-2360-4d7e-bb8d-6bfa04257c06" containerID="13c25e4592278c57d546b772c25e071fabaeca190930ce223ee389966a5dd052" exitCode=0 Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.039833 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-77c6r" event={"ID":"d31f352c-2360-4d7e-bb8d-6bfa04257c06","Type":"ContainerDied","Data":"13c25e4592278c57d546b772c25e071fabaeca190930ce223ee389966a5dd052"} Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.039917 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-77c6r" event={"ID":"d31f352c-2360-4d7e-bb8d-6bfa04257c06","Type":"ContainerStarted","Data":"73827bf44896e6298b3340a5cb25252982810257be064de53f20edb30bfd0993"} Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.045671 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"ed14d2d7e5794eeb18246b995063eebc5e63fd711b6afce87d98dcc9b64085bd"} Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.045748 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"37e8e1a5860deb1de673e7c3c97e78cdb89f68b30709b7202921477c133f7542"} Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.046502 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.046986 4857 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.051912 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2dpsf"] Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.057691 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-qnqmv" event={"ID":"3b437875-4ae6-4455-a281-84281a93fad0","Type":"ContainerStarted","Data":"9badf4df41cb85bdac64eb55fb5edc9a82c0d8d22bab9d845b01e982a3606e05"} Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.063076 4857 patch_prober.go:28] interesting pod/router-default-5444994796-ghlnz container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 13:31:39 crc kubenswrapper[4857]: [-]has-synced failed: reason withheld Nov 28 13:31:39 crc kubenswrapper[4857]: [+]process-running ok Nov 28 13:31:39 crc kubenswrapper[4857]: healthz check failed Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.063145 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ghlnz" 
podUID="6c5bcd39-8067-47c3-bc3b-5aa1aed7b516" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.096080 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-qnqmv" podStartSLOduration=13.096054747 podStartE2EDuration="13.096054747s" podCreationTimestamp="2025-11-28 13:31:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:39.091727202 +0000 UTC m=+149.215668659" watchObservedRunningTime="2025-11-28 13:31:39.096054747 +0000 UTC m=+149.219996184" Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.205499 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-j4qb5"] Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.205568 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-w8khp"] Nov 28 13:31:39 crc kubenswrapper[4857]: W1128 13:31:39.211396 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d751cbb_f2e2_430d_9754_c882a5e924a5.slice/crio-92853df408448a71fcbb15988ea31566f4c24f8a6dbf731aac8d8275ab398c90 WatchSource:0}: Error finding container 92853df408448a71fcbb15988ea31566f4c24f8a6dbf731aac8d8275ab398c90: Status 404 returned error can't find the container with id 92853df408448a71fcbb15988ea31566f4c24f8a6dbf731aac8d8275ab398c90 Nov 28 13:31:39 crc kubenswrapper[4857]: W1128 13:31:39.223230 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71a8af09_48a3_4be5_b08a_7e5381ecca76.slice/crio-d81f89d4bdccc28deb9514a7749daf069cccbd17d9525ec15113ca198ce906ff WatchSource:0}: Error finding container d81f89d4bdccc28deb9514a7749daf069cccbd17d9525ec15113ca198ce906ff: Status 404 returned error can't find the container with id d81f89d4bdccc28deb9514a7749daf069cccbd17d9525ec15113ca198ce906ff Nov 28 13:31:39 crc kubenswrapper[4857]: W1128 13:31:39.224804 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod62de45bc_ae71_4802_858e_2a9ac94455ce.slice/crio-9b89a3609043e70399476d077cd4771921168bd2d9b112443a8614fc1d604435 WatchSource:0}: Error finding container 9b89a3609043e70399476d077cd4771921168bd2d9b112443a8614fc1d604435: Status 404 returned error can't find the container with id 9b89a3609043e70399476d077cd4771921168bd2d9b112443a8614fc1d604435 Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.327429 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-98dmw"] Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.480108 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405610-z8jl4" Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.620012 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gt4rk\" (UniqueName: \"kubernetes.io/projected/49fd1de8-3f87-4d33-b209-df83dd1096a8-kube-api-access-gt4rk\") pod \"49fd1de8-3f87-4d33-b209-df83dd1096a8\" (UID: \"49fd1de8-3f87-4d33-b209-df83dd1096a8\") " Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.620470 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/49fd1de8-3f87-4d33-b209-df83dd1096a8-secret-volume\") pod \"49fd1de8-3f87-4d33-b209-df83dd1096a8\" (UID: \"49fd1de8-3f87-4d33-b209-df83dd1096a8\") " Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.620522 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/49fd1de8-3f87-4d33-b209-df83dd1096a8-config-volume\") pod \"49fd1de8-3f87-4d33-b209-df83dd1096a8\" (UID: \"49fd1de8-3f87-4d33-b209-df83dd1096a8\") " Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.621673 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49fd1de8-3f87-4d33-b209-df83dd1096a8-config-volume" (OuterVolumeSpecName: "config-volume") pod "49fd1de8-3f87-4d33-b209-df83dd1096a8" (UID: "49fd1de8-3f87-4d33-b209-df83dd1096a8"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.627285 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49fd1de8-3f87-4d33-b209-df83dd1096a8-kube-api-access-gt4rk" (OuterVolumeSpecName: "kube-api-access-gt4rk") pod "49fd1de8-3f87-4d33-b209-df83dd1096a8" (UID: "49fd1de8-3f87-4d33-b209-df83dd1096a8"). InnerVolumeSpecName "kube-api-access-gt4rk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.627574 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49fd1de8-3f87-4d33-b209-df83dd1096a8-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "49fd1de8-3f87-4d33-b209-df83dd1096a8" (UID: "49fd1de8-3f87-4d33-b209-df83dd1096a8"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.695487 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-gh8mn"] Nov 28 13:31:39 crc kubenswrapper[4857]: E1128 13:31:39.695725 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49fd1de8-3f87-4d33-b209-df83dd1096a8" containerName="collect-profiles" Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.695740 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="49fd1de8-3f87-4d33-b209-df83dd1096a8" containerName="collect-profiles" Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.695844 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="49fd1de8-3f87-4d33-b209-df83dd1096a8" containerName="collect-profiles" Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.696689 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gh8mn" Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.699336 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.713043 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gh8mn"] Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.721878 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gt4rk\" (UniqueName: \"kubernetes.io/projected/49fd1de8-3f87-4d33-b209-df83dd1096a8-kube-api-access-gt4rk\") on node \"crc\" DevicePath \"\"" Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.721929 4857 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/49fd1de8-3f87-4d33-b209-df83dd1096a8-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.721943 4857 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/49fd1de8-3f87-4d33-b209-df83dd1096a8-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.823571 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0642909d-f900-49c8-919f-921a3fc66ac8-utilities\") pod \"redhat-marketplace-gh8mn\" (UID: \"0642909d-f900-49c8-919f-921a3fc66ac8\") " pod="openshift-marketplace/redhat-marketplace-gh8mn" Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.823626 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0642909d-f900-49c8-919f-921a3fc66ac8-catalog-content\") pod \"redhat-marketplace-gh8mn\" (UID: \"0642909d-f900-49c8-919f-921a3fc66ac8\") " pod="openshift-marketplace/redhat-marketplace-gh8mn" Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.823770 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6ff8z\" (UniqueName: \"kubernetes.io/projected/0642909d-f900-49c8-919f-921a3fc66ac8-kube-api-access-6ff8z\") pod \"redhat-marketplace-gh8mn\" (UID: \"0642909d-f900-49c8-919f-921a3fc66ac8\") " pod="openshift-marketplace/redhat-marketplace-gh8mn" Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.925560 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0642909d-f900-49c8-919f-921a3fc66ac8-utilities\") pod \"redhat-marketplace-gh8mn\" (UID: \"0642909d-f900-49c8-919f-921a3fc66ac8\") " pod="openshift-marketplace/redhat-marketplace-gh8mn" Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.925625 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0642909d-f900-49c8-919f-921a3fc66ac8-catalog-content\") pod \"redhat-marketplace-gh8mn\" (UID: \"0642909d-f900-49c8-919f-921a3fc66ac8\") " pod="openshift-marketplace/redhat-marketplace-gh8mn" Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.925658 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6ff8z\" (UniqueName: \"kubernetes.io/projected/0642909d-f900-49c8-919f-921a3fc66ac8-kube-api-access-6ff8z\") pod 
\"redhat-marketplace-gh8mn\" (UID: \"0642909d-f900-49c8-919f-921a3fc66ac8\") " pod="openshift-marketplace/redhat-marketplace-gh8mn" Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.926510 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0642909d-f900-49c8-919f-921a3fc66ac8-catalog-content\") pod \"redhat-marketplace-gh8mn\" (UID: \"0642909d-f900-49c8-919f-921a3fc66ac8\") " pod="openshift-marketplace/redhat-marketplace-gh8mn" Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.926777 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0642909d-f900-49c8-919f-921a3fc66ac8-utilities\") pod \"redhat-marketplace-gh8mn\" (UID: \"0642909d-f900-49c8-919f-921a3fc66ac8\") " pod="openshift-marketplace/redhat-marketplace-gh8mn" Nov 28 13:31:39 crc kubenswrapper[4857]: I1128 13:31:39.962186 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6ff8z\" (UniqueName: \"kubernetes.io/projected/0642909d-f900-49c8-919f-921a3fc66ac8-kube-api-access-6ff8z\") pod \"redhat-marketplace-gh8mn\" (UID: \"0642909d-f900-49c8-919f-921a3fc66ac8\") " pod="openshift-marketplace/redhat-marketplace-gh8mn" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.011330 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gh8mn" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.061510 4857 patch_prober.go:28] interesting pod/router-default-5444994796-ghlnz container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 13:31:40 crc kubenswrapper[4857]: [-]has-synced failed: reason withheld Nov 28 13:31:40 crc kubenswrapper[4857]: [+]process-running ok Nov 28 13:31:40 crc kubenswrapper[4857]: healthz check failed Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.061595 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ghlnz" podUID="6c5bcd39-8067-47c3-bc3b-5aa1aed7b516" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.076640 4857 generic.go:334] "Generic (PLEG): container finished" podID="62de45bc-ae71-4802-858e-2a9ac94455ce" containerID="6e4e430a4a5dbb73fb0d10eb93118efaa31135b7bf57e57adfe53f75b2ad1e02" exitCode=0 Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.077181 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j4qb5" event={"ID":"62de45bc-ae71-4802-858e-2a9ac94455ce","Type":"ContainerDied","Data":"6e4e430a4a5dbb73fb0d10eb93118efaa31135b7bf57e57adfe53f75b2ad1e02"} Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.077255 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j4qb5" event={"ID":"62de45bc-ae71-4802-858e-2a9ac94455ce","Type":"ContainerStarted","Data":"9b89a3609043e70399476d077cd4771921168bd2d9b112443a8614fc1d604435"} Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.079226 4857 generic.go:334] "Generic (PLEG): container finished" podID="d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44" containerID="87afcfaecd70ad51ae6516ca1f6e2bcd8db88f557f1fb6825e6cd8bd95feb52a" exitCode=0 Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.080562 4857 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2dpsf" event={"ID":"d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44","Type":"ContainerDied","Data":"87afcfaecd70ad51ae6516ca1f6e2bcd8db88f557f1fb6825e6cd8bd95feb52a"} Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.080624 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2dpsf" event={"ID":"d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44","Type":"ContainerStarted","Data":"dba377551a49efe7800c33836748f020d040da4ceeeb0569536a20503dd77d32"} Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.112522 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-4kz5c"] Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.113917 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4kz5c" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.115375 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" event={"ID":"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8","Type":"ContainerStarted","Data":"fb77c744e578a75560342b7129cb73ae7ede546df1dada9abeb9ead237469695"} Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.115417 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" event={"ID":"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8","Type":"ContainerStarted","Data":"092e942ccae4df99da3f9014b6677f2337d88122a89fd44fe32fb4abb00bfa03"} Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.115635 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.125463 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4kz5c"] Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.129102 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"3357479112e0e81c0b6b6c9203d12cb8583919e8724bc92233805c575ce60d6d"} Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.137391 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"ab053d4964a815357fc263283d00f73fab93bcc46865f70e84d64eba27516864"} Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.137447 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"92853df408448a71fcbb15988ea31566f4c24f8a6dbf731aac8d8275ab398c90"} Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.139568 4857 generic.go:334] "Generic (PLEG): container finished" podID="71a8af09-48a3-4be5-b08a-7e5381ecca76" containerID="36a6fe64a2d3d16dc593460409a2b9ca4f05e85eb89e328410534ea4c25d3665" exitCode=0 Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.140497 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w8khp" 
event={"ID":"71a8af09-48a3-4be5-b08a-7e5381ecca76","Type":"ContainerDied","Data":"36a6fe64a2d3d16dc593460409a2b9ca4f05e85eb89e328410534ea4c25d3665"} Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.140535 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w8khp" event={"ID":"71a8af09-48a3-4be5-b08a-7e5381ecca76","Type":"ContainerStarted","Data":"d81f89d4bdccc28deb9514a7749daf069cccbd17d9525ec15113ca198ce906ff"} Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.147139 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405610-z8jl4" event={"ID":"49fd1de8-3f87-4d33-b209-df83dd1096a8","Type":"ContainerDied","Data":"79ca42dd73efffbc9cae44ae8cdff047fcc5b4ec57f30960fd67f42939a0b22b"} Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.147228 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="79ca42dd73efffbc9cae44ae8cdff047fcc5b4ec57f30960fd67f42939a0b22b" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.147157 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405610-z8jl4" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.176329 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" podStartSLOduration=129.176307095 podStartE2EDuration="2m9.176307095s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:40.174262487 +0000 UTC m=+150.298203944" watchObservedRunningTime="2025-11-28 13:31:40.176307095 +0000 UTC m=+150.300248522" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.231080 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd79fa7b-1665-4850-a916-7528854be201-catalog-content\") pod \"redhat-marketplace-4kz5c\" (UID: \"fd79fa7b-1665-4850-a916-7528854be201\") " pod="openshift-marketplace/redhat-marketplace-4kz5c" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.231157 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b7bwp\" (UniqueName: \"kubernetes.io/projected/fd79fa7b-1665-4850-a916-7528854be201-kube-api-access-b7bwp\") pod \"redhat-marketplace-4kz5c\" (UID: \"fd79fa7b-1665-4850-a916-7528854be201\") " pod="openshift-marketplace/redhat-marketplace-4kz5c" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.231324 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd79fa7b-1665-4850-a916-7528854be201-utilities\") pod \"redhat-marketplace-4kz5c\" (UID: \"fd79fa7b-1665-4850-a916-7528854be201\") " pod="openshift-marketplace/redhat-marketplace-4kz5c" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.246352 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.308348 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gh8mn"] Nov 28 13:31:40 crc kubenswrapper[4857]: W1128 
13:31:40.323585 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0642909d_f900_49c8_919f_921a3fc66ac8.slice/crio-4ce630cc4f189f8f55a1b3eb3a6857c512f5e096af35c5e8191b2adb6b289aa2 WatchSource:0}: Error finding container 4ce630cc4f189f8f55a1b3eb3a6857c512f5e096af35c5e8191b2adb6b289aa2: Status 404 returned error can't find the container with id 4ce630cc4f189f8f55a1b3eb3a6857c512f5e096af35c5e8191b2adb6b289aa2 Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.333402 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd79fa7b-1665-4850-a916-7528854be201-utilities\") pod \"redhat-marketplace-4kz5c\" (UID: \"fd79fa7b-1665-4850-a916-7528854be201\") " pod="openshift-marketplace/redhat-marketplace-4kz5c" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.333472 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd79fa7b-1665-4850-a916-7528854be201-catalog-content\") pod \"redhat-marketplace-4kz5c\" (UID: \"fd79fa7b-1665-4850-a916-7528854be201\") " pod="openshift-marketplace/redhat-marketplace-4kz5c" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.333516 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b7bwp\" (UniqueName: \"kubernetes.io/projected/fd79fa7b-1665-4850-a916-7528854be201-kube-api-access-b7bwp\") pod \"redhat-marketplace-4kz5c\" (UID: \"fd79fa7b-1665-4850-a916-7528854be201\") " pod="openshift-marketplace/redhat-marketplace-4kz5c" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.333974 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd79fa7b-1665-4850-a916-7528854be201-utilities\") pod \"redhat-marketplace-4kz5c\" (UID: \"fd79fa7b-1665-4850-a916-7528854be201\") " pod="openshift-marketplace/redhat-marketplace-4kz5c" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.334073 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd79fa7b-1665-4850-a916-7528854be201-catalog-content\") pod \"redhat-marketplace-4kz5c\" (UID: \"fd79fa7b-1665-4850-a916-7528854be201\") " pod="openshift-marketplace/redhat-marketplace-4kz5c" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.356062 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b7bwp\" (UniqueName: \"kubernetes.io/projected/fd79fa7b-1665-4850-a916-7528854be201-kube-api-access-b7bwp\") pod \"redhat-marketplace-4kz5c\" (UID: \"fd79fa7b-1665-4850-a916-7528854be201\") " pod="openshift-marketplace/redhat-marketplace-4kz5c" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.443913 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4kz5c" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.581487 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-76m2l" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.584236 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-76m2l" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.599215 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-76m2l" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.657006 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4kz5c"] Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.698427 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-qf98m"] Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.702831 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qf98m" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.720498 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.729409 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qf98m"] Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.829912 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.835771 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.837179 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.841204 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.841471 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.849713 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4plbp\" (UniqueName: \"kubernetes.io/projected/1c8a00dc-9bad-4bb0-8003-9b8eb43299cd-kube-api-access-4plbp\") pod \"redhat-operators-qf98m\" (UID: \"1c8a00dc-9bad-4bb0-8003-9b8eb43299cd\") " pod="openshift-marketplace/redhat-operators-qf98m" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.849807 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c8a00dc-9bad-4bb0-8003-9b8eb43299cd-utilities\") pod \"redhat-operators-qf98m\" (UID: \"1c8a00dc-9bad-4bb0-8003-9b8eb43299cd\") " pod="openshift-marketplace/redhat-operators-qf98m" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.849874 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c8a00dc-9bad-4bb0-8003-9b8eb43299cd-catalog-content\") pod \"redhat-operators-qf98m\" (UID: \"1c8a00dc-9bad-4bb0-8003-9b8eb43299cd\") " pod="openshift-marketplace/redhat-operators-qf98m" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.893700 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-gkjq4"] Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.895022 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-gkjq4" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.901493 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gkjq4"] Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.951138 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/712824f4-21ad-419b-bc7e-2e01f3b1f0a9-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"712824f4-21ad-419b-bc7e-2e01f3b1f0a9\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.951196 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/712824f4-21ad-419b-bc7e-2e01f3b1f0a9-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"712824f4-21ad-419b-bc7e-2e01f3b1f0a9\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.951260 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4plbp\" (UniqueName: \"kubernetes.io/projected/1c8a00dc-9bad-4bb0-8003-9b8eb43299cd-kube-api-access-4plbp\") pod \"redhat-operators-qf98m\" (UID: \"1c8a00dc-9bad-4bb0-8003-9b8eb43299cd\") " pod="openshift-marketplace/redhat-operators-qf98m" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.951303 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c8a00dc-9bad-4bb0-8003-9b8eb43299cd-utilities\") pod \"redhat-operators-qf98m\" (UID: \"1c8a00dc-9bad-4bb0-8003-9b8eb43299cd\") " pod="openshift-marketplace/redhat-operators-qf98m" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.951350 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c8a00dc-9bad-4bb0-8003-9b8eb43299cd-catalog-content\") pod \"redhat-operators-qf98m\" (UID: \"1c8a00dc-9bad-4bb0-8003-9b8eb43299cd\") " pod="openshift-marketplace/redhat-operators-qf98m" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.951897 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c8a00dc-9bad-4bb0-8003-9b8eb43299cd-catalog-content\") pod \"redhat-operators-qf98m\" (UID: \"1c8a00dc-9bad-4bb0-8003-9b8eb43299cd\") " pod="openshift-marketplace/redhat-operators-qf98m" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.951904 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c8a00dc-9bad-4bb0-8003-9b8eb43299cd-utilities\") pod \"redhat-operators-qf98m\" (UID: \"1c8a00dc-9bad-4bb0-8003-9b8eb43299cd\") " pod="openshift-marketplace/redhat-operators-qf98m" Nov 28 13:31:40 crc kubenswrapper[4857]: I1128 13:31:40.979046 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4plbp\" (UniqueName: \"kubernetes.io/projected/1c8a00dc-9bad-4bb0-8003-9b8eb43299cd-kube-api-access-4plbp\") pod \"redhat-operators-qf98m\" (UID: \"1c8a00dc-9bad-4bb0-8003-9b8eb43299cd\") " pod="openshift-marketplace/redhat-operators-qf98m" Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.007727 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-console/console-f9d7485db-6gf7t" Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.007811 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-6gf7t" Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.010205 4857 patch_prober.go:28] interesting pod/console-f9d7485db-6gf7t container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body= Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.010269 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-6gf7t" podUID="5f40ecda-b519-4cfe-8b7b-6854e018fe24" containerName="console" probeResult="failure" output="Get \"https://10.217.0.13:8443/health\": dial tcp 10.217.0.13:8443: connect: connection refused" Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.030595 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qf98m" Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.052848 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3622bd49-2e24-4a9e-9647-754e9797c2e1-catalog-content\") pod \"redhat-operators-gkjq4\" (UID: \"3622bd49-2e24-4a9e-9647-754e9797c2e1\") " pod="openshift-marketplace/redhat-operators-gkjq4" Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.052901 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/712824f4-21ad-419b-bc7e-2e01f3b1f0a9-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"712824f4-21ad-419b-bc7e-2e01f3b1f0a9\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.052924 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/712824f4-21ad-419b-bc7e-2e01f3b1f0a9-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"712824f4-21ad-419b-bc7e-2e01f3b1f0a9\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.053014 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dpj5h\" (UniqueName: \"kubernetes.io/projected/3622bd49-2e24-4a9e-9647-754e9797c2e1-kube-api-access-dpj5h\") pod \"redhat-operators-gkjq4\" (UID: \"3622bd49-2e24-4a9e-9647-754e9797c2e1\") " pod="openshift-marketplace/redhat-operators-gkjq4" Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.053042 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3622bd49-2e24-4a9e-9647-754e9797c2e1-utilities\") pod \"redhat-operators-gkjq4\" (UID: \"3622bd49-2e24-4a9e-9647-754e9797c2e1\") " pod="openshift-marketplace/redhat-operators-gkjq4" Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.053147 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/712824f4-21ad-419b-bc7e-2e01f3b1f0a9-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"712824f4-21ad-419b-bc7e-2e01f3b1f0a9\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 13:31:41 crc 
kubenswrapper[4857]: I1128 13:31:41.056505 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-ghlnz" Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.060723 4857 patch_prober.go:28] interesting pod/router-default-5444994796-ghlnz container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 13:31:41 crc kubenswrapper[4857]: [-]has-synced failed: reason withheld Nov 28 13:31:41 crc kubenswrapper[4857]: [+]process-running ok Nov 28 13:31:41 crc kubenswrapper[4857]: healthz check failed Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.060807 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ghlnz" podUID="6c5bcd39-8067-47c3-bc3b-5aa1aed7b516" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.083090 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.084021 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.084125 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.087283 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.087842 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.088602 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/712824f4-21ad-419b-bc7e-2e01f3b1f0a9-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"712824f4-21ad-419b-bc7e-2e01f3b1f0a9\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.153719 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3622bd49-2e24-4a9e-9647-754e9797c2e1-catalog-content\") pod \"redhat-operators-gkjq4\" (UID: \"3622bd49-2e24-4a9e-9647-754e9797c2e1\") " pod="openshift-marketplace/redhat-operators-gkjq4" Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.153796 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dpj5h\" (UniqueName: \"kubernetes.io/projected/3622bd49-2e24-4a9e-9647-754e9797c2e1-kube-api-access-dpj5h\") pod \"redhat-operators-gkjq4\" (UID: \"3622bd49-2e24-4a9e-9647-754e9797c2e1\") " pod="openshift-marketplace/redhat-operators-gkjq4" Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.153848 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3622bd49-2e24-4a9e-9647-754e9797c2e1-utilities\") pod \"redhat-operators-gkjq4\" (UID: \"3622bd49-2e24-4a9e-9647-754e9797c2e1\") " pod="openshift-marketplace/redhat-operators-gkjq4" Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.157012 4857 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3622bd49-2e24-4a9e-9647-754e9797c2e1-utilities\") pod \"redhat-operators-gkjq4\" (UID: \"3622bd49-2e24-4a9e-9647-754e9797c2e1\") " pod="openshift-marketplace/redhat-operators-gkjq4" Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.157032 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3622bd49-2e24-4a9e-9647-754e9797c2e1-catalog-content\") pod \"redhat-operators-gkjq4\" (UID: \"3622bd49-2e24-4a9e-9647-754e9797c2e1\") " pod="openshift-marketplace/redhat-operators-gkjq4" Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.162870 4857 generic.go:334] "Generic (PLEG): container finished" podID="fd79fa7b-1665-4850-a916-7528854be201" containerID="9926589284d3c1a3b050d195bffcb9ef298753462e9d442286daff63296c6237" exitCode=0 Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.164116 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4kz5c" event={"ID":"fd79fa7b-1665-4850-a916-7528854be201","Type":"ContainerDied","Data":"9926589284d3c1a3b050d195bffcb9ef298753462e9d442286daff63296c6237"} Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.164233 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4kz5c" event={"ID":"fd79fa7b-1665-4850-a916-7528854be201","Type":"ContainerStarted","Data":"702d31575ed9f1977198413bad8c40e599a55f41384f7638f6abf62d64648fdf"} Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.171927 4857 generic.go:334] "Generic (PLEG): container finished" podID="0642909d-f900-49c8-919f-921a3fc66ac8" containerID="85c0dd82a18e3a7ed179d6d44a93a215dea45da877905b6ed336f6b9a4ea671f" exitCode=0 Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.172333 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gh8mn" event={"ID":"0642909d-f900-49c8-919f-921a3fc66ac8","Type":"ContainerDied","Data":"85c0dd82a18e3a7ed179d6d44a93a215dea45da877905b6ed336f6b9a4ea671f"} Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.172415 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gh8mn" event={"ID":"0642909d-f900-49c8-919f-921a3fc66ac8","Type":"ContainerStarted","Data":"4ce630cc4f189f8f55a1b3eb3a6857c512f5e096af35c5e8191b2adb6b289aa2"} Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.181157 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dpj5h\" (UniqueName: \"kubernetes.io/projected/3622bd49-2e24-4a9e-9647-754e9797c2e1-kube-api-access-dpj5h\") pod \"redhat-operators-gkjq4\" (UID: \"3622bd49-2e24-4a9e-9647-754e9797c2e1\") " pod="openshift-marketplace/redhat-operators-gkjq4" Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.186148 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-76m2l" Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.199562 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.217285 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-gkjq4" Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.255834 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2ce36fef-2acd-4960-b375-4ac54812c81b-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"2ce36fef-2acd-4960-b375-4ac54812c81b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.256172 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2ce36fef-2acd-4960-b375-4ac54812c81b-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"2ce36fef-2acd-4960-b375-4ac54812c81b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.308392 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.308439 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.376041 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2ce36fef-2acd-4960-b375-4ac54812c81b-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"2ce36fef-2acd-4960-b375-4ac54812c81b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.376184 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2ce36fef-2acd-4960-b375-4ac54812c81b-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"2ce36fef-2acd-4960-b375-4ac54812c81b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.376335 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2ce36fef-2acd-4960-b375-4ac54812c81b-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"2ce36fef-2acd-4960-b375-4ac54812c81b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.427627 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2ce36fef-2acd-4960-b375-4ac54812c81b-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"2ce36fef-2acd-4960-b375-4ac54812c81b\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.609548 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qf98m"] Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.720276 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.883369 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 28 13:31:41 crc kubenswrapper[4857]: I1128 13:31:41.973975 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gkjq4"] Nov 28 13:31:42 crc kubenswrapper[4857]: I1128 13:31:42.059744 4857 patch_prober.go:28] interesting pod/router-default-5444994796-ghlnz container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 13:31:42 crc kubenswrapper[4857]: [-]has-synced failed: reason withheld Nov 28 13:31:42 crc kubenswrapper[4857]: [+]process-running ok Nov 28 13:31:42 crc kubenswrapper[4857]: healthz check failed Nov 28 13:31:42 crc kubenswrapper[4857]: I1128 13:31:42.059825 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ghlnz" podUID="6c5bcd39-8067-47c3-bc3b-5aa1aed7b516" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 13:31:42 crc kubenswrapper[4857]: W1128 13:31:42.491653 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3622bd49_2e24_4a9e_9647_754e9797c2e1.slice/crio-206d1d6ace55047138b4313560cc22eca18a0108260079edf96d3aa2c922db85 WatchSource:0}: Error finding container 206d1d6ace55047138b4313560cc22eca18a0108260079edf96d3aa2c922db85: Status 404 returned error can't find the container with id 206d1d6ace55047138b4313560cc22eca18a0108260079edf96d3aa2c922db85 Nov 28 13:31:42 crc kubenswrapper[4857]: I1128 13:31:42.500569 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qf98m" event={"ID":"1c8a00dc-9bad-4bb0-8003-9b8eb43299cd","Type":"ContainerStarted","Data":"55135305bbcb21018c21607ef5a2b0d1f47a18543544c31964202a2409e2cf0a"} Nov 28 13:31:42 crc kubenswrapper[4857]: W1128 13:31:42.501326 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod712824f4_21ad_419b_bc7e_2e01f3b1f0a9.slice/crio-f688400a333e25192974b2eb1c49f13a9700a7d23dc0de8e201716ea578c2765 WatchSource:0}: Error finding container f688400a333e25192974b2eb1c49f13a9700a7d23dc0de8e201716ea578c2765: Status 404 returned error can't find the container with id f688400a333e25192974b2eb1c49f13a9700a7d23dc0de8e201716ea578c2765 Nov 28 13:31:43 crc kubenswrapper[4857]: I1128 13:31:43.018357 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 28 13:31:43 crc kubenswrapper[4857]: W1128 13:31:43.026859 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod2ce36fef_2acd_4960_b375_4ac54812c81b.slice/crio-9a5031402b5b660fb35358a5c1394d3b218c95418365b69105183f2eb7cfa00e WatchSource:0}: Error finding container 9a5031402b5b660fb35358a5c1394d3b218c95418365b69105183f2eb7cfa00e: Status 404 returned error can't find the container with id 9a5031402b5b660fb35358a5c1394d3b218c95418365b69105183f2eb7cfa00e Nov 28 13:31:43 crc kubenswrapper[4857]: I1128 13:31:43.059794 4857 patch_prober.go:28] interesting pod/router-default-5444994796-ghlnz container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 
500" start-of-body=[-]backend-http failed: reason withheld Nov 28 13:31:43 crc kubenswrapper[4857]: [-]has-synced failed: reason withheld Nov 28 13:31:43 crc kubenswrapper[4857]: [+]process-running ok Nov 28 13:31:43 crc kubenswrapper[4857]: healthz check failed Nov 28 13:31:43 crc kubenswrapper[4857]: I1128 13:31:43.060777 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ghlnz" podUID="6c5bcd39-8067-47c3-bc3b-5aa1aed7b516" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 13:31:43 crc kubenswrapper[4857]: I1128 13:31:43.434605 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-qp4rc" Nov 28 13:31:43 crc kubenswrapper[4857]: I1128 13:31:43.438903 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-qp4rc" Nov 28 13:31:43 crc kubenswrapper[4857]: I1128 13:31:43.519813 4857 generic.go:334] "Generic (PLEG): container finished" podID="3622bd49-2e24-4a9e-9647-754e9797c2e1" containerID="43163b2db9742925d9f9fdfa10b3e3271f2675582794dfd8b59ae33ccfd197d8" exitCode=0 Nov 28 13:31:43 crc kubenswrapper[4857]: I1128 13:31:43.519923 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gkjq4" event={"ID":"3622bd49-2e24-4a9e-9647-754e9797c2e1","Type":"ContainerDied","Data":"43163b2db9742925d9f9fdfa10b3e3271f2675582794dfd8b59ae33ccfd197d8"} Nov 28 13:31:43 crc kubenswrapper[4857]: I1128 13:31:43.519984 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gkjq4" event={"ID":"3622bd49-2e24-4a9e-9647-754e9797c2e1","Type":"ContainerStarted","Data":"206d1d6ace55047138b4313560cc22eca18a0108260079edf96d3aa2c922db85"} Nov 28 13:31:43 crc kubenswrapper[4857]: I1128 13:31:43.526486 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"712824f4-21ad-419b-bc7e-2e01f3b1f0a9","Type":"ContainerStarted","Data":"f688400a333e25192974b2eb1c49f13a9700a7d23dc0de8e201716ea578c2765"} Nov 28 13:31:43 crc kubenswrapper[4857]: I1128 13:31:43.531079 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"2ce36fef-2acd-4960-b375-4ac54812c81b","Type":"ContainerStarted","Data":"9a5031402b5b660fb35358a5c1394d3b218c95418365b69105183f2eb7cfa00e"} Nov 28 13:31:43 crc kubenswrapper[4857]: I1128 13:31:43.535364 4857 generic.go:334] "Generic (PLEG): container finished" podID="1c8a00dc-9bad-4bb0-8003-9b8eb43299cd" containerID="99e157b50976c52db9885bfc9d48b9006a36a74e1293891fbd6455366e696c62" exitCode=0 Nov 28 13:31:43 crc kubenswrapper[4857]: I1128 13:31:43.535432 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qf98m" event={"ID":"1c8a00dc-9bad-4bb0-8003-9b8eb43299cd","Type":"ContainerDied","Data":"99e157b50976c52db9885bfc9d48b9006a36a74e1293891fbd6455366e696c62"} Nov 28 13:31:44 crc kubenswrapper[4857]: I1128 13:31:44.062469 4857 patch_prober.go:28] interesting pod/router-default-5444994796-ghlnz container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 28 13:31:44 crc kubenswrapper[4857]: [-]has-synced failed: reason withheld Nov 28 13:31:44 crc kubenswrapper[4857]: [+]process-running ok Nov 28 13:31:44 crc 
kubenswrapper[4857]: healthz check failed Nov 28 13:31:44 crc kubenswrapper[4857]: I1128 13:31:44.062537 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ghlnz" podUID="6c5bcd39-8067-47c3-bc3b-5aa1aed7b516" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 28 13:31:44 crc kubenswrapper[4857]: I1128 13:31:44.546363 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"712824f4-21ad-419b-bc7e-2e01f3b1f0a9","Type":"ContainerStarted","Data":"479b276944c00f9a02773060595c49f4882a761f0e6925348ee30c908f7a2532"} Nov 28 13:31:44 crc kubenswrapper[4857]: I1128 13:31:44.551616 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"2ce36fef-2acd-4960-b375-4ac54812c81b","Type":"ContainerStarted","Data":"a3bb8b68ae54dc37226f1c328761c3963ed84cab47c7b2c8a4799fe7b11d4572"} Nov 28 13:31:44 crc kubenswrapper[4857]: I1128 13:31:44.563276 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=4.563255035 podStartE2EDuration="4.563255035s" podCreationTimestamp="2025-11-28 13:31:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:44.562358849 +0000 UTC m=+154.686300306" watchObservedRunningTime="2025-11-28 13:31:44.563255035 +0000 UTC m=+154.687196502" Nov 28 13:31:44 crc kubenswrapper[4857]: I1128 13:31:44.598848 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=3.5988248069999997 podStartE2EDuration="3.598824807s" podCreationTimestamp="2025-11-28 13:31:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:31:44.595213413 +0000 UTC m=+154.719154950" watchObservedRunningTime="2025-11-28 13:31:44.598824807 +0000 UTC m=+154.722766244" Nov 28 13:31:45 crc kubenswrapper[4857]: I1128 13:31:45.064560 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-ghlnz" Nov 28 13:31:45 crc kubenswrapper[4857]: I1128 13:31:45.069294 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-ghlnz" Nov 28 13:31:45 crc kubenswrapper[4857]: I1128 13:31:45.561352 4857 generic.go:334] "Generic (PLEG): container finished" podID="712824f4-21ad-419b-bc7e-2e01f3b1f0a9" containerID="479b276944c00f9a02773060595c49f4882a761f0e6925348ee30c908f7a2532" exitCode=0 Nov 28 13:31:45 crc kubenswrapper[4857]: I1128 13:31:45.561432 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"712824f4-21ad-419b-bc7e-2e01f3b1f0a9","Type":"ContainerDied","Data":"479b276944c00f9a02773060595c49f4882a761f0e6925348ee30c908f7a2532"} Nov 28 13:31:45 crc kubenswrapper[4857]: I1128 13:31:45.563229 4857 generic.go:334] "Generic (PLEG): container finished" podID="2ce36fef-2acd-4960-b375-4ac54812c81b" containerID="a3bb8b68ae54dc37226f1c328761c3963ed84cab47c7b2c8a4799fe7b11d4572" exitCode=0 Nov 28 13:31:45 crc kubenswrapper[4857]: I1128 13:31:45.564381 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"2ce36fef-2acd-4960-b375-4ac54812c81b","Type":"ContainerDied","Data":"a3bb8b68ae54dc37226f1c328761c3963ed84cab47c7b2c8a4799fe7b11d4572"} Nov 28 13:31:45 crc kubenswrapper[4857]: I1128 13:31:45.972464 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-jjd26" Nov 28 13:31:46 crc kubenswrapper[4857]: I1128 13:31:46.883153 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 13:31:46 crc kubenswrapper[4857]: I1128 13:31:46.889826 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 13:31:46 crc kubenswrapper[4857]: I1128 13:31:46.987431 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/712824f4-21ad-419b-bc7e-2e01f3b1f0a9-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "712824f4-21ad-419b-bc7e-2e01f3b1f0a9" (UID: "712824f4-21ad-419b-bc7e-2e01f3b1f0a9"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:31:46 crc kubenswrapper[4857]: I1128 13:31:46.987626 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/712824f4-21ad-419b-bc7e-2e01f3b1f0a9-kubelet-dir\") pod \"712824f4-21ad-419b-bc7e-2e01f3b1f0a9\" (UID: \"712824f4-21ad-419b-bc7e-2e01f3b1f0a9\") " Nov 28 13:31:46 crc kubenswrapper[4857]: I1128 13:31:46.987669 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/712824f4-21ad-419b-bc7e-2e01f3b1f0a9-kube-api-access\") pod \"712824f4-21ad-419b-bc7e-2e01f3b1f0a9\" (UID: \"712824f4-21ad-419b-bc7e-2e01f3b1f0a9\") " Nov 28 13:31:46 crc kubenswrapper[4857]: I1128 13:31:46.989259 4857 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/712824f4-21ad-419b-bc7e-2e01f3b1f0a9-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 28 13:31:46 crc kubenswrapper[4857]: I1128 13:31:46.998191 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/712824f4-21ad-419b-bc7e-2e01f3b1f0a9-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "712824f4-21ad-419b-bc7e-2e01f3b1f0a9" (UID: "712824f4-21ad-419b-bc7e-2e01f3b1f0a9"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:31:47 crc kubenswrapper[4857]: I1128 13:31:47.090318 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2ce36fef-2acd-4960-b375-4ac54812c81b-kube-api-access\") pod \"2ce36fef-2acd-4960-b375-4ac54812c81b\" (UID: \"2ce36fef-2acd-4960-b375-4ac54812c81b\") " Nov 28 13:31:47 crc kubenswrapper[4857]: I1128 13:31:47.090769 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2ce36fef-2acd-4960-b375-4ac54812c81b-kubelet-dir\") pod \"2ce36fef-2acd-4960-b375-4ac54812c81b\" (UID: \"2ce36fef-2acd-4960-b375-4ac54812c81b\") " Nov 28 13:31:47 crc kubenswrapper[4857]: I1128 13:31:47.090879 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2ce36fef-2acd-4960-b375-4ac54812c81b-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "2ce36fef-2acd-4960-b375-4ac54812c81b" (UID: "2ce36fef-2acd-4960-b375-4ac54812c81b"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:31:47 crc kubenswrapper[4857]: I1128 13:31:47.091034 4857 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2ce36fef-2acd-4960-b375-4ac54812c81b-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 28 13:31:47 crc kubenswrapper[4857]: I1128 13:31:47.091046 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/712824f4-21ad-419b-bc7e-2e01f3b1f0a9-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 13:31:47 crc kubenswrapper[4857]: I1128 13:31:47.096270 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ce36fef-2acd-4960-b375-4ac54812c81b-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "2ce36fef-2acd-4960-b375-4ac54812c81b" (UID: "2ce36fef-2acd-4960-b375-4ac54812c81b"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:31:47 crc kubenswrapper[4857]: I1128 13:31:47.192101 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2ce36fef-2acd-4960-b375-4ac54812c81b-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 13:31:47 crc kubenswrapper[4857]: I1128 13:31:47.605178 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"2ce36fef-2acd-4960-b375-4ac54812c81b","Type":"ContainerDied","Data":"9a5031402b5b660fb35358a5c1394d3b218c95418365b69105183f2eb7cfa00e"} Nov 28 13:31:47 crc kubenswrapper[4857]: I1128 13:31:47.605225 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9a5031402b5b660fb35358a5c1394d3b218c95418365b69105183f2eb7cfa00e" Nov 28 13:31:47 crc kubenswrapper[4857]: I1128 13:31:47.605276 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 28 13:31:47 crc kubenswrapper[4857]: I1128 13:31:47.614497 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"712824f4-21ad-419b-bc7e-2e01f3b1f0a9","Type":"ContainerDied","Data":"f688400a333e25192974b2eb1c49f13a9700a7d23dc0de8e201716ea578c2765"} Nov 28 13:31:47 crc kubenswrapper[4857]: I1128 13:31:47.614563 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f688400a333e25192974b2eb1c49f13a9700a7d23dc0de8e201716ea578c2765" Nov 28 13:31:47 crc kubenswrapper[4857]: I1128 13:31:47.614651 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 28 13:31:48 crc kubenswrapper[4857]: I1128 13:31:48.557642 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-5c5lq" Nov 28 13:31:51 crc kubenswrapper[4857]: I1128 13:31:51.013126 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-6gf7t" Nov 28 13:31:51 crc kubenswrapper[4857]: I1128 13:31:51.018637 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-6gf7t" Nov 28 13:31:53 crc kubenswrapper[4857]: I1128 13:31:53.792612 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0cf43f51-b79b-49fc-85ca-a245a248f27a-metrics-certs\") pod \"network-metrics-daemon-26tq7\" (UID: \"0cf43f51-b79b-49fc-85ca-a245a248f27a\") " pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:31:53 crc kubenswrapper[4857]: I1128 13:31:53.802996 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0cf43f51-b79b-49fc-85ca-a245a248f27a-metrics-certs\") pod \"network-metrics-daemon-26tq7\" (UID: \"0cf43f51-b79b-49fc-85ca-a245a248f27a\") " pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:31:54 crc kubenswrapper[4857]: I1128 13:31:54.022851 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-26tq7" Nov 28 13:31:58 crc kubenswrapper[4857]: I1128 13:31:58.847008 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:32:02 crc kubenswrapper[4857]: I1128 13:32:02.097197 4857 patch_prober.go:28] interesting pod/router-default-5444994796-ghlnz container/router namespace/openshift-ingress: Readiness probe status=failure output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 28 13:32:02 crc kubenswrapper[4857]: I1128 13:32:02.097668 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-ingress/router-default-5444994796-ghlnz" podUID="6c5bcd39-8067-47c3-bc3b-5aa1aed7b516" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 13:32:08 crc kubenswrapper[4857]: I1128 13:32:08.245317 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 28 13:32:11 crc kubenswrapper[4857]: I1128 13:32:11.165440 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-gdkwk" Nov 28 13:32:11 crc kubenswrapper[4857]: I1128 13:32:11.308825 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:32:11 crc kubenswrapper[4857]: I1128 13:32:11.308924 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:32:17 crc kubenswrapper[4857]: E1128 13:32:17.650027 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 28 13:32:17 crc kubenswrapper[4857]: E1128 13:32:17.650627 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wms8x,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-j4qb5_openshift-marketplace(62de45bc-ae71-4802-858e-2a9ac94455ce): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 13:32:17 crc kubenswrapper[4857]: E1128 13:32:17.651810 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-j4qb5" podUID="62de45bc-ae71-4802-858e-2a9ac94455ce" Nov 28 13:32:18 crc kubenswrapper[4857]: I1128 13:32:18.076305 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 28 13:32:18 crc kubenswrapper[4857]: E1128 13:32:18.076589 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ce36fef-2acd-4960-b375-4ac54812c81b" containerName="pruner" Nov 28 13:32:18 crc kubenswrapper[4857]: I1128 13:32:18.076601 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ce36fef-2acd-4960-b375-4ac54812c81b" containerName="pruner" Nov 28 13:32:18 crc kubenswrapper[4857]: E1128 13:32:18.076623 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="712824f4-21ad-419b-bc7e-2e01f3b1f0a9" containerName="pruner" Nov 28 13:32:18 crc kubenswrapper[4857]: I1128 13:32:18.076629 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="712824f4-21ad-419b-bc7e-2e01f3b1f0a9" containerName="pruner" Nov 28 13:32:18 crc kubenswrapper[4857]: I1128 13:32:18.076713 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ce36fef-2acd-4960-b375-4ac54812c81b" containerName="pruner" Nov 28 13:32:18 crc kubenswrapper[4857]: I1128 13:32:18.076725 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="712824f4-21ad-419b-bc7e-2e01f3b1f0a9" containerName="pruner" Nov 28 13:32:18 crc kubenswrapper[4857]: I1128 13:32:18.077162 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 13:32:18 crc kubenswrapper[4857]: I1128 13:32:18.079744 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 28 13:32:18 crc kubenswrapper[4857]: I1128 13:32:18.079834 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 28 13:32:18 crc kubenswrapper[4857]: I1128 13:32:18.079891 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 28 13:32:18 crc kubenswrapper[4857]: I1128 13:32:18.216753 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/15ccd56f-f02e-475e-b700-a557ce80d514-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"15ccd56f-f02e-475e-b700-a557ce80d514\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 13:32:18 crc kubenswrapper[4857]: I1128 13:32:18.216819 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/15ccd56f-f02e-475e-b700-a557ce80d514-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"15ccd56f-f02e-475e-b700-a557ce80d514\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 13:32:18 crc kubenswrapper[4857]: I1128 13:32:18.318623 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/15ccd56f-f02e-475e-b700-a557ce80d514-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"15ccd56f-f02e-475e-b700-a557ce80d514\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 13:32:18 crc kubenswrapper[4857]: I1128 13:32:18.318695 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/15ccd56f-f02e-475e-b700-a557ce80d514-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"15ccd56f-f02e-475e-b700-a557ce80d514\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 13:32:18 crc kubenswrapper[4857]: I1128 13:32:18.318819 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/15ccd56f-f02e-475e-b700-a557ce80d514-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"15ccd56f-f02e-475e-b700-a557ce80d514\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 13:32:18 crc kubenswrapper[4857]: I1128 13:32:18.343889 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/15ccd56f-f02e-475e-b700-a557ce80d514-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"15ccd56f-f02e-475e-b700-a557ce80d514\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 13:32:18 crc kubenswrapper[4857]: I1128 13:32:18.405063 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 13:32:18 crc kubenswrapper[4857]: E1128 13:32:18.512546 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-j4qb5" podUID="62de45bc-ae71-4802-858e-2a9ac94455ce" Nov 28 13:32:18 crc kubenswrapper[4857]: E1128 13:32:18.567292 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 28 13:32:18 crc kubenswrapper[4857]: E1128 13:32:18.567461 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-b7bwp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-4kz5c_openshift-marketplace(fd79fa7b-1665-4850-a916-7528854be201): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 13:32:18 crc kubenswrapper[4857]: E1128 13:32:18.568641 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-4kz5c" podUID="fd79fa7b-1665-4850-a916-7528854be201" Nov 28 13:32:19 crc kubenswrapper[4857]: E1128 13:32:19.881424 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-4kz5c" podUID="fd79fa7b-1665-4850-a916-7528854be201" Nov 28 13:32:19 crc kubenswrapper[4857]: E1128 13:32:19.939277 
4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 28 13:32:19 crc kubenswrapper[4857]: E1128 13:32:19.939556 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-m4m4d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-77c6r_openshift-marketplace(d31f352c-2360-4d7e-bb8d-6bfa04257c06): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 13:32:19 crc kubenswrapper[4857]: E1128 13:32:19.940713 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 28 13:32:19 crc kubenswrapper[4857]: E1128 13:32:19.940743 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-77c6r" podUID="d31f352c-2360-4d7e-bb8d-6bfa04257c06" Nov 28 13:32:19 crc kubenswrapper[4857]: E1128 13:32:19.940882 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kthj6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-w8khp_openshift-marketplace(71a8af09-48a3-4be5-b08a-7e5381ecca76): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 13:32:19 crc kubenswrapper[4857]: E1128 13:32:19.942097 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-w8khp" podUID="71a8af09-48a3-4be5-b08a-7e5381ecca76" Nov 28 13:32:19 crc kubenswrapper[4857]: E1128 13:32:19.980572 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 28 13:32:19 crc kubenswrapper[4857]: E1128 13:32:19.980741 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6ff8z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-gh8mn_openshift-marketplace(0642909d-f900-49c8-919f-921a3fc66ac8): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 13:32:19 crc kubenswrapper[4857]: E1128 13:32:19.981912 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-gh8mn" podUID="0642909d-f900-49c8-919f-921a3fc66ac8" Nov 28 13:32:19 crc kubenswrapper[4857]: E1128 13:32:19.986201 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 28 13:32:19 crc kubenswrapper[4857]: E1128 13:32:19.986553 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-64ns6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-2dpsf_openshift-marketplace(d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 13:32:19 crc kubenswrapper[4857]: E1128 13:32:19.988179 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-2dpsf" podUID="d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44" Nov 28 13:32:22 crc kubenswrapper[4857]: I1128 13:32:22.863594 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 28 13:32:22 crc kubenswrapper[4857]: I1128 13:32:22.869526 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 28 13:32:22 crc kubenswrapper[4857]: I1128 13:32:22.874489 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 28 13:32:22 crc kubenswrapper[4857]: I1128 13:32:22.977933 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/0925aef1-cbdf-4f9b-bde9-b18b40e006d4-var-lock\") pod \"installer-9-crc\" (UID: \"0925aef1-cbdf-4f9b-bde9-b18b40e006d4\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 13:32:22 crc kubenswrapper[4857]: I1128 13:32:22.978419 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0925aef1-cbdf-4f9b-bde9-b18b40e006d4-kube-api-access\") pod \"installer-9-crc\" (UID: \"0925aef1-cbdf-4f9b-bde9-b18b40e006d4\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 13:32:22 crc kubenswrapper[4857]: I1128 13:32:22.978456 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0925aef1-cbdf-4f9b-bde9-b18b40e006d4-kubelet-dir\") pod \"installer-9-crc\" (UID: \"0925aef1-cbdf-4f9b-bde9-b18b40e006d4\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 13:32:23 crc kubenswrapper[4857]: I1128 13:32:23.080129 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/0925aef1-cbdf-4f9b-bde9-b18b40e006d4-var-lock\") pod \"installer-9-crc\" (UID: \"0925aef1-cbdf-4f9b-bde9-b18b40e006d4\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 13:32:23 crc kubenswrapper[4857]: I1128 13:32:23.080177 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0925aef1-cbdf-4f9b-bde9-b18b40e006d4-kube-api-access\") pod \"installer-9-crc\" (UID: \"0925aef1-cbdf-4f9b-bde9-b18b40e006d4\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 13:32:23 crc kubenswrapper[4857]: I1128 13:32:23.080202 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0925aef1-cbdf-4f9b-bde9-b18b40e006d4-kubelet-dir\") pod \"installer-9-crc\" (UID: \"0925aef1-cbdf-4f9b-bde9-b18b40e006d4\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 13:32:23 crc kubenswrapper[4857]: I1128 13:32:23.080290 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0925aef1-cbdf-4f9b-bde9-b18b40e006d4-kubelet-dir\") pod \"installer-9-crc\" (UID: \"0925aef1-cbdf-4f9b-bde9-b18b40e006d4\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 13:32:23 crc kubenswrapper[4857]: I1128 13:32:23.080305 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/0925aef1-cbdf-4f9b-bde9-b18b40e006d4-var-lock\") pod \"installer-9-crc\" (UID: \"0925aef1-cbdf-4f9b-bde9-b18b40e006d4\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 13:32:23 crc kubenswrapper[4857]: I1128 13:32:23.111172 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0925aef1-cbdf-4f9b-bde9-b18b40e006d4-kube-api-access\") pod \"installer-9-crc\" (UID: 
\"0925aef1-cbdf-4f9b-bde9-b18b40e006d4\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 28 13:32:23 crc kubenswrapper[4857]: I1128 13:32:23.192568 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 28 13:32:23 crc kubenswrapper[4857]: E1128 13:32:23.205815 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-77c6r" podUID="d31f352c-2360-4d7e-bb8d-6bfa04257c06" Nov 28 13:32:23 crc kubenswrapper[4857]: E1128 13:32:23.205829 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-2dpsf" podUID="d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44" Nov 28 13:32:23 crc kubenswrapper[4857]: E1128 13:32:23.205894 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-gh8mn" podUID="0642909d-f900-49c8-919f-921a3fc66ac8" Nov 28 13:32:23 crc kubenswrapper[4857]: E1128 13:32:23.205998 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-w8khp" podUID="71a8af09-48a3-4be5-b08a-7e5381ecca76" Nov 28 13:32:23 crc kubenswrapper[4857]: E1128 13:32:23.233430 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 28 13:32:23 crc kubenswrapper[4857]: E1128 13:32:23.233577 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4plbp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-qf98m_openshift-marketplace(1c8a00dc-9bad-4bb0-8003-9b8eb43299cd): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 28 13:32:23 crc kubenswrapper[4857]: E1128 13:32:23.234793 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-qf98m" podUID="1c8a00dc-9bad-4bb0-8003-9b8eb43299cd" Nov 28 13:32:23 crc kubenswrapper[4857]: I1128 13:32:23.779598 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-26tq7"] Nov 28 13:32:23 crc kubenswrapper[4857]: I1128 13:32:23.827318 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gkjq4" event={"ID":"3622bd49-2e24-4a9e-9647-754e9797c2e1","Type":"ContainerStarted","Data":"588fff7b0806e0821fc1e50a7c79d1e4c3a1d21e932b3b86ebb7474ed5fbbfc0"} Nov 28 13:32:23 crc kubenswrapper[4857]: I1128 13:32:23.831327 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-26tq7" event={"ID":"0cf43f51-b79b-49fc-85ca-a245a248f27a","Type":"ContainerStarted","Data":"452a6c1b7ebe6127de1b5de5435fb8e305f02a481a889957bae114aa36243353"} Nov 28 13:32:23 crc kubenswrapper[4857]: E1128 13:32:23.842963 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-qf98m" podUID="1c8a00dc-9bad-4bb0-8003-9b8eb43299cd" Nov 28 13:32:23 crc kubenswrapper[4857]: I1128 13:32:23.876015 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 28 13:32:23 crc kubenswrapper[4857]: W1128 13:32:23.884288 4857 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-pod15ccd56f_f02e_475e_b700_a557ce80d514.slice/crio-dacb11b7a23313ed381cfc5d98300bf33b058e378bd9f258e6b29020fe1a9492 WatchSource:0}: Error finding container dacb11b7a23313ed381cfc5d98300bf33b058e378bd9f258e6b29020fe1a9492: Status 404 returned error can't find the container with id dacb11b7a23313ed381cfc5d98300bf33b058e378bd9f258e6b29020fe1a9492 Nov 28 13:32:23 crc kubenswrapper[4857]: I1128 13:32:23.933793 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 28 13:32:24 crc kubenswrapper[4857]: I1128 13:32:24.840617 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"15ccd56f-f02e-475e-b700-a557ce80d514","Type":"ContainerStarted","Data":"fe8ccdbe5fdd04f7b8da85a9765c3a5ded4e0be7b527eb6f9694f6c6f0d22596"} Nov 28 13:32:24 crc kubenswrapper[4857]: I1128 13:32:24.841000 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"15ccd56f-f02e-475e-b700-a557ce80d514","Type":"ContainerStarted","Data":"dacb11b7a23313ed381cfc5d98300bf33b058e378bd9f258e6b29020fe1a9492"} Nov 28 13:32:24 crc kubenswrapper[4857]: I1128 13:32:24.843723 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-26tq7" event={"ID":"0cf43f51-b79b-49fc-85ca-a245a248f27a","Type":"ContainerStarted","Data":"8309827bcb8e6cca82d454d0d21bb9b415006aba9d429a159e0ad2a3280ed823"} Nov 28 13:32:24 crc kubenswrapper[4857]: I1128 13:32:24.843752 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-26tq7" event={"ID":"0cf43f51-b79b-49fc-85ca-a245a248f27a","Type":"ContainerStarted","Data":"dfd41d0d48ec0a15eeff7e1f5daed0c24fd9c863bff82157df21889d54e8e99c"} Nov 28 13:32:24 crc kubenswrapper[4857]: I1128 13:32:24.845019 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"0925aef1-cbdf-4f9b-bde9-b18b40e006d4","Type":"ContainerStarted","Data":"3a27b6b4953c2786e2ae9c288a3775d80c3eb05416fed76b9967d63d29244a7b"} Nov 28 13:32:24 crc kubenswrapper[4857]: I1128 13:32:24.845096 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"0925aef1-cbdf-4f9b-bde9-b18b40e006d4","Type":"ContainerStarted","Data":"bf91ebd55e75dd1af147cae3e2d8284fbbad52773f07d466f89c3260893cb4cd"} Nov 28 13:32:24 crc kubenswrapper[4857]: I1128 13:32:24.846515 4857 generic.go:334] "Generic (PLEG): container finished" podID="3622bd49-2e24-4a9e-9647-754e9797c2e1" containerID="588fff7b0806e0821fc1e50a7c79d1e4c3a1d21e932b3b86ebb7474ed5fbbfc0" exitCode=0 Nov 28 13:32:24 crc kubenswrapper[4857]: I1128 13:32:24.846543 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gkjq4" event={"ID":"3622bd49-2e24-4a9e-9647-754e9797c2e1","Type":"ContainerDied","Data":"588fff7b0806e0821fc1e50a7c79d1e4c3a1d21e932b3b86ebb7474ed5fbbfc0"} Nov 28 13:32:24 crc kubenswrapper[4857]: I1128 13:32:24.865438 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=6.865422504 podStartE2EDuration="6.865422504s" podCreationTimestamp="2025-11-28 13:32:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:32:24.859422892 +0000 UTC m=+194.983364319" 
watchObservedRunningTime="2025-11-28 13:32:24.865422504 +0000 UTC m=+194.989363961" Nov 28 13:32:24 crc kubenswrapper[4857]: I1128 13:32:24.888412 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=2.888389985 podStartE2EDuration="2.888389985s" podCreationTimestamp="2025-11-28 13:32:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:32:24.880689503 +0000 UTC m=+195.004630930" watchObservedRunningTime="2025-11-28 13:32:24.888389985 +0000 UTC m=+195.012331422" Nov 28 13:32:24 crc kubenswrapper[4857]: I1128 13:32:24.902194 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-26tq7" podStartSLOduration=173.902169311 podStartE2EDuration="2m53.902169311s" podCreationTimestamp="2025-11-28 13:29:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:32:24.897220168 +0000 UTC m=+195.021161595" watchObservedRunningTime="2025-11-28 13:32:24.902169311 +0000 UTC m=+195.026110748" Nov 28 13:32:25 crc kubenswrapper[4857]: I1128 13:32:25.853476 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gkjq4" event={"ID":"3622bd49-2e24-4a9e-9647-754e9797c2e1","Type":"ContainerStarted","Data":"ce31aafa39b143b23609cc612b0be233ba513531fad7bb862667ddf86aad9255"} Nov 28 13:32:25 crc kubenswrapper[4857]: I1128 13:32:25.855523 4857 generic.go:334] "Generic (PLEG): container finished" podID="15ccd56f-f02e-475e-b700-a557ce80d514" containerID="fe8ccdbe5fdd04f7b8da85a9765c3a5ded4e0be7b527eb6f9694f6c6f0d22596" exitCode=0 Nov 28 13:32:25 crc kubenswrapper[4857]: I1128 13:32:25.855580 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"15ccd56f-f02e-475e-b700-a557ce80d514","Type":"ContainerDied","Data":"fe8ccdbe5fdd04f7b8da85a9765c3a5ded4e0be7b527eb6f9694f6c6f0d22596"} Nov 28 13:32:25 crc kubenswrapper[4857]: I1128 13:32:25.879161 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-gkjq4" podStartSLOduration=5.114861069 podStartE2EDuration="45.879145111s" podCreationTimestamp="2025-11-28 13:31:40 +0000 UTC" firstStartedPulling="2025-11-28 13:31:44.553982678 +0000 UTC m=+154.677924115" lastFinishedPulling="2025-11-28 13:32:25.31826672 +0000 UTC m=+195.442208157" observedRunningTime="2025-11-28 13:32:25.875386673 +0000 UTC m=+195.999328110" watchObservedRunningTime="2025-11-28 13:32:25.879145111 +0000 UTC m=+196.003086548" Nov 28 13:32:27 crc kubenswrapper[4857]: I1128 13:32:27.148265 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 13:32:27 crc kubenswrapper[4857]: I1128 13:32:27.345840 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/15ccd56f-f02e-475e-b700-a557ce80d514-kube-api-access\") pod \"15ccd56f-f02e-475e-b700-a557ce80d514\" (UID: \"15ccd56f-f02e-475e-b700-a557ce80d514\") " Nov 28 13:32:27 crc kubenswrapper[4857]: I1128 13:32:27.345994 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/15ccd56f-f02e-475e-b700-a557ce80d514-kubelet-dir\") pod \"15ccd56f-f02e-475e-b700-a557ce80d514\" (UID: \"15ccd56f-f02e-475e-b700-a557ce80d514\") " Nov 28 13:32:27 crc kubenswrapper[4857]: I1128 13:32:27.346152 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/15ccd56f-f02e-475e-b700-a557ce80d514-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "15ccd56f-f02e-475e-b700-a557ce80d514" (UID: "15ccd56f-f02e-475e-b700-a557ce80d514"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:32:27 crc kubenswrapper[4857]: I1128 13:32:27.346405 4857 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/15ccd56f-f02e-475e-b700-a557ce80d514-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:27 crc kubenswrapper[4857]: I1128 13:32:27.355103 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/15ccd56f-f02e-475e-b700-a557ce80d514-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "15ccd56f-f02e-475e-b700-a557ce80d514" (UID: "15ccd56f-f02e-475e-b700-a557ce80d514"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:32:27 crc kubenswrapper[4857]: I1128 13:32:27.447704 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/15ccd56f-f02e-475e-b700-a557ce80d514-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:27 crc kubenswrapper[4857]: I1128 13:32:27.867627 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"15ccd56f-f02e-475e-b700-a557ce80d514","Type":"ContainerDied","Data":"dacb11b7a23313ed381cfc5d98300bf33b058e378bd9f258e6b29020fe1a9492"} Nov 28 13:32:27 crc kubenswrapper[4857]: I1128 13:32:27.867678 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dacb11b7a23313ed381cfc5d98300bf33b058e378bd9f258e6b29020fe1a9492" Nov 28 13:32:27 crc kubenswrapper[4857]: I1128 13:32:27.867738 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 28 13:32:31 crc kubenswrapper[4857]: I1128 13:32:31.217935 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-gkjq4" Nov 28 13:32:31 crc kubenswrapper[4857]: I1128 13:32:31.218449 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-gkjq4" Nov 28 13:32:32 crc kubenswrapper[4857]: I1128 13:32:32.157109 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-gkjq4" Nov 28 13:32:32 crc kubenswrapper[4857]: I1128 13:32:32.206002 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-gkjq4" Nov 28 13:32:32 crc kubenswrapper[4857]: I1128 13:32:32.385837 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gkjq4"] Nov 28 13:32:32 crc kubenswrapper[4857]: I1128 13:32:32.893930 4857 generic.go:334] "Generic (PLEG): container finished" podID="62de45bc-ae71-4802-858e-2a9ac94455ce" containerID="ee42b773cd8cbd683d4e5800da4eab2e0e444740008a32c08b5019b5e39cd019" exitCode=0 Nov 28 13:32:32 crc kubenswrapper[4857]: I1128 13:32:32.893991 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j4qb5" event={"ID":"62de45bc-ae71-4802-858e-2a9ac94455ce","Type":"ContainerDied","Data":"ee42b773cd8cbd683d4e5800da4eab2e0e444740008a32c08b5019b5e39cd019"} Nov 28 13:32:33 crc kubenswrapper[4857]: I1128 13:32:33.899652 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-gkjq4" podUID="3622bd49-2e24-4a9e-9647-754e9797c2e1" containerName="registry-server" containerID="cri-o://ce31aafa39b143b23609cc612b0be233ba513531fad7bb862667ddf86aad9255" gracePeriod=2 Nov 28 13:32:34 crc kubenswrapper[4857]: I1128 13:32:34.908086 4857 generic.go:334] "Generic (PLEG): container finished" podID="3622bd49-2e24-4a9e-9647-754e9797c2e1" containerID="ce31aafa39b143b23609cc612b0be233ba513531fad7bb862667ddf86aad9255" exitCode=0 Nov 28 13:32:34 crc kubenswrapper[4857]: I1128 13:32:34.908295 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gkjq4" event={"ID":"3622bd49-2e24-4a9e-9647-754e9797c2e1","Type":"ContainerDied","Data":"ce31aafa39b143b23609cc612b0be233ba513531fad7bb862667ddf86aad9255"} Nov 28 13:32:37 crc kubenswrapper[4857]: I1128 13:32:37.808329 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-gkjq4" Nov 28 13:32:37 crc kubenswrapper[4857]: I1128 13:32:37.924391 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j4qb5" event={"ID":"62de45bc-ae71-4802-858e-2a9ac94455ce","Type":"ContainerStarted","Data":"206604e60dbe468d10f0cce02adf6ccdfc784bc473b8a4f1d261d3842459447d"} Nov 28 13:32:37 crc kubenswrapper[4857]: I1128 13:32:37.926630 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gkjq4" event={"ID":"3622bd49-2e24-4a9e-9647-754e9797c2e1","Type":"ContainerDied","Data":"206d1d6ace55047138b4313560cc22eca18a0108260079edf96d3aa2c922db85"} Nov 28 13:32:37 crc kubenswrapper[4857]: I1128 13:32:37.926679 4857 scope.go:117] "RemoveContainer" containerID="ce31aafa39b143b23609cc612b0be233ba513531fad7bb862667ddf86aad9255" Nov 28 13:32:37 crc kubenswrapper[4857]: I1128 13:32:37.926780 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gkjq4" Nov 28 13:32:37 crc kubenswrapper[4857]: I1128 13:32:37.940908 4857 scope.go:117] "RemoveContainer" containerID="588fff7b0806e0821fc1e50a7c79d1e4c3a1d21e932b3b86ebb7474ed5fbbfc0" Nov 28 13:32:37 crc kubenswrapper[4857]: I1128 13:32:37.944086 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-j4qb5" podStartSLOduration=5.94280131 podStartE2EDuration="1m0.944070587s" podCreationTimestamp="2025-11-28 13:31:37 +0000 UTC" firstStartedPulling="2025-11-28 13:31:40.080737409 +0000 UTC m=+150.204678846" lastFinishedPulling="2025-11-28 13:32:35.082006686 +0000 UTC m=+205.205948123" observedRunningTime="2025-11-28 13:32:37.942116763 +0000 UTC m=+208.066058200" watchObservedRunningTime="2025-11-28 13:32:37.944070587 +0000 UTC m=+208.068012024" Nov 28 13:32:37 crc kubenswrapper[4857]: I1128 13:32:37.958636 4857 scope.go:117] "RemoveContainer" containerID="43163b2db9742925d9f9fdfa10b3e3271f2675582794dfd8b59ae33ccfd197d8" Nov 28 13:32:37 crc kubenswrapper[4857]: I1128 13:32:37.984648 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3622bd49-2e24-4a9e-9647-754e9797c2e1-catalog-content\") pod \"3622bd49-2e24-4a9e-9647-754e9797c2e1\" (UID: \"3622bd49-2e24-4a9e-9647-754e9797c2e1\") " Nov 28 13:32:37 crc kubenswrapper[4857]: I1128 13:32:37.985069 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3622bd49-2e24-4a9e-9647-754e9797c2e1-utilities\") pod \"3622bd49-2e24-4a9e-9647-754e9797c2e1\" (UID: \"3622bd49-2e24-4a9e-9647-754e9797c2e1\") " Nov 28 13:32:37 crc kubenswrapper[4857]: I1128 13:32:37.985207 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dpj5h\" (UniqueName: \"kubernetes.io/projected/3622bd49-2e24-4a9e-9647-754e9797c2e1-kube-api-access-dpj5h\") pod \"3622bd49-2e24-4a9e-9647-754e9797c2e1\" (UID: \"3622bd49-2e24-4a9e-9647-754e9797c2e1\") " Nov 28 13:32:37 crc kubenswrapper[4857]: I1128 13:32:37.985748 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3622bd49-2e24-4a9e-9647-754e9797c2e1-utilities" (OuterVolumeSpecName: "utilities") pod "3622bd49-2e24-4a9e-9647-754e9797c2e1" (UID: "3622bd49-2e24-4a9e-9647-754e9797c2e1"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:32:37 crc kubenswrapper[4857]: I1128 13:32:37.991151 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3622bd49-2e24-4a9e-9647-754e9797c2e1-kube-api-access-dpj5h" (OuterVolumeSpecName: "kube-api-access-dpj5h") pod "3622bd49-2e24-4a9e-9647-754e9797c2e1" (UID: "3622bd49-2e24-4a9e-9647-754e9797c2e1"). InnerVolumeSpecName "kube-api-access-dpj5h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:32:38 crc kubenswrapper[4857]: I1128 13:32:38.037183 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-j4qb5" Nov 28 13:32:38 crc kubenswrapper[4857]: I1128 13:32:38.037252 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-j4qb5" Nov 28 13:32:38 crc kubenswrapper[4857]: I1128 13:32:38.086794 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dpj5h\" (UniqueName: \"kubernetes.io/projected/3622bd49-2e24-4a9e-9647-754e9797c2e1-kube-api-access-dpj5h\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:38 crc kubenswrapper[4857]: I1128 13:32:38.086827 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3622bd49-2e24-4a9e-9647-754e9797c2e1-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:38 crc kubenswrapper[4857]: I1128 13:32:38.106424 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3622bd49-2e24-4a9e-9647-754e9797c2e1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3622bd49-2e24-4a9e-9647-754e9797c2e1" (UID: "3622bd49-2e24-4a9e-9647-754e9797c2e1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:32:38 crc kubenswrapper[4857]: I1128 13:32:38.188137 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3622bd49-2e24-4a9e-9647-754e9797c2e1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:38 crc kubenswrapper[4857]: I1128 13:32:38.267935 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gkjq4"] Nov 28 13:32:38 crc kubenswrapper[4857]: I1128 13:32:38.274444 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-gkjq4"] Nov 28 13:32:38 crc kubenswrapper[4857]: I1128 13:32:38.933391 4857 generic.go:334] "Generic (PLEG): container finished" podID="fd79fa7b-1665-4850-a916-7528854be201" containerID="c34679e5f0afaa1409d8f6c9467edf5d91a2d20d77eda421806ae448690aacb8" exitCode=0 Nov 28 13:32:38 crc kubenswrapper[4857]: I1128 13:32:38.933450 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4kz5c" event={"ID":"fd79fa7b-1665-4850-a916-7528854be201","Type":"ContainerDied","Data":"c34679e5f0afaa1409d8f6c9467edf5d91a2d20d77eda421806ae448690aacb8"} Nov 28 13:32:38 crc kubenswrapper[4857]: I1128 13:32:38.936597 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w8khp" event={"ID":"71a8af09-48a3-4be5-b08a-7e5381ecca76","Type":"ContainerStarted","Data":"17c1ca4b14b6790c7f45c5b8099ad54df6a4e11afa491515b2dc28a6c0875550"} Nov 28 13:32:38 crc kubenswrapper[4857]: I1128 13:32:38.939733 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qf98m" event={"ID":"1c8a00dc-9bad-4bb0-8003-9b8eb43299cd","Type":"ContainerStarted","Data":"46bcd1223e6d52f6396d8e13417c01c6715a42841e7a1e07eaac5946d9333c4b"} Nov 28 13:32:38 crc kubenswrapper[4857]: I1128 13:32:38.941493 4857 generic.go:334] "Generic (PLEG): container finished" podID="d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44" containerID="25af649f9303d6c782e416e7b52a11cf69880a2a80a66619e45b0576cf8bec5b" exitCode=0 Nov 28 13:32:38 crc kubenswrapper[4857]: I1128 13:32:38.941556 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2dpsf" event={"ID":"d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44","Type":"ContainerDied","Data":"25af649f9303d6c782e416e7b52a11cf69880a2a80a66619e45b0576cf8bec5b"} Nov 28 13:32:38 crc kubenswrapper[4857]: I1128 13:32:38.946801 4857 generic.go:334] "Generic (PLEG): container finished" podID="0642909d-f900-49c8-919f-921a3fc66ac8" containerID="8d2d6a32f4f09bab0a235a52d7f31166158cb8c2d119a4988ad8f37f02cb0ed6" exitCode=0 Nov 28 13:32:38 crc kubenswrapper[4857]: I1128 13:32:38.946871 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gh8mn" event={"ID":"0642909d-f900-49c8-919f-921a3fc66ac8","Type":"ContainerDied","Data":"8d2d6a32f4f09bab0a235a52d7f31166158cb8c2d119a4988ad8f37f02cb0ed6"} Nov 28 13:32:38 crc kubenswrapper[4857]: I1128 13:32:38.952469 4857 generic.go:334] "Generic (PLEG): container finished" podID="d31f352c-2360-4d7e-bb8d-6bfa04257c06" containerID="65adce270adcb525d9dd7c5488f4c765885e23017317895e548629ef7739df4a" exitCode=0 Nov 28 13:32:38 crc kubenswrapper[4857]: I1128 13:32:38.952524 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-77c6r" 
event={"ID":"d31f352c-2360-4d7e-bb8d-6bfa04257c06","Type":"ContainerDied","Data":"65adce270adcb525d9dd7c5488f4c765885e23017317895e548629ef7739df4a"} Nov 28 13:32:39 crc kubenswrapper[4857]: I1128 13:32:39.078192 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-j4qb5" podUID="62de45bc-ae71-4802-858e-2a9ac94455ce" containerName="registry-server" probeResult="failure" output=< Nov 28 13:32:39 crc kubenswrapper[4857]: timeout: failed to connect service ":50051" within 1s Nov 28 13:32:39 crc kubenswrapper[4857]: > Nov 28 13:32:39 crc kubenswrapper[4857]: I1128 13:32:39.960171 4857 generic.go:334] "Generic (PLEG): container finished" podID="71a8af09-48a3-4be5-b08a-7e5381ecca76" containerID="17c1ca4b14b6790c7f45c5b8099ad54df6a4e11afa491515b2dc28a6c0875550" exitCode=0 Nov 28 13:32:39 crc kubenswrapper[4857]: I1128 13:32:39.960276 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w8khp" event={"ID":"71a8af09-48a3-4be5-b08a-7e5381ecca76","Type":"ContainerDied","Data":"17c1ca4b14b6790c7f45c5b8099ad54df6a4e11afa491515b2dc28a6c0875550"} Nov 28 13:32:39 crc kubenswrapper[4857]: I1128 13:32:39.962687 4857 generic.go:334] "Generic (PLEG): container finished" podID="1c8a00dc-9bad-4bb0-8003-9b8eb43299cd" containerID="46bcd1223e6d52f6396d8e13417c01c6715a42841e7a1e07eaac5946d9333c4b" exitCode=0 Nov 28 13:32:39 crc kubenswrapper[4857]: I1128 13:32:39.962767 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qf98m" event={"ID":"1c8a00dc-9bad-4bb0-8003-9b8eb43299cd","Type":"ContainerDied","Data":"46bcd1223e6d52f6396d8e13417c01c6715a42841e7a1e07eaac5946d9333c4b"} Nov 28 13:32:39 crc kubenswrapper[4857]: I1128 13:32:39.967933 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2dpsf" event={"ID":"d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44","Type":"ContainerStarted","Data":"014fbab795c18fdfd236195ac8722e06bdb1e060e6923ee1cd9030e4eca5b013"} Nov 28 13:32:39 crc kubenswrapper[4857]: I1128 13:32:39.971095 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gh8mn" event={"ID":"0642909d-f900-49c8-919f-921a3fc66ac8","Type":"ContainerStarted","Data":"53c68d091d92bed0ccf1da5024eed145412eed7da3f2d16466b1303b3717855c"} Nov 28 13:32:39 crc kubenswrapper[4857]: I1128 13:32:39.974694 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-77c6r" event={"ID":"d31f352c-2360-4d7e-bb8d-6bfa04257c06","Type":"ContainerStarted","Data":"716069339d4d31658a8d1e4d98cf673bc77d9ad7fb3435290348b5014255ab35"} Nov 28 13:32:39 crc kubenswrapper[4857]: I1128 13:32:39.979466 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4kz5c" event={"ID":"fd79fa7b-1665-4850-a916-7528854be201","Type":"ContainerStarted","Data":"02261911e8415b8696e6500d4466496043dec46316e8979019e03f29c7e114c8"} Nov 28 13:32:40 crc kubenswrapper[4857]: I1128 13:32:40.002915 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-gh8mn" podStartSLOduration=2.824803928 podStartE2EDuration="1m1.00289745s" podCreationTimestamp="2025-11-28 13:31:39 +0000 UTC" firstStartedPulling="2025-11-28 13:31:41.174564627 +0000 UTC m=+151.298506064" lastFinishedPulling="2025-11-28 13:32:39.352658149 +0000 UTC m=+209.476599586" observedRunningTime="2025-11-28 13:32:39.994537462 +0000 UTC 
m=+210.118478899" watchObservedRunningTime="2025-11-28 13:32:40.00289745 +0000 UTC m=+210.126838887" Nov 28 13:32:40 crc kubenswrapper[4857]: I1128 13:32:40.011937 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-gh8mn" Nov 28 13:32:40 crc kubenswrapper[4857]: I1128 13:32:40.012023 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-gh8mn" Nov 28 13:32:40 crc kubenswrapper[4857]: I1128 13:32:40.019797 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-4kz5c" podStartSLOduration=1.781506626 podStartE2EDuration="1m0.019780601s" podCreationTimestamp="2025-11-28 13:31:40 +0000 UTC" firstStartedPulling="2025-11-28 13:31:41.166616119 +0000 UTC m=+151.290557556" lastFinishedPulling="2025-11-28 13:32:39.404890094 +0000 UTC m=+209.528831531" observedRunningTime="2025-11-28 13:32:40.015240987 +0000 UTC m=+210.139182424" watchObservedRunningTime="2025-11-28 13:32:40.019780601 +0000 UTC m=+210.143722038" Nov 28 13:32:40 crc kubenswrapper[4857]: I1128 13:32:40.035599 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-2dpsf" podStartSLOduration=2.583318806 podStartE2EDuration="1m2.035581132s" podCreationTimestamp="2025-11-28 13:31:38 +0000 UTC" firstStartedPulling="2025-11-28 13:31:40.083886809 +0000 UTC m=+150.207828246" lastFinishedPulling="2025-11-28 13:32:39.536149135 +0000 UTC m=+209.660090572" observedRunningTime="2025-11-28 13:32:40.033839734 +0000 UTC m=+210.157781171" watchObservedRunningTime="2025-11-28 13:32:40.035581132 +0000 UTC m=+210.159522569" Nov 28 13:32:40 crc kubenswrapper[4857]: I1128 13:32:40.064280 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-77c6r" podStartSLOduration=2.641872248 podStartE2EDuration="1m3.064264905s" podCreationTimestamp="2025-11-28 13:31:37 +0000 UTC" firstStartedPulling="2025-11-28 13:31:39.046613156 +0000 UTC m=+149.170554593" lastFinishedPulling="2025-11-28 13:32:39.469005813 +0000 UTC m=+209.592947250" observedRunningTime="2025-11-28 13:32:40.061334855 +0000 UTC m=+210.185276322" watchObservedRunningTime="2025-11-28 13:32:40.064264905 +0000 UTC m=+210.188206342" Nov 28 13:32:40 crc kubenswrapper[4857]: I1128 13:32:40.245353 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3622bd49-2e24-4a9e-9647-754e9797c2e1" path="/var/lib/kubelet/pods/3622bd49-2e24-4a9e-9647-754e9797c2e1/volumes" Nov 28 13:32:40 crc kubenswrapper[4857]: I1128 13:32:40.445030 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-4kz5c" Nov 28 13:32:40 crc kubenswrapper[4857]: I1128 13:32:40.445141 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-4kz5c" Nov 28 13:32:41 crc kubenswrapper[4857]: I1128 13:32:41.054611 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-gh8mn" podUID="0642909d-f900-49c8-919f-921a3fc66ac8" containerName="registry-server" probeResult="failure" output=< Nov 28 13:32:41 crc kubenswrapper[4857]: timeout: failed to connect service ":50051" within 1s Nov 28 13:32:41 crc kubenswrapper[4857]: > Nov 28 13:32:41 crc kubenswrapper[4857]: I1128 13:32:41.308489 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf 
container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:32:41 crc kubenswrapper[4857]: I1128 13:32:41.308549 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:32:41 crc kubenswrapper[4857]: I1128 13:32:41.308622 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 13:32:41 crc kubenswrapper[4857]: I1128 13:32:41.309172 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704"} pod="openshift-machine-config-operator/machine-config-daemon-dshsf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 13:32:41 crc kubenswrapper[4857]: I1128 13:32:41.309281 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" containerID="cri-o://c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704" gracePeriod=600 Nov 28 13:32:41 crc kubenswrapper[4857]: I1128 13:32:41.486444 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-4kz5c" podUID="fd79fa7b-1665-4850-a916-7528854be201" containerName="registry-server" probeResult="failure" output=< Nov 28 13:32:41 crc kubenswrapper[4857]: timeout: failed to connect service ":50051" within 1s Nov 28 13:32:41 crc kubenswrapper[4857]: > Nov 28 13:32:42 crc kubenswrapper[4857]: I1128 13:32:42.996664 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerDied","Data":"c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704"} Nov 28 13:32:42 crc kubenswrapper[4857]: I1128 13:32:42.996597 4857 generic.go:334] "Generic (PLEG): container finished" podID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerID="c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704" exitCode=0 Nov 28 13:32:45 crc kubenswrapper[4857]: I1128 13:32:45.009014 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerStarted","Data":"f4ddd2633e38405419b400aeb1d375cfd7367412908feb90f94fc4920c65c449"} Nov 28 13:32:47 crc kubenswrapper[4857]: I1128 13:32:47.019469 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w8khp" event={"ID":"71a8af09-48a3-4be5-b08a-7e5381ecca76","Type":"ContainerStarted","Data":"77f6004d97d7dedc017a5790b625ca046fd76fab55fab60c32e0a17919c753df"} Nov 28 13:32:47 crc kubenswrapper[4857]: I1128 13:32:47.022156 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qf98m" 
event={"ID":"1c8a00dc-9bad-4bb0-8003-9b8eb43299cd","Type":"ContainerStarted","Data":"71cb86221f1211097136652ea505662bcbd330b8e575189c88dd298802458c93"} Nov 28 13:32:47 crc kubenswrapper[4857]: I1128 13:32:47.040105 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-w8khp" podStartSLOduration=4.124217358 podStartE2EDuration="1m10.040085426s" podCreationTimestamp="2025-11-28 13:31:37 +0000 UTC" firstStartedPulling="2025-11-28 13:31:40.141616528 +0000 UTC m=+150.265557965" lastFinishedPulling="2025-11-28 13:32:46.057484596 +0000 UTC m=+216.181426033" observedRunningTime="2025-11-28 13:32:47.035495661 +0000 UTC m=+217.159437108" watchObservedRunningTime="2025-11-28 13:32:47.040085426 +0000 UTC m=+217.164026863" Nov 28 13:32:47 crc kubenswrapper[4857]: I1128 13:32:47.052340 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-qf98m" podStartSLOduration=4.578748955 podStartE2EDuration="1m7.05231974s" podCreationTimestamp="2025-11-28 13:31:40 +0000 UTC" firstStartedPulling="2025-11-28 13:31:43.539348685 +0000 UTC m=+153.663290122" lastFinishedPulling="2025-11-28 13:32:46.01291948 +0000 UTC m=+216.136860907" observedRunningTime="2025-11-28 13:32:47.048926037 +0000 UTC m=+217.172867474" watchObservedRunningTime="2025-11-28 13:32:47.05231974 +0000 UTC m=+217.176261177" Nov 28 13:32:47 crc kubenswrapper[4857]: I1128 13:32:47.825183 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-77c6r" Nov 28 13:32:47 crc kubenswrapper[4857]: I1128 13:32:47.825242 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-77c6r" Nov 28 13:32:47 crc kubenswrapper[4857]: I1128 13:32:47.866388 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-77c6r" Nov 28 13:32:48 crc kubenswrapper[4857]: I1128 13:32:48.069322 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-77c6r" Nov 28 13:32:48 crc kubenswrapper[4857]: I1128 13:32:48.080004 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-j4qb5" Nov 28 13:32:48 crc kubenswrapper[4857]: I1128 13:32:48.131511 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-j4qb5" Nov 28 13:32:48 crc kubenswrapper[4857]: I1128 13:32:48.220463 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-w8khp" Nov 28 13:32:48 crc kubenswrapper[4857]: I1128 13:32:48.220514 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-w8khp" Nov 28 13:32:48 crc kubenswrapper[4857]: I1128 13:32:48.260574 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-w8khp" Nov 28 13:32:48 crc kubenswrapper[4857]: I1128 13:32:48.472965 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-2dpsf" Nov 28 13:32:48 crc kubenswrapper[4857]: I1128 13:32:48.473263 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-2dpsf" Nov 28 13:32:48 crc kubenswrapper[4857]: I1128 
13:32:48.512842 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-2dpsf"
Nov 28 13:32:49 crc kubenswrapper[4857]: I1128 13:32:49.073480 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-2dpsf"
Nov 28 13:32:50 crc kubenswrapper[4857]: I1128 13:32:50.050262 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-gh8mn"
Nov 28 13:32:50 crc kubenswrapper[4857]: I1128 13:32:50.092206 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-gh8mn"
Nov 28 13:32:50 crc kubenswrapper[4857]: I1128 13:32:50.478408 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2dpsf"]
Nov 28 13:32:50 crc kubenswrapper[4857]: I1128 13:32:50.496244 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-4kz5c"
Nov 28 13:32:50 crc kubenswrapper[4857]: I1128 13:32:50.538376 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-4kz5c"
Nov 28 13:32:51 crc kubenswrapper[4857]: I1128 13:32:51.031476 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-qf98m"
Nov 28 13:32:51 crc kubenswrapper[4857]: I1128 13:32:51.031530 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-qf98m"
Nov 28 13:32:51 crc kubenswrapper[4857]: I1128 13:32:51.040102 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-2dpsf" podUID="d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44" containerName="registry-server" containerID="cri-o://014fbab795c18fdfd236195ac8722e06bdb1e060e6923ee1cd9030e4eca5b013" gracePeriod=2
Nov 28 13:32:52 crc kubenswrapper[4857]: I1128 13:32:52.047316 4857 generic.go:334] "Generic (PLEG): container finished" podID="d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44" containerID="014fbab795c18fdfd236195ac8722e06bdb1e060e6923ee1cd9030e4eca5b013" exitCode=0
Nov 28 13:32:52 crc kubenswrapper[4857]: I1128 13:32:52.047362 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2dpsf" event={"ID":"d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44","Type":"ContainerDied","Data":"014fbab795c18fdfd236195ac8722e06bdb1e060e6923ee1cd9030e4eca5b013"}
Nov 28 13:32:52 crc kubenswrapper[4857]: I1128 13:32:52.070484 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-qf98m" podUID="1c8a00dc-9bad-4bb0-8003-9b8eb43299cd" containerName="registry-server" probeResult="failure" output=<
Nov 28 13:32:52 crc kubenswrapper[4857]: timeout: failed to connect service ":50051" within 1s
Nov 28 13:32:52 crc kubenswrapper[4857]: >
Nov 28 13:32:52 crc kubenswrapper[4857]: I1128 13:32:52.415027 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2dpsf"
Nov 28 13:32:52 crc kubenswrapper[4857]: I1128 13:32:52.566381 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44-catalog-content\") pod \"d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44\" (UID: \"d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44\") "
Nov 28 13:32:52 crc kubenswrapper[4857]: I1128 13:32:52.566646 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44-utilities\") pod \"d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44\" (UID: \"d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44\") "
Nov 28 13:32:52 crc kubenswrapper[4857]: I1128 13:32:52.566688 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-64ns6\" (UniqueName: \"kubernetes.io/projected/d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44-kube-api-access-64ns6\") pod \"d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44\" (UID: \"d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44\") "
Nov 28 13:32:52 crc kubenswrapper[4857]: I1128 13:32:52.568040 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44-utilities" (OuterVolumeSpecName: "utilities") pod "d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44" (UID: "d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:32:52 crc kubenswrapper[4857]: I1128 13:32:52.574120 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44-kube-api-access-64ns6" (OuterVolumeSpecName: "kube-api-access-64ns6") pod "d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44" (UID: "d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44"). InnerVolumeSpecName "kube-api-access-64ns6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:32:52 crc kubenswrapper[4857]: I1128 13:32:52.626140 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44" (UID: "d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:32:52 crc kubenswrapper[4857]: I1128 13:32:52.668178 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-64ns6\" (UniqueName: \"kubernetes.io/projected/d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44-kube-api-access-64ns6\") on node \"crc\" DevicePath \"\""
Nov 28 13:32:52 crc kubenswrapper[4857]: I1128 13:32:52.668244 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 13:32:52 crc kubenswrapper[4857]: I1128 13:32:52.668259 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 13:32:52 crc kubenswrapper[4857]: I1128 13:32:52.852350 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4kz5c"]
Nov 28 13:32:52 crc kubenswrapper[4857]: I1128 13:32:52.852651 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-4kz5c" podUID="fd79fa7b-1665-4850-a916-7528854be201" containerName="registry-server" containerID="cri-o://02261911e8415b8696e6500d4466496043dec46316e8979019e03f29c7e114c8" gracePeriod=2
Nov 28 13:32:53 crc kubenswrapper[4857]: I1128 13:32:53.054212 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2dpsf" event={"ID":"d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44","Type":"ContainerDied","Data":"dba377551a49efe7800c33836748f020d040da4ceeeb0569536a20503dd77d32"}
Nov 28 13:32:53 crc kubenswrapper[4857]: I1128 13:32:53.054263 4857 scope.go:117] "RemoveContainer" containerID="014fbab795c18fdfd236195ac8722e06bdb1e060e6923ee1cd9030e4eca5b013"
Nov 28 13:32:53 crc kubenswrapper[4857]: I1128 13:32:53.054370 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2dpsf"
Nov 28 13:32:53 crc kubenswrapper[4857]: I1128 13:32:53.070870 4857 scope.go:117] "RemoveContainer" containerID="25af649f9303d6c782e416e7b52a11cf69880a2a80a66619e45b0576cf8bec5b"
Nov 28 13:32:53 crc kubenswrapper[4857]: I1128 13:32:53.083412 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2dpsf"]
Nov 28 13:32:53 crc kubenswrapper[4857]: I1128 13:32:53.086401 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-2dpsf"]
Nov 28 13:32:53 crc kubenswrapper[4857]: I1128 13:32:53.107853 4857 scope.go:117] "RemoveContainer" containerID="87afcfaecd70ad51ae6516ca1f6e2bcd8db88f557f1fb6825e6cd8bd95feb52a"
Nov 28 13:32:53 crc kubenswrapper[4857]: I1128 13:32:53.717144 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4kz5c"
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4kz5c" Nov 28 13:32:53 crc kubenswrapper[4857]: I1128 13:32:53.882607 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b7bwp\" (UniqueName: \"kubernetes.io/projected/fd79fa7b-1665-4850-a916-7528854be201-kube-api-access-b7bwp\") pod \"fd79fa7b-1665-4850-a916-7528854be201\" (UID: \"fd79fa7b-1665-4850-a916-7528854be201\") " Nov 28 13:32:53 crc kubenswrapper[4857]: I1128 13:32:53.882725 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd79fa7b-1665-4850-a916-7528854be201-catalog-content\") pod \"fd79fa7b-1665-4850-a916-7528854be201\" (UID: \"fd79fa7b-1665-4850-a916-7528854be201\") " Nov 28 13:32:53 crc kubenswrapper[4857]: I1128 13:32:53.882774 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd79fa7b-1665-4850-a916-7528854be201-utilities\") pod \"fd79fa7b-1665-4850-a916-7528854be201\" (UID: \"fd79fa7b-1665-4850-a916-7528854be201\") " Nov 28 13:32:53 crc kubenswrapper[4857]: I1128 13:32:53.883780 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd79fa7b-1665-4850-a916-7528854be201-utilities" (OuterVolumeSpecName: "utilities") pod "fd79fa7b-1665-4850-a916-7528854be201" (UID: "fd79fa7b-1665-4850-a916-7528854be201"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:32:53 crc kubenswrapper[4857]: I1128 13:32:53.901388 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd79fa7b-1665-4850-a916-7528854be201-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fd79fa7b-1665-4850-a916-7528854be201" (UID: "fd79fa7b-1665-4850-a916-7528854be201"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:32:53 crc kubenswrapper[4857]: I1128 13:32:53.906241 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd79fa7b-1665-4850-a916-7528854be201-kube-api-access-b7bwp" (OuterVolumeSpecName: "kube-api-access-b7bwp") pod "fd79fa7b-1665-4850-a916-7528854be201" (UID: "fd79fa7b-1665-4850-a916-7528854be201"). InnerVolumeSpecName "kube-api-access-b7bwp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:32:53 crc kubenswrapper[4857]: I1128 13:32:53.984755 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b7bwp\" (UniqueName: \"kubernetes.io/projected/fd79fa7b-1665-4850-a916-7528854be201-kube-api-access-b7bwp\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:53 crc kubenswrapper[4857]: I1128 13:32:53.984797 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd79fa7b-1665-4850-a916-7528854be201-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:53 crc kubenswrapper[4857]: I1128 13:32:53.984810 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd79fa7b-1665-4850-a916-7528854be201-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.060989 4857 generic.go:334] "Generic (PLEG): container finished" podID="fd79fa7b-1665-4850-a916-7528854be201" containerID="02261911e8415b8696e6500d4466496043dec46316e8979019e03f29c7e114c8" exitCode=0 Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.061052 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4kz5c" event={"ID":"fd79fa7b-1665-4850-a916-7528854be201","Type":"ContainerDied","Data":"02261911e8415b8696e6500d4466496043dec46316e8979019e03f29c7e114c8"} Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.061081 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4kz5c" event={"ID":"fd79fa7b-1665-4850-a916-7528854be201","Type":"ContainerDied","Data":"702d31575ed9f1977198413bad8c40e599a55f41384f7638f6abf62d64648fdf"} Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.061097 4857 scope.go:117] "RemoveContainer" containerID="02261911e8415b8696e6500d4466496043dec46316e8979019e03f29c7e114c8" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.061180 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4kz5c" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.085444 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4kz5c"] Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.088289 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-4kz5c"] Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.093134 4857 scope.go:117] "RemoveContainer" containerID="c34679e5f0afaa1409d8f6c9467edf5d91a2d20d77eda421806ae448690aacb8" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.128659 4857 scope.go:117] "RemoveContainer" containerID="9926589284d3c1a3b050d195bffcb9ef298753462e9d442286daff63296c6237" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.146723 4857 scope.go:117] "RemoveContainer" containerID="02261911e8415b8696e6500d4466496043dec46316e8979019e03f29c7e114c8" Nov 28 13:32:54 crc kubenswrapper[4857]: E1128 13:32:54.147276 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"02261911e8415b8696e6500d4466496043dec46316e8979019e03f29c7e114c8\": container with ID starting with 02261911e8415b8696e6500d4466496043dec46316e8979019e03f29c7e114c8 not found: ID does not exist" containerID="02261911e8415b8696e6500d4466496043dec46316e8979019e03f29c7e114c8" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.147314 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"02261911e8415b8696e6500d4466496043dec46316e8979019e03f29c7e114c8"} err="failed to get container status \"02261911e8415b8696e6500d4466496043dec46316e8979019e03f29c7e114c8\": rpc error: code = NotFound desc = could not find container \"02261911e8415b8696e6500d4466496043dec46316e8979019e03f29c7e114c8\": container with ID starting with 02261911e8415b8696e6500d4466496043dec46316e8979019e03f29c7e114c8 not found: ID does not exist" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.147342 4857 scope.go:117] "RemoveContainer" containerID="c34679e5f0afaa1409d8f6c9467edf5d91a2d20d77eda421806ae448690aacb8" Nov 28 13:32:54 crc kubenswrapper[4857]: E1128 13:32:54.147738 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c34679e5f0afaa1409d8f6c9467edf5d91a2d20d77eda421806ae448690aacb8\": container with ID starting with c34679e5f0afaa1409d8f6c9467edf5d91a2d20d77eda421806ae448690aacb8 not found: ID does not exist" containerID="c34679e5f0afaa1409d8f6c9467edf5d91a2d20d77eda421806ae448690aacb8" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.147808 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c34679e5f0afaa1409d8f6c9467edf5d91a2d20d77eda421806ae448690aacb8"} err="failed to get container status \"c34679e5f0afaa1409d8f6c9467edf5d91a2d20d77eda421806ae448690aacb8\": rpc error: code = NotFound desc = could not find container \"c34679e5f0afaa1409d8f6c9467edf5d91a2d20d77eda421806ae448690aacb8\": container with ID starting with c34679e5f0afaa1409d8f6c9467edf5d91a2d20d77eda421806ae448690aacb8 not found: ID does not exist" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.147867 4857 scope.go:117] "RemoveContainer" containerID="9926589284d3c1a3b050d195bffcb9ef298753462e9d442286daff63296c6237" Nov 28 13:32:54 crc kubenswrapper[4857]: E1128 13:32:54.148313 4857 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"9926589284d3c1a3b050d195bffcb9ef298753462e9d442286daff63296c6237\": container with ID starting with 9926589284d3c1a3b050d195bffcb9ef298753462e9d442286daff63296c6237 not found: ID does not exist" containerID="9926589284d3c1a3b050d195bffcb9ef298753462e9d442286daff63296c6237" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.148428 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9926589284d3c1a3b050d195bffcb9ef298753462e9d442286daff63296c6237"} err="failed to get container status \"9926589284d3c1a3b050d195bffcb9ef298753462e9d442286daff63296c6237\": rpc error: code = NotFound desc = could not find container \"9926589284d3c1a3b050d195bffcb9ef298753462e9d442286daff63296c6237\": container with ID starting with 9926589284d3c1a3b050d195bffcb9ef298753462e9d442286daff63296c6237 not found: ID does not exist" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.247409 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44" path="/var/lib/kubelet/pods/d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44/volumes" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.249245 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd79fa7b-1665-4850-a916-7528854be201" path="/var/lib/kubelet/pods/fd79fa7b-1665-4850-a916-7528854be201/volumes" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.572402 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-j4qb5"] Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.575006 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-j4qb5" podUID="62de45bc-ae71-4802-858e-2a9ac94455ce" containerName="registry-server" containerID="cri-o://206604e60dbe468d10f0cce02adf6ccdfc784bc473b8a4f1d261d3842459447d" gracePeriod=30 Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.585228 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-77c6r"] Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.585598 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-77c6r" podUID="d31f352c-2360-4d7e-bb8d-6bfa04257c06" containerName="registry-server" containerID="cri-o://716069339d4d31658a8d1e4d98cf673bc77d9ad7fb3435290348b5014255ab35" gracePeriod=30 Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.597857 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-w8khp"] Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.598128 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-w8khp" podUID="71a8af09-48a3-4be5-b08a-7e5381ecca76" containerName="registry-server" containerID="cri-o://77f6004d97d7dedc017a5790b625ca046fd76fab55fab60c32e0a17919c753df" gracePeriod=30 Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.602048 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-plzgn"] Nov 28 13:32:54 crc kubenswrapper[4857]: E1128 13:32:54.603129 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" 
containerID="77f6004d97d7dedc017a5790b625ca046fd76fab55fab60c32e0a17919c753df" cmd=["grpc_health_probe","-addr=:50051"] Nov 28 13:32:54 crc kubenswrapper[4857]: E1128 13:32:54.604404 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="77f6004d97d7dedc017a5790b625ca046fd76fab55fab60c32e0a17919c753df" cmd=["grpc_health_probe","-addr=:50051"] Nov 28 13:32:54 crc kubenswrapper[4857]: E1128 13:32:54.605319 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="77f6004d97d7dedc017a5790b625ca046fd76fab55fab60c32e0a17919c753df" cmd=["grpc_health_probe","-addr=:50051"] Nov 28 13:32:54 crc kubenswrapper[4857]: E1128 13:32:54.605352 4857 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-marketplace/community-operators-w8khp" podUID="71a8af09-48a3-4be5-b08a-7e5381ecca76" containerName="registry-server" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.607368 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-plzgn" podUID="51677670-528c-40ba-acf4-e9b506e48a84" containerName="marketplace-operator" containerID="cri-o://9bed02586a31ead7aba25f3670f316c1f4bbe268eadecef6bd0800dde0cbb02f" gracePeriod=30 Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.620545 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-gh8mn"] Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.620848 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-gh8mn" podUID="0642909d-f900-49c8-919f-921a3fc66ac8" containerName="registry-server" containerID="cri-o://53c68d091d92bed0ccf1da5024eed145412eed7da3f2d16466b1303b3717855c" gracePeriod=30 Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.626038 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-nx7ct"] Nov 28 13:32:54 crc kubenswrapper[4857]: E1128 13:32:54.628367 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3622bd49-2e24-4a9e-9647-754e9797c2e1" containerName="extract-utilities" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.628393 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="3622bd49-2e24-4a9e-9647-754e9797c2e1" containerName="extract-utilities" Nov 28 13:32:54 crc kubenswrapper[4857]: E1128 13:32:54.628408 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd79fa7b-1665-4850-a916-7528854be201" containerName="registry-server" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.628415 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd79fa7b-1665-4850-a916-7528854be201" containerName="registry-server" Nov 28 13:32:54 crc kubenswrapper[4857]: E1128 13:32:54.628427 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3622bd49-2e24-4a9e-9647-754e9797c2e1" containerName="extract-content" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.628436 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="3622bd49-2e24-4a9e-9647-754e9797c2e1" 
containerName="extract-content" Nov 28 13:32:54 crc kubenswrapper[4857]: E1128 13:32:54.628447 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15ccd56f-f02e-475e-b700-a557ce80d514" containerName="pruner" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.628453 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="15ccd56f-f02e-475e-b700-a557ce80d514" containerName="pruner" Nov 28 13:32:54 crc kubenswrapper[4857]: E1128 13:32:54.628463 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd79fa7b-1665-4850-a916-7528854be201" containerName="extract-content" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.628471 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd79fa7b-1665-4850-a916-7528854be201" containerName="extract-content" Nov 28 13:32:54 crc kubenswrapper[4857]: E1128 13:32:54.628482 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44" containerName="registry-server" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.628488 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44" containerName="registry-server" Nov 28 13:32:54 crc kubenswrapper[4857]: E1128 13:32:54.628497 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd79fa7b-1665-4850-a916-7528854be201" containerName="extract-utilities" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.628503 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd79fa7b-1665-4850-a916-7528854be201" containerName="extract-utilities" Nov 28 13:32:54 crc kubenswrapper[4857]: E1128 13:32:54.628512 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44" containerName="extract-content" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.628520 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44" containerName="extract-content" Nov 28 13:32:54 crc kubenswrapper[4857]: E1128 13:32:54.628531 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3622bd49-2e24-4a9e-9647-754e9797c2e1" containerName="registry-server" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.628538 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="3622bd49-2e24-4a9e-9647-754e9797c2e1" containerName="registry-server" Nov 28 13:32:54 crc kubenswrapper[4857]: E1128 13:32:54.628549 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44" containerName="extract-utilities" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.628556 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44" containerName="extract-utilities" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.628683 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="15ccd56f-f02e-475e-b700-a557ce80d514" containerName="pruner" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.628698 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="3622bd49-2e24-4a9e-9647-754e9797c2e1" containerName="registry-server" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.628709 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="fd79fa7b-1665-4850-a916-7528854be201" containerName="registry-server" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.628719 4857 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="d6b91a3f-eb7e-4ee1-a219-17f7e9b61a44" containerName="registry-server" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.629761 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-nx7ct" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.636413 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qf98m"] Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.636479 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-nx7ct"] Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.636703 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-qf98m" podUID="1c8a00dc-9bad-4bb0-8003-9b8eb43299cd" containerName="registry-server" containerID="cri-o://71cb86221f1211097136652ea505662bcbd330b8e575189c88dd298802458c93" gracePeriod=30 Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.798229 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nvfhn\" (UniqueName: \"kubernetes.io/projected/de8315dd-951d-4fe6-a8a9-bc4dd3094743-kube-api-access-nvfhn\") pod \"marketplace-operator-79b997595-nx7ct\" (UID: \"de8315dd-951d-4fe6-a8a9-bc4dd3094743\") " pod="openshift-marketplace/marketplace-operator-79b997595-nx7ct" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.798298 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/de8315dd-951d-4fe6-a8a9-bc4dd3094743-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-nx7ct\" (UID: \"de8315dd-951d-4fe6-a8a9-bc4dd3094743\") " pod="openshift-marketplace/marketplace-operator-79b997595-nx7ct" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.798326 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/de8315dd-951d-4fe6-a8a9-bc4dd3094743-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-nx7ct\" (UID: \"de8315dd-951d-4fe6-a8a9-bc4dd3094743\") " pod="openshift-marketplace/marketplace-operator-79b997595-nx7ct" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.899762 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nvfhn\" (UniqueName: \"kubernetes.io/projected/de8315dd-951d-4fe6-a8a9-bc4dd3094743-kube-api-access-nvfhn\") pod \"marketplace-operator-79b997595-nx7ct\" (UID: \"de8315dd-951d-4fe6-a8a9-bc4dd3094743\") " pod="openshift-marketplace/marketplace-operator-79b997595-nx7ct" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.899846 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/de8315dd-951d-4fe6-a8a9-bc4dd3094743-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-nx7ct\" (UID: \"de8315dd-951d-4fe6-a8a9-bc4dd3094743\") " pod="openshift-marketplace/marketplace-operator-79b997595-nx7ct" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.899884 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/de8315dd-951d-4fe6-a8a9-bc4dd3094743-marketplace-trusted-ca\") pod 
\"marketplace-operator-79b997595-nx7ct\" (UID: \"de8315dd-951d-4fe6-a8a9-bc4dd3094743\") " pod="openshift-marketplace/marketplace-operator-79b997595-nx7ct" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.901095 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/de8315dd-951d-4fe6-a8a9-bc4dd3094743-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-nx7ct\" (UID: \"de8315dd-951d-4fe6-a8a9-bc4dd3094743\") " pod="openshift-marketplace/marketplace-operator-79b997595-nx7ct" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.907687 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/de8315dd-951d-4fe6-a8a9-bc4dd3094743-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-nx7ct\" (UID: \"de8315dd-951d-4fe6-a8a9-bc4dd3094743\") " pod="openshift-marketplace/marketplace-operator-79b997595-nx7ct" Nov 28 13:32:54 crc kubenswrapper[4857]: I1128 13:32:54.921866 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nvfhn\" (UniqueName: \"kubernetes.io/projected/de8315dd-951d-4fe6-a8a9-bc4dd3094743-kube-api-access-nvfhn\") pod \"marketplace-operator-79b997595-nx7ct\" (UID: \"de8315dd-951d-4fe6-a8a9-bc4dd3094743\") " pod="openshift-marketplace/marketplace-operator-79b997595-nx7ct" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.031215 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-nx7ct" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.035683 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-j4qb5" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.074538 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-w8khp" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.078436 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-77c6r" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.085743 4857 generic.go:334] "Generic (PLEG): container finished" podID="0642909d-f900-49c8-919f-921a3fc66ac8" containerID="53c68d091d92bed0ccf1da5024eed145412eed7da3f2d16466b1303b3717855c" exitCode=0 Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.085844 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gh8mn" event={"ID":"0642909d-f900-49c8-919f-921a3fc66ac8","Type":"ContainerDied","Data":"53c68d091d92bed0ccf1da5024eed145412eed7da3f2d16466b1303b3717855c"} Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.096037 4857 generic.go:334] "Generic (PLEG): container finished" podID="d31f352c-2360-4d7e-bb8d-6bfa04257c06" containerID="716069339d4d31658a8d1e4d98cf673bc77d9ad7fb3435290348b5014255ab35" exitCode=0 Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.096129 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-77c6r" event={"ID":"d31f352c-2360-4d7e-bb8d-6bfa04257c06","Type":"ContainerDied","Data":"716069339d4d31658a8d1e4d98cf673bc77d9ad7fb3435290348b5014255ab35"} Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.096155 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-77c6r" event={"ID":"d31f352c-2360-4d7e-bb8d-6bfa04257c06","Type":"ContainerDied","Data":"73827bf44896e6298b3340a5cb25252982810257be064de53f20edb30bfd0993"} Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.096171 4857 scope.go:117] "RemoveContainer" containerID="716069339d4d31658a8d1e4d98cf673bc77d9ad7fb3435290348b5014255ab35" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.096268 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-77c6r" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.151777 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-plzgn" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.162285 4857 generic.go:334] "Generic (PLEG): container finished" podID="71a8af09-48a3-4be5-b08a-7e5381ecca76" containerID="77f6004d97d7dedc017a5790b625ca046fd76fab55fab60c32e0a17919c753df" exitCode=0 Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.162402 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w8khp" event={"ID":"71a8af09-48a3-4be5-b08a-7e5381ecca76","Type":"ContainerDied","Data":"77f6004d97d7dedc017a5790b625ca046fd76fab55fab60c32e0a17919c753df"} Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.162435 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-w8khp" event={"ID":"71a8af09-48a3-4be5-b08a-7e5381ecca76","Type":"ContainerDied","Data":"d81f89d4bdccc28deb9514a7749daf069cccbd17d9525ec15113ca198ce906ff"} Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.162524 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-w8khp" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.182317 4857 scope.go:117] "RemoveContainer" containerID="65adce270adcb525d9dd7c5488f4c765885e23017317895e548629ef7739df4a" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.187291 4857 generic.go:334] "Generic (PLEG): container finished" podID="1c8a00dc-9bad-4bb0-8003-9b8eb43299cd" containerID="71cb86221f1211097136652ea505662bcbd330b8e575189c88dd298802458c93" exitCode=0 Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.187349 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qf98m" event={"ID":"1c8a00dc-9bad-4bb0-8003-9b8eb43299cd","Type":"ContainerDied","Data":"71cb86221f1211097136652ea505662bcbd330b8e575189c88dd298802458c93"} Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.195545 4857 generic.go:334] "Generic (PLEG): container finished" podID="62de45bc-ae71-4802-858e-2a9ac94455ce" containerID="206604e60dbe468d10f0cce02adf6ccdfc784bc473b8a4f1d261d3842459447d" exitCode=0 Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.195617 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j4qb5" event={"ID":"62de45bc-ae71-4802-858e-2a9ac94455ce","Type":"ContainerDied","Data":"206604e60dbe468d10f0cce02adf6ccdfc784bc473b8a4f1d261d3842459447d"} Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.195643 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j4qb5" event={"ID":"62de45bc-ae71-4802-858e-2a9ac94455ce","Type":"ContainerDied","Data":"9b89a3609043e70399476d077cd4771921168bd2d9b112443a8614fc1d604435"} Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.195685 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-j4qb5" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.196011 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gh8mn" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.197554 4857 generic.go:334] "Generic (PLEG): container finished" podID="51677670-528c-40ba-acf4-e9b506e48a84" containerID="9bed02586a31ead7aba25f3670f316c1f4bbe268eadecef6bd0800dde0cbb02f" exitCode=0 Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.197596 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-plzgn" event={"ID":"51677670-528c-40ba-acf4-e9b506e48a84","Type":"ContainerDied","Data":"9bed02586a31ead7aba25f3670f316c1f4bbe268eadecef6bd0800dde0cbb02f"} Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.197625 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-plzgn" event={"ID":"51677670-528c-40ba-acf4-e9b506e48a84","Type":"ContainerDied","Data":"121ea7cdf6912ff4fd343a3df81f8cc746720a393778452fd798eeea488b42ac"} Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.197647 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-plzgn" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.207895 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-qf98m" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.211605 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/71a8af09-48a3-4be5-b08a-7e5381ecca76-utilities\") pod \"71a8af09-48a3-4be5-b08a-7e5381ecca76\" (UID: \"71a8af09-48a3-4be5-b08a-7e5381ecca76\") " Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.211654 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kthj6\" (UniqueName: \"kubernetes.io/projected/71a8af09-48a3-4be5-b08a-7e5381ecca76-kube-api-access-kthj6\") pod \"71a8af09-48a3-4be5-b08a-7e5381ecca76\" (UID: \"71a8af09-48a3-4be5-b08a-7e5381ecca76\") " Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.211707 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62de45bc-ae71-4802-858e-2a9ac94455ce-utilities\") pod \"62de45bc-ae71-4802-858e-2a9ac94455ce\" (UID: \"62de45bc-ae71-4802-858e-2a9ac94455ce\") " Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.211745 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m4m4d\" (UniqueName: \"kubernetes.io/projected/d31f352c-2360-4d7e-bb8d-6bfa04257c06-kube-api-access-m4m4d\") pod \"d31f352c-2360-4d7e-bb8d-6bfa04257c06\" (UID: \"d31f352c-2360-4d7e-bb8d-6bfa04257c06\") " Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.211805 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wms8x\" (UniqueName: \"kubernetes.io/projected/62de45bc-ae71-4802-858e-2a9ac94455ce-kube-api-access-wms8x\") pod \"62de45bc-ae71-4802-858e-2a9ac94455ce\" (UID: \"62de45bc-ae71-4802-858e-2a9ac94455ce\") " Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.211835 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d31f352c-2360-4d7e-bb8d-6bfa04257c06-utilities\") pod \"d31f352c-2360-4d7e-bb8d-6bfa04257c06\" (UID: \"d31f352c-2360-4d7e-bb8d-6bfa04257c06\") " Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.211855 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/71a8af09-48a3-4be5-b08a-7e5381ecca76-catalog-content\") pod \"71a8af09-48a3-4be5-b08a-7e5381ecca76\" (UID: \"71a8af09-48a3-4be5-b08a-7e5381ecca76\") " Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.211882 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d31f352c-2360-4d7e-bb8d-6bfa04257c06-catalog-content\") pod \"d31f352c-2360-4d7e-bb8d-6bfa04257c06\" (UID: \"d31f352c-2360-4d7e-bb8d-6bfa04257c06\") " Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.211905 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62de45bc-ae71-4802-858e-2a9ac94455ce-catalog-content\") pod \"62de45bc-ae71-4802-858e-2a9ac94455ce\" (UID: \"62de45bc-ae71-4802-858e-2a9ac94455ce\") " Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.213201 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d31f352c-2360-4d7e-bb8d-6bfa04257c06-utilities" (OuterVolumeSpecName: "utilities") pod 
"d31f352c-2360-4d7e-bb8d-6bfa04257c06" (UID: "d31f352c-2360-4d7e-bb8d-6bfa04257c06"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.218313 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/71a8af09-48a3-4be5-b08a-7e5381ecca76-kube-api-access-kthj6" (OuterVolumeSpecName: "kube-api-access-kthj6") pod "71a8af09-48a3-4be5-b08a-7e5381ecca76" (UID: "71a8af09-48a3-4be5-b08a-7e5381ecca76"). InnerVolumeSpecName "kube-api-access-kthj6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.219423 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/71a8af09-48a3-4be5-b08a-7e5381ecca76-utilities" (OuterVolumeSpecName: "utilities") pod "71a8af09-48a3-4be5-b08a-7e5381ecca76" (UID: "71a8af09-48a3-4be5-b08a-7e5381ecca76"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.223515 4857 scope.go:117] "RemoveContainer" containerID="13c25e4592278c57d546b772c25e071fabaeca190930ce223ee389966a5dd052" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.224924 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/62de45bc-ae71-4802-858e-2a9ac94455ce-utilities" (OuterVolumeSpecName: "utilities") pod "62de45bc-ae71-4802-858e-2a9ac94455ce" (UID: "62de45bc-ae71-4802-858e-2a9ac94455ce"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.228264 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d31f352c-2360-4d7e-bb8d-6bfa04257c06-kube-api-access-m4m4d" (OuterVolumeSpecName: "kube-api-access-m4m4d") pod "d31f352c-2360-4d7e-bb8d-6bfa04257c06" (UID: "d31f352c-2360-4d7e-bb8d-6bfa04257c06"). InnerVolumeSpecName "kube-api-access-m4m4d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.232914 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62de45bc-ae71-4802-858e-2a9ac94455ce-kube-api-access-wms8x" (OuterVolumeSpecName: "kube-api-access-wms8x") pod "62de45bc-ae71-4802-858e-2a9ac94455ce" (UID: "62de45bc-ae71-4802-858e-2a9ac94455ce"). InnerVolumeSpecName "kube-api-access-wms8x". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.248639 4857 scope.go:117] "RemoveContainer" containerID="716069339d4d31658a8d1e4d98cf673bc77d9ad7fb3435290348b5014255ab35" Nov 28 13:32:55 crc kubenswrapper[4857]: E1128 13:32:55.249935 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"716069339d4d31658a8d1e4d98cf673bc77d9ad7fb3435290348b5014255ab35\": container with ID starting with 716069339d4d31658a8d1e4d98cf673bc77d9ad7fb3435290348b5014255ab35 not found: ID does not exist" containerID="716069339d4d31658a8d1e4d98cf673bc77d9ad7fb3435290348b5014255ab35" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.250065 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"716069339d4d31658a8d1e4d98cf673bc77d9ad7fb3435290348b5014255ab35"} err="failed to get container status \"716069339d4d31658a8d1e4d98cf673bc77d9ad7fb3435290348b5014255ab35\": rpc error: code = NotFound desc = could not find container \"716069339d4d31658a8d1e4d98cf673bc77d9ad7fb3435290348b5014255ab35\": container with ID starting with 716069339d4d31658a8d1e4d98cf673bc77d9ad7fb3435290348b5014255ab35 not found: ID does not exist" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.250099 4857 scope.go:117] "RemoveContainer" containerID="65adce270adcb525d9dd7c5488f4c765885e23017317895e548629ef7739df4a" Nov 28 13:32:55 crc kubenswrapper[4857]: E1128 13:32:55.251370 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"65adce270adcb525d9dd7c5488f4c765885e23017317895e548629ef7739df4a\": container with ID starting with 65adce270adcb525d9dd7c5488f4c765885e23017317895e548629ef7739df4a not found: ID does not exist" containerID="65adce270adcb525d9dd7c5488f4c765885e23017317895e548629ef7739df4a" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.251428 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"65adce270adcb525d9dd7c5488f4c765885e23017317895e548629ef7739df4a"} err="failed to get container status \"65adce270adcb525d9dd7c5488f4c765885e23017317895e548629ef7739df4a\": rpc error: code = NotFound desc = could not find container \"65adce270adcb525d9dd7c5488f4c765885e23017317895e548629ef7739df4a\": container with ID starting with 65adce270adcb525d9dd7c5488f4c765885e23017317895e548629ef7739df4a not found: ID does not exist" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.251453 4857 scope.go:117] "RemoveContainer" containerID="13c25e4592278c57d546b772c25e071fabaeca190930ce223ee389966a5dd052" Nov 28 13:32:55 crc kubenswrapper[4857]: E1128 13:32:55.252460 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"13c25e4592278c57d546b772c25e071fabaeca190930ce223ee389966a5dd052\": container with ID starting with 13c25e4592278c57d546b772c25e071fabaeca190930ce223ee389966a5dd052 not found: ID does not exist" containerID="13c25e4592278c57d546b772c25e071fabaeca190930ce223ee389966a5dd052" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.252489 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"13c25e4592278c57d546b772c25e071fabaeca190930ce223ee389966a5dd052"} err="failed to get container status \"13c25e4592278c57d546b772c25e071fabaeca190930ce223ee389966a5dd052\": rpc error: code = NotFound desc = could not 
find container \"13c25e4592278c57d546b772c25e071fabaeca190930ce223ee389966a5dd052\": container with ID starting with 13c25e4592278c57d546b772c25e071fabaeca190930ce223ee389966a5dd052 not found: ID does not exist" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.252511 4857 scope.go:117] "RemoveContainer" containerID="77f6004d97d7dedc017a5790b625ca046fd76fab55fab60c32e0a17919c753df" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.276008 4857 scope.go:117] "RemoveContainer" containerID="17c1ca4b14b6790c7f45c5b8099ad54df6a4e11afa491515b2dc28a6c0875550" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.282268 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/71a8af09-48a3-4be5-b08a-7e5381ecca76-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "71a8af09-48a3-4be5-b08a-7e5381ecca76" (UID: "71a8af09-48a3-4be5-b08a-7e5381ecca76"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.290395 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/62de45bc-ae71-4802-858e-2a9ac94455ce-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "62de45bc-ae71-4802-858e-2a9ac94455ce" (UID: "62de45bc-ae71-4802-858e-2a9ac94455ce"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.292274 4857 scope.go:117] "RemoveContainer" containerID="36a6fe64a2d3d16dc593460409a2b9ca4f05e85eb89e328410534ea4c25d3665" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.302527 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d31f352c-2360-4d7e-bb8d-6bfa04257c06-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d31f352c-2360-4d7e-bb8d-6bfa04257c06" (UID: "d31f352c-2360-4d7e-bb8d-6bfa04257c06"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.312979 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c8a00dc-9bad-4bb0-8003-9b8eb43299cd-utilities\") pod \"1c8a00dc-9bad-4bb0-8003-9b8eb43299cd\" (UID: \"1c8a00dc-9bad-4bb0-8003-9b8eb43299cd\") " Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.313023 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0642909d-f900-49c8-919f-921a3fc66ac8-catalog-content\") pod \"0642909d-f900-49c8-919f-921a3fc66ac8\" (UID: \"0642909d-f900-49c8-919f-921a3fc66ac8\") " Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.313122 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ff8z\" (UniqueName: \"kubernetes.io/projected/0642909d-f900-49c8-919f-921a3fc66ac8-kube-api-access-6ff8z\") pod \"0642909d-f900-49c8-919f-921a3fc66ac8\" (UID: \"0642909d-f900-49c8-919f-921a3fc66ac8\") " Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.313178 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8mvbr\" (UniqueName: \"kubernetes.io/projected/51677670-528c-40ba-acf4-e9b506e48a84-kube-api-access-8mvbr\") pod \"51677670-528c-40ba-acf4-e9b506e48a84\" (UID: \"51677670-528c-40ba-acf4-e9b506e48a84\") " Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.313207 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/51677670-528c-40ba-acf4-e9b506e48a84-marketplace-trusted-ca\") pod \"51677670-528c-40ba-acf4-e9b506e48a84\" (UID: \"51677670-528c-40ba-acf4-e9b506e48a84\") " Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.313238 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/51677670-528c-40ba-acf4-e9b506e48a84-marketplace-operator-metrics\") pod \"51677670-528c-40ba-acf4-e9b506e48a84\" (UID: \"51677670-528c-40ba-acf4-e9b506e48a84\") " Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.313259 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4plbp\" (UniqueName: \"kubernetes.io/projected/1c8a00dc-9bad-4bb0-8003-9b8eb43299cd-kube-api-access-4plbp\") pod \"1c8a00dc-9bad-4bb0-8003-9b8eb43299cd\" (UID: \"1c8a00dc-9bad-4bb0-8003-9b8eb43299cd\") " Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.313278 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c8a00dc-9bad-4bb0-8003-9b8eb43299cd-catalog-content\") pod \"1c8a00dc-9bad-4bb0-8003-9b8eb43299cd\" (UID: \"1c8a00dc-9bad-4bb0-8003-9b8eb43299cd\") " Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.313332 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0642909d-f900-49c8-919f-921a3fc66ac8-utilities\") pod \"0642909d-f900-49c8-919f-921a3fc66ac8\" (UID: \"0642909d-f900-49c8-919f-921a3fc66ac8\") " Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.313537 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m4m4d\" (UniqueName: 
\"kubernetes.io/projected/d31f352c-2360-4d7e-bb8d-6bfa04257c06-kube-api-access-m4m4d\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.313554 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wms8x\" (UniqueName: \"kubernetes.io/projected/62de45bc-ae71-4802-858e-2a9ac94455ce-kube-api-access-wms8x\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.313566 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d31f352c-2360-4d7e-bb8d-6bfa04257c06-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.313575 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/71a8af09-48a3-4be5-b08a-7e5381ecca76-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.313584 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d31f352c-2360-4d7e-bb8d-6bfa04257c06-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.313595 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62de45bc-ae71-4802-858e-2a9ac94455ce-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.313605 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/71a8af09-48a3-4be5-b08a-7e5381ecca76-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.313616 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kthj6\" (UniqueName: \"kubernetes.io/projected/71a8af09-48a3-4be5-b08a-7e5381ecca76-kube-api-access-kthj6\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.313628 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62de45bc-ae71-4802-858e-2a9ac94455ce-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.314269 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0642909d-f900-49c8-919f-921a3fc66ac8-utilities" (OuterVolumeSpecName: "utilities") pod "0642909d-f900-49c8-919f-921a3fc66ac8" (UID: "0642909d-f900-49c8-919f-921a3fc66ac8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.314894 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1c8a00dc-9bad-4bb0-8003-9b8eb43299cd-utilities" (OuterVolumeSpecName: "utilities") pod "1c8a00dc-9bad-4bb0-8003-9b8eb43299cd" (UID: "1c8a00dc-9bad-4bb0-8003-9b8eb43299cd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.319284 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/51677670-528c-40ba-acf4-e9b506e48a84-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "51677670-528c-40ba-acf4-e9b506e48a84" (UID: "51677670-528c-40ba-acf4-e9b506e48a84"). InnerVolumeSpecName "marketplace-trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.320266 4857 scope.go:117] "RemoveContainer" containerID="77f6004d97d7dedc017a5790b625ca046fd76fab55fab60c32e0a17919c753df" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.320495 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51677670-528c-40ba-acf4-e9b506e48a84-kube-api-access-8mvbr" (OuterVolumeSpecName: "kube-api-access-8mvbr") pod "51677670-528c-40ba-acf4-e9b506e48a84" (UID: "51677670-528c-40ba-acf4-e9b506e48a84"). InnerVolumeSpecName "kube-api-access-8mvbr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:32:55 crc kubenswrapper[4857]: E1128 13:32:55.320732 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"77f6004d97d7dedc017a5790b625ca046fd76fab55fab60c32e0a17919c753df\": container with ID starting with 77f6004d97d7dedc017a5790b625ca046fd76fab55fab60c32e0a17919c753df not found: ID does not exist" containerID="77f6004d97d7dedc017a5790b625ca046fd76fab55fab60c32e0a17919c753df" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.320772 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"77f6004d97d7dedc017a5790b625ca046fd76fab55fab60c32e0a17919c753df"} err="failed to get container status \"77f6004d97d7dedc017a5790b625ca046fd76fab55fab60c32e0a17919c753df\": rpc error: code = NotFound desc = could not find container \"77f6004d97d7dedc017a5790b625ca046fd76fab55fab60c32e0a17919c753df\": container with ID starting with 77f6004d97d7dedc017a5790b625ca046fd76fab55fab60c32e0a17919c753df not found: ID does not exist" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.320801 4857 scope.go:117] "RemoveContainer" containerID="17c1ca4b14b6790c7f45c5b8099ad54df6a4e11afa491515b2dc28a6c0875550" Nov 28 13:32:55 crc kubenswrapper[4857]: E1128 13:32:55.321086 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"17c1ca4b14b6790c7f45c5b8099ad54df6a4e11afa491515b2dc28a6c0875550\": container with ID starting with 17c1ca4b14b6790c7f45c5b8099ad54df6a4e11afa491515b2dc28a6c0875550 not found: ID does not exist" containerID="17c1ca4b14b6790c7f45c5b8099ad54df6a4e11afa491515b2dc28a6c0875550" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.321115 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"17c1ca4b14b6790c7f45c5b8099ad54df6a4e11afa491515b2dc28a6c0875550"} err="failed to get container status \"17c1ca4b14b6790c7f45c5b8099ad54df6a4e11afa491515b2dc28a6c0875550\": rpc error: code = NotFound desc = could not find container \"17c1ca4b14b6790c7f45c5b8099ad54df6a4e11afa491515b2dc28a6c0875550\": container with ID starting with 17c1ca4b14b6790c7f45c5b8099ad54df6a4e11afa491515b2dc28a6c0875550 not found: ID does not exist" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.321132 4857 scope.go:117] "RemoveContainer" containerID="36a6fe64a2d3d16dc593460409a2b9ca4f05e85eb89e328410534ea4c25d3665" Nov 28 13:32:55 crc kubenswrapper[4857]: E1128 13:32:55.321349 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"36a6fe64a2d3d16dc593460409a2b9ca4f05e85eb89e328410534ea4c25d3665\": container with ID starting with 36a6fe64a2d3d16dc593460409a2b9ca4f05e85eb89e328410534ea4c25d3665 not found: ID does not 
exist" containerID="36a6fe64a2d3d16dc593460409a2b9ca4f05e85eb89e328410534ea4c25d3665" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.321376 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"36a6fe64a2d3d16dc593460409a2b9ca4f05e85eb89e328410534ea4c25d3665"} err="failed to get container status \"36a6fe64a2d3d16dc593460409a2b9ca4f05e85eb89e328410534ea4c25d3665\": rpc error: code = NotFound desc = could not find container \"36a6fe64a2d3d16dc593460409a2b9ca4f05e85eb89e328410534ea4c25d3665\": container with ID starting with 36a6fe64a2d3d16dc593460409a2b9ca4f05e85eb89e328410534ea4c25d3665 not found: ID does not exist" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.321400 4857 scope.go:117] "RemoveContainer" containerID="206604e60dbe468d10f0cce02adf6ccdfc784bc473b8a4f1d261d3842459447d" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.321793 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c8a00dc-9bad-4bb0-8003-9b8eb43299cd-kube-api-access-4plbp" (OuterVolumeSpecName: "kube-api-access-4plbp") pod "1c8a00dc-9bad-4bb0-8003-9b8eb43299cd" (UID: "1c8a00dc-9bad-4bb0-8003-9b8eb43299cd"). InnerVolumeSpecName "kube-api-access-4plbp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.322402 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/51677670-528c-40ba-acf4-e9b506e48a84-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "51677670-528c-40ba-acf4-e9b506e48a84" (UID: "51677670-528c-40ba-acf4-e9b506e48a84"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.324103 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0642909d-f900-49c8-919f-921a3fc66ac8-kube-api-access-6ff8z" (OuterVolumeSpecName: "kube-api-access-6ff8z") pod "0642909d-f900-49c8-919f-921a3fc66ac8" (UID: "0642909d-f900-49c8-919f-921a3fc66ac8"). InnerVolumeSpecName "kube-api-access-6ff8z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.336249 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0642909d-f900-49c8-919f-921a3fc66ac8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0642909d-f900-49c8-919f-921a3fc66ac8" (UID: "0642909d-f900-49c8-919f-921a3fc66ac8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.341480 4857 scope.go:117] "RemoveContainer" containerID="ee42b773cd8cbd683d4e5800da4eab2e0e444740008a32c08b5019b5e39cd019" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.365362 4857 scope.go:117] "RemoveContainer" containerID="6e4e430a4a5dbb73fb0d10eb93118efaa31135b7bf57e57adfe53f75b2ad1e02" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.379939 4857 scope.go:117] "RemoveContainer" containerID="206604e60dbe468d10f0cce02adf6ccdfc784bc473b8a4f1d261d3842459447d" Nov 28 13:32:55 crc kubenswrapper[4857]: E1128 13:32:55.380385 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"206604e60dbe468d10f0cce02adf6ccdfc784bc473b8a4f1d261d3842459447d\": container with ID starting with 206604e60dbe468d10f0cce02adf6ccdfc784bc473b8a4f1d261d3842459447d not found: ID does not exist" containerID="206604e60dbe468d10f0cce02adf6ccdfc784bc473b8a4f1d261d3842459447d" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.380419 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"206604e60dbe468d10f0cce02adf6ccdfc784bc473b8a4f1d261d3842459447d"} err="failed to get container status \"206604e60dbe468d10f0cce02adf6ccdfc784bc473b8a4f1d261d3842459447d\": rpc error: code = NotFound desc = could not find container \"206604e60dbe468d10f0cce02adf6ccdfc784bc473b8a4f1d261d3842459447d\": container with ID starting with 206604e60dbe468d10f0cce02adf6ccdfc784bc473b8a4f1d261d3842459447d not found: ID does not exist" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.380444 4857 scope.go:117] "RemoveContainer" containerID="ee42b773cd8cbd683d4e5800da4eab2e0e444740008a32c08b5019b5e39cd019" Nov 28 13:32:55 crc kubenswrapper[4857]: E1128 13:32:55.380764 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ee42b773cd8cbd683d4e5800da4eab2e0e444740008a32c08b5019b5e39cd019\": container with ID starting with ee42b773cd8cbd683d4e5800da4eab2e0e444740008a32c08b5019b5e39cd019 not found: ID does not exist" containerID="ee42b773cd8cbd683d4e5800da4eab2e0e444740008a32c08b5019b5e39cd019" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.380786 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee42b773cd8cbd683d4e5800da4eab2e0e444740008a32c08b5019b5e39cd019"} err="failed to get container status \"ee42b773cd8cbd683d4e5800da4eab2e0e444740008a32c08b5019b5e39cd019\": rpc error: code = NotFound desc = could not find container \"ee42b773cd8cbd683d4e5800da4eab2e0e444740008a32c08b5019b5e39cd019\": container with ID starting with ee42b773cd8cbd683d4e5800da4eab2e0e444740008a32c08b5019b5e39cd019 not found: ID does not exist" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.380801 4857 scope.go:117] "RemoveContainer" containerID="6e4e430a4a5dbb73fb0d10eb93118efaa31135b7bf57e57adfe53f75b2ad1e02" Nov 28 13:32:55 crc kubenswrapper[4857]: E1128 13:32:55.381229 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6e4e430a4a5dbb73fb0d10eb93118efaa31135b7bf57e57adfe53f75b2ad1e02\": container with ID starting with 6e4e430a4a5dbb73fb0d10eb93118efaa31135b7bf57e57adfe53f75b2ad1e02 not found: ID does not exist" containerID="6e4e430a4a5dbb73fb0d10eb93118efaa31135b7bf57e57adfe53f75b2ad1e02" Nov 28 13:32:55 crc 
kubenswrapper[4857]: I1128 13:32:55.381253 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e4e430a4a5dbb73fb0d10eb93118efaa31135b7bf57e57adfe53f75b2ad1e02"} err="failed to get container status \"6e4e430a4a5dbb73fb0d10eb93118efaa31135b7bf57e57adfe53f75b2ad1e02\": rpc error: code = NotFound desc = could not find container \"6e4e430a4a5dbb73fb0d10eb93118efaa31135b7bf57e57adfe53f75b2ad1e02\": container with ID starting with 6e4e430a4a5dbb73fb0d10eb93118efaa31135b7bf57e57adfe53f75b2ad1e02 not found: ID does not exist" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.381268 4857 scope.go:117] "RemoveContainer" containerID="9bed02586a31ead7aba25f3670f316c1f4bbe268eadecef6bd0800dde0cbb02f" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.415047 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0642909d-f900-49c8-919f-921a3fc66ac8-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.415077 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0642909d-f900-49c8-919f-921a3fc66ac8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.415087 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c8a00dc-9bad-4bb0-8003-9b8eb43299cd-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.415097 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ff8z\" (UniqueName: \"kubernetes.io/projected/0642909d-f900-49c8-919f-921a3fc66ac8-kube-api-access-6ff8z\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.415107 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8mvbr\" (UniqueName: \"kubernetes.io/projected/51677670-528c-40ba-acf4-e9b506e48a84-kube-api-access-8mvbr\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.415117 4857 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/51677670-528c-40ba-acf4-e9b506e48a84-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.415125 4857 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/51677670-528c-40ba-acf4-e9b506e48a84-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.415134 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4plbp\" (UniqueName: \"kubernetes.io/projected/1c8a00dc-9bad-4bb0-8003-9b8eb43299cd-kube-api-access-4plbp\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.426276 4857 scope.go:117] "RemoveContainer" containerID="9bed02586a31ead7aba25f3670f316c1f4bbe268eadecef6bd0800dde0cbb02f" Nov 28 13:32:55 crc kubenswrapper[4857]: E1128 13:32:55.427121 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9bed02586a31ead7aba25f3670f316c1f4bbe268eadecef6bd0800dde0cbb02f\": container with ID starting with 9bed02586a31ead7aba25f3670f316c1f4bbe268eadecef6bd0800dde0cbb02f not found: ID does not exist" 
containerID="9bed02586a31ead7aba25f3670f316c1f4bbe268eadecef6bd0800dde0cbb02f" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.427163 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9bed02586a31ead7aba25f3670f316c1f4bbe268eadecef6bd0800dde0cbb02f"} err="failed to get container status \"9bed02586a31ead7aba25f3670f316c1f4bbe268eadecef6bd0800dde0cbb02f\": rpc error: code = NotFound desc = could not find container \"9bed02586a31ead7aba25f3670f316c1f4bbe268eadecef6bd0800dde0cbb02f\": container with ID starting with 9bed02586a31ead7aba25f3670f316c1f4bbe268eadecef6bd0800dde0cbb02f not found: ID does not exist" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.444338 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-77c6r"] Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.447246 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1c8a00dc-9bad-4bb0-8003-9b8eb43299cd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1c8a00dc-9bad-4bb0-8003-9b8eb43299cd" (UID: "1c8a00dc-9bad-4bb0-8003-9b8eb43299cd"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.453026 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-77c6r"] Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.499221 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-w8khp"] Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.507061 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-w8khp"] Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.519626 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c8a00dc-9bad-4bb0-8003-9b8eb43299cd-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.541504 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-nx7ct"] Nov 28 13:32:55 crc kubenswrapper[4857]: W1128 13:32:55.557616 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podde8315dd_951d_4fe6_a8a9_bc4dd3094743.slice/crio-18bdafd44f937d256a1aeb1c468ab5a42d71efa287ebdb491ac742902ebd1482 WatchSource:0}: Error finding container 18bdafd44f937d256a1aeb1c468ab5a42d71efa287ebdb491ac742902ebd1482: Status 404 returned error can't find the container with id 18bdafd44f937d256a1aeb1c468ab5a42d71efa287ebdb491ac742902ebd1482 Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.574879 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-plzgn"] Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.576680 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-plzgn"] Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.586700 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-j4qb5"] Nov 28 13:32:55 crc kubenswrapper[4857]: I1128 13:32:55.598454 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-j4qb5"] Nov 28 13:32:55 crc 
kubenswrapper[4857]: I1128 13:32:55.603408 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-m658x"] Nov 28 13:32:56 crc kubenswrapper[4857]: I1128 13:32:56.204661 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-nx7ct" event={"ID":"de8315dd-951d-4fe6-a8a9-bc4dd3094743","Type":"ContainerStarted","Data":"2e95b2d97bc8cd52a9710f0be7eebcda78f01e35a0358077b4001bb6a52e2ac6"} Nov 28 13:32:56 crc kubenswrapper[4857]: I1128 13:32:56.205043 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-nx7ct" Nov 28 13:32:56 crc kubenswrapper[4857]: I1128 13:32:56.205093 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-nx7ct" event={"ID":"de8315dd-951d-4fe6-a8a9-bc4dd3094743","Type":"ContainerStarted","Data":"18bdafd44f937d256a1aeb1c468ab5a42d71efa287ebdb491ac742902ebd1482"} Nov 28 13:32:56 crc kubenswrapper[4857]: I1128 13:32:56.207663 4857 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-nx7ct container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.56:8080/healthz\": dial tcp 10.217.0.56:8080: connect: connection refused" start-of-body= Nov 28 13:32:56 crc kubenswrapper[4857]: I1128 13:32:56.207715 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-nx7ct" podUID="de8315dd-951d-4fe6-a8a9-bc4dd3094743" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.56:8080/healthz\": dial tcp 10.217.0.56:8080: connect: connection refused" Nov 28 13:32:56 crc kubenswrapper[4857]: I1128 13:32:56.207885 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gh8mn" event={"ID":"0642909d-f900-49c8-919f-921a3fc66ac8","Type":"ContainerDied","Data":"4ce630cc4f189f8f55a1b3eb3a6857c512f5e096af35c5e8191b2adb6b289aa2"} Nov 28 13:32:56 crc kubenswrapper[4857]: I1128 13:32:56.207923 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gh8mn" Nov 28 13:32:56 crc kubenswrapper[4857]: I1128 13:32:56.207993 4857 scope.go:117] "RemoveContainer" containerID="53c68d091d92bed0ccf1da5024eed145412eed7da3f2d16466b1303b3717855c" Nov 28 13:32:56 crc kubenswrapper[4857]: I1128 13:32:56.213450 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qf98m" event={"ID":"1c8a00dc-9bad-4bb0-8003-9b8eb43299cd","Type":"ContainerDied","Data":"55135305bbcb21018c21607ef5a2b0d1f47a18543544c31964202a2409e2cf0a"} Nov 28 13:32:56 crc kubenswrapper[4857]: I1128 13:32:56.213501 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-qf98m" Nov 28 13:32:56 crc kubenswrapper[4857]: I1128 13:32:56.225274 4857 scope.go:117] "RemoveContainer" containerID="8d2d6a32f4f09bab0a235a52d7f31166158cb8c2d119a4988ad8f37f02cb0ed6" Nov 28 13:32:56 crc kubenswrapper[4857]: I1128 13:32:56.239388 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-nx7ct" podStartSLOduration=2.239370994 podStartE2EDuration="2.239370994s" podCreationTimestamp="2025-11-28 13:32:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:32:56.22786545 +0000 UTC m=+226.351806887" watchObservedRunningTime="2025-11-28 13:32:56.239370994 +0000 UTC m=+226.363312431" Nov 28 13:32:56 crc kubenswrapper[4857]: I1128 13:32:56.240767 4857 scope.go:117] "RemoveContainer" containerID="85c0dd82a18e3a7ed179d6d44a93a215dea45da877905b6ed336f6b9a4ea671f" Nov 28 13:32:56 crc kubenswrapper[4857]: I1128 13:32:56.259111 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="51677670-528c-40ba-acf4-e9b506e48a84" path="/var/lib/kubelet/pods/51677670-528c-40ba-acf4-e9b506e48a84/volumes" Nov 28 13:32:56 crc kubenswrapper[4857]: I1128 13:32:56.259879 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="62de45bc-ae71-4802-858e-2a9ac94455ce" path="/var/lib/kubelet/pods/62de45bc-ae71-4802-858e-2a9ac94455ce/volumes" Nov 28 13:32:56 crc kubenswrapper[4857]: I1128 13:32:56.260642 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="71a8af09-48a3-4be5-b08a-7e5381ecca76" path="/var/lib/kubelet/pods/71a8af09-48a3-4be5-b08a-7e5381ecca76/volumes" Nov 28 13:32:56 crc kubenswrapper[4857]: I1128 13:32:56.262590 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d31f352c-2360-4d7e-bb8d-6bfa04257c06" path="/var/lib/kubelet/pods/d31f352c-2360-4d7e-bb8d-6bfa04257c06/volumes" Nov 28 13:32:56 crc kubenswrapper[4857]: I1128 13:32:56.263335 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-gh8mn"] Nov 28 13:32:56 crc kubenswrapper[4857]: I1128 13:32:56.263368 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-gh8mn"] Nov 28 13:32:56 crc kubenswrapper[4857]: I1128 13:32:56.265908 4857 scope.go:117] "RemoveContainer" containerID="71cb86221f1211097136652ea505662bcbd330b8e575189c88dd298802458c93" Nov 28 13:32:56 crc kubenswrapper[4857]: I1128 13:32:56.267067 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qf98m"] Nov 28 13:32:56 crc kubenswrapper[4857]: I1128 13:32:56.269832 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-qf98m"] Nov 28 13:32:56 crc kubenswrapper[4857]: I1128 13:32:56.283334 4857 scope.go:117] "RemoveContainer" containerID="46bcd1223e6d52f6396d8e13417c01c6715a42841e7a1e07eaac5946d9333c4b" Nov 28 13:32:56 crc kubenswrapper[4857]: I1128 13:32:56.301708 4857 scope.go:117] "RemoveContainer" containerID="99e157b50976c52db9885bfc9d48b9006a36a74e1293891fbd6455366e696c62" Nov 28 13:32:57 crc kubenswrapper[4857]: I1128 13:32:57.232647 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-nx7ct" Nov 28 13:32:57 crc kubenswrapper[4857]: I1128 13:32:57.254755 4857 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openshift-marketplace/certified-operators-t6lt4"] Nov 28 13:32:57 crc kubenswrapper[4857]: E1128 13:32:57.255000 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d31f352c-2360-4d7e-bb8d-6bfa04257c06" containerName="extract-content" Nov 28 13:32:57 crc kubenswrapper[4857]: I1128 13:32:57.255017 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d31f352c-2360-4d7e-bb8d-6bfa04257c06" containerName="extract-content" Nov 28 13:32:57 crc kubenswrapper[4857]: E1128 13:32:57.255031 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51677670-528c-40ba-acf4-e9b506e48a84" containerName="marketplace-operator" Nov 28 13:32:57 crc kubenswrapper[4857]: I1128 13:32:57.255039 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="51677670-528c-40ba-acf4-e9b506e48a84" containerName="marketplace-operator" Nov 28 13:32:57 crc kubenswrapper[4857]: E1128 13:32:57.255052 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d31f352c-2360-4d7e-bb8d-6bfa04257c06" containerName="extract-utilities" Nov 28 13:32:57 crc kubenswrapper[4857]: I1128 13:32:57.255060 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d31f352c-2360-4d7e-bb8d-6bfa04257c06" containerName="extract-utilities" Nov 28 13:32:57 crc kubenswrapper[4857]: E1128 13:32:57.255072 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c8a00dc-9bad-4bb0-8003-9b8eb43299cd" containerName="extract-utilities" Nov 28 13:32:57 crc kubenswrapper[4857]: I1128 13:32:57.255080 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c8a00dc-9bad-4bb0-8003-9b8eb43299cd" containerName="extract-utilities" Nov 28 13:32:57 crc kubenswrapper[4857]: E1128 13:32:57.255091 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71a8af09-48a3-4be5-b08a-7e5381ecca76" containerName="extract-content" Nov 28 13:32:57 crc kubenswrapper[4857]: I1128 13:32:57.255098 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="71a8af09-48a3-4be5-b08a-7e5381ecca76" containerName="extract-content" Nov 28 13:32:57 crc kubenswrapper[4857]: E1128 13:32:57.255109 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0642909d-f900-49c8-919f-921a3fc66ac8" containerName="extract-content" Nov 28 13:32:57 crc kubenswrapper[4857]: I1128 13:32:57.255117 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="0642909d-f900-49c8-919f-921a3fc66ac8" containerName="extract-content" Nov 28 13:32:57 crc kubenswrapper[4857]: E1128 13:32:57.255129 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c8a00dc-9bad-4bb0-8003-9b8eb43299cd" containerName="extract-content" Nov 28 13:32:57 crc kubenswrapper[4857]: I1128 13:32:57.255137 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c8a00dc-9bad-4bb0-8003-9b8eb43299cd" containerName="extract-content" Nov 28 13:32:57 crc kubenswrapper[4857]: E1128 13:32:57.255146 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0642909d-f900-49c8-919f-921a3fc66ac8" containerName="registry-server" Nov 28 13:32:57 crc kubenswrapper[4857]: I1128 13:32:57.255154 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="0642909d-f900-49c8-919f-921a3fc66ac8" containerName="registry-server" Nov 28 13:32:57 crc kubenswrapper[4857]: E1128 13:32:57.255165 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71a8af09-48a3-4be5-b08a-7e5381ecca76" containerName="extract-utilities" Nov 28 13:32:57 crc kubenswrapper[4857]: I1128 13:32:57.255170 4857 
state_mem.go:107] "Deleted CPUSet assignment" podUID="71a8af09-48a3-4be5-b08a-7e5381ecca76" containerName="extract-utilities" Nov 28 13:32:57 crc kubenswrapper[4857]: E1128 13:32:57.255178 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62de45bc-ae71-4802-858e-2a9ac94455ce" containerName="extract-content" Nov 28 13:32:57 crc kubenswrapper[4857]: I1128 13:32:57.255183 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="62de45bc-ae71-4802-858e-2a9ac94455ce" containerName="extract-content" Nov 28 13:32:57 crc kubenswrapper[4857]: E1128 13:32:57.255191 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71a8af09-48a3-4be5-b08a-7e5381ecca76" containerName="registry-server" Nov 28 13:32:57 crc kubenswrapper[4857]: I1128 13:32:57.255198 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="71a8af09-48a3-4be5-b08a-7e5381ecca76" containerName="registry-server" Nov 28 13:32:57 crc kubenswrapper[4857]: E1128 13:32:57.255208 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62de45bc-ae71-4802-858e-2a9ac94455ce" containerName="registry-server" Nov 28 13:32:57 crc kubenswrapper[4857]: I1128 13:32:57.255217 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="62de45bc-ae71-4802-858e-2a9ac94455ce" containerName="registry-server" Nov 28 13:32:57 crc kubenswrapper[4857]: E1128 13:32:57.255224 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c8a00dc-9bad-4bb0-8003-9b8eb43299cd" containerName="registry-server" Nov 28 13:32:57 crc kubenswrapper[4857]: I1128 13:32:57.255231 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c8a00dc-9bad-4bb0-8003-9b8eb43299cd" containerName="registry-server" Nov 28 13:32:57 crc kubenswrapper[4857]: E1128 13:32:57.255241 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0642909d-f900-49c8-919f-921a3fc66ac8" containerName="extract-utilities" Nov 28 13:32:57 crc kubenswrapper[4857]: I1128 13:32:57.255248 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="0642909d-f900-49c8-919f-921a3fc66ac8" containerName="extract-utilities" Nov 28 13:32:57 crc kubenswrapper[4857]: E1128 13:32:57.255258 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d31f352c-2360-4d7e-bb8d-6bfa04257c06" containerName="registry-server" Nov 28 13:32:57 crc kubenswrapper[4857]: I1128 13:32:57.255265 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d31f352c-2360-4d7e-bb8d-6bfa04257c06" containerName="registry-server" Nov 28 13:32:57 crc kubenswrapper[4857]: E1128 13:32:57.255277 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62de45bc-ae71-4802-858e-2a9ac94455ce" containerName="extract-utilities" Nov 28 13:32:57 crc kubenswrapper[4857]: I1128 13:32:57.255285 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="62de45bc-ae71-4802-858e-2a9ac94455ce" containerName="extract-utilities" Nov 28 13:32:57 crc kubenswrapper[4857]: I1128 13:32:57.255385 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="0642909d-f900-49c8-919f-921a3fc66ac8" containerName="registry-server" Nov 28 13:32:57 crc kubenswrapper[4857]: I1128 13:32:57.255402 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d31f352c-2360-4d7e-bb8d-6bfa04257c06" containerName="registry-server" Nov 28 13:32:57 crc kubenswrapper[4857]: I1128 13:32:57.255416 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="71a8af09-48a3-4be5-b08a-7e5381ecca76" containerName="registry-server" Nov 28 13:32:57 crc 
kubenswrapper[4857]: I1128 13:32:57.255423 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="51677670-528c-40ba-acf4-e9b506e48a84" containerName="marketplace-operator" Nov 28 13:32:57 crc kubenswrapper[4857]: I1128 13:32:57.255435 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="62de45bc-ae71-4802-858e-2a9ac94455ce" containerName="registry-server" Nov 28 13:32:57 crc kubenswrapper[4857]: I1128 13:32:57.255445 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c8a00dc-9bad-4bb0-8003-9b8eb43299cd" containerName="registry-server" Nov 28 13:32:57 crc kubenswrapper[4857]: I1128 13:32:57.256432 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-t6lt4" Nov 28 13:32:57 crc kubenswrapper[4857]: I1128 13:32:57.258984 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 28 13:32:57 crc kubenswrapper[4857]: I1128 13:32:57.267678 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-t6lt4"] Nov 28 13:32:57 crc kubenswrapper[4857]: I1128 13:32:57.440714 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q6blx\" (UniqueName: \"kubernetes.io/projected/88424fcd-c8d4-4df4-8176-30471a90470d-kube-api-access-q6blx\") pod \"certified-operators-t6lt4\" (UID: \"88424fcd-c8d4-4df4-8176-30471a90470d\") " pod="openshift-marketplace/certified-operators-t6lt4" Nov 28 13:32:57 crc kubenswrapper[4857]: I1128 13:32:57.440767 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88424fcd-c8d4-4df4-8176-30471a90470d-utilities\") pod \"certified-operators-t6lt4\" (UID: \"88424fcd-c8d4-4df4-8176-30471a90470d\") " pod="openshift-marketplace/certified-operators-t6lt4" Nov 28 13:32:57 crc kubenswrapper[4857]: I1128 13:32:57.440807 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88424fcd-c8d4-4df4-8176-30471a90470d-catalog-content\") pod \"certified-operators-t6lt4\" (UID: \"88424fcd-c8d4-4df4-8176-30471a90470d\") " pod="openshift-marketplace/certified-operators-t6lt4" Nov 28 13:32:57 crc kubenswrapper[4857]: I1128 13:32:57.542127 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88424fcd-c8d4-4df4-8176-30471a90470d-catalog-content\") pod \"certified-operators-t6lt4\" (UID: \"88424fcd-c8d4-4df4-8176-30471a90470d\") " pod="openshift-marketplace/certified-operators-t6lt4" Nov 28 13:32:57 crc kubenswrapper[4857]: I1128 13:32:57.542211 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q6blx\" (UniqueName: \"kubernetes.io/projected/88424fcd-c8d4-4df4-8176-30471a90470d-kube-api-access-q6blx\") pod \"certified-operators-t6lt4\" (UID: \"88424fcd-c8d4-4df4-8176-30471a90470d\") " pod="openshift-marketplace/certified-operators-t6lt4" Nov 28 13:32:57 crc kubenswrapper[4857]: I1128 13:32:57.542234 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88424fcd-c8d4-4df4-8176-30471a90470d-utilities\") pod \"certified-operators-t6lt4\" (UID: \"88424fcd-c8d4-4df4-8176-30471a90470d\") " 
pod="openshift-marketplace/certified-operators-t6lt4" Nov 28 13:32:57 crc kubenswrapper[4857]: I1128 13:32:57.542632 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88424fcd-c8d4-4df4-8176-30471a90470d-catalog-content\") pod \"certified-operators-t6lt4\" (UID: \"88424fcd-c8d4-4df4-8176-30471a90470d\") " pod="openshift-marketplace/certified-operators-t6lt4" Nov 28 13:32:57 crc kubenswrapper[4857]: I1128 13:32:57.542685 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88424fcd-c8d4-4df4-8176-30471a90470d-utilities\") pod \"certified-operators-t6lt4\" (UID: \"88424fcd-c8d4-4df4-8176-30471a90470d\") " pod="openshift-marketplace/certified-operators-t6lt4" Nov 28 13:32:57 crc kubenswrapper[4857]: I1128 13:32:57.561761 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q6blx\" (UniqueName: \"kubernetes.io/projected/88424fcd-c8d4-4df4-8176-30471a90470d-kube-api-access-q6blx\") pod \"certified-operators-t6lt4\" (UID: \"88424fcd-c8d4-4df4-8176-30471a90470d\") " pod="openshift-marketplace/certified-operators-t6lt4" Nov 28 13:32:57 crc kubenswrapper[4857]: I1128 13:32:57.581810 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-t6lt4" Nov 28 13:32:57 crc kubenswrapper[4857]: I1128 13:32:57.959552 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-t6lt4"] Nov 28 13:32:57 crc kubenswrapper[4857]: W1128 13:32:57.968609 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod88424fcd_c8d4_4df4_8176_30471a90470d.slice/crio-d10622a59b30be4cb613118c76b8a01dca1eec778400ddcea7e592ec62d7884c WatchSource:0}: Error finding container d10622a59b30be4cb613118c76b8a01dca1eec778400ddcea7e592ec62d7884c: Status 404 returned error can't find the container with id d10622a59b30be4cb613118c76b8a01dca1eec778400ddcea7e592ec62d7884c Nov 28 13:32:58 crc kubenswrapper[4857]: I1128 13:32:58.234186 4857 generic.go:334] "Generic (PLEG): container finished" podID="88424fcd-c8d4-4df4-8176-30471a90470d" containerID="3b557e5ba53b519b38b97316b1a4d239ea3f9ddfbff13b89824be137e6190389" exitCode=0 Nov 28 13:32:58 crc kubenswrapper[4857]: I1128 13:32:58.235693 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0642909d-f900-49c8-919f-921a3fc66ac8" path="/var/lib/kubelet/pods/0642909d-f900-49c8-919f-921a3fc66ac8/volumes" Nov 28 13:32:58 crc kubenswrapper[4857]: I1128 13:32:58.236381 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c8a00dc-9bad-4bb0-8003-9b8eb43299cd" path="/var/lib/kubelet/pods/1c8a00dc-9bad-4bb0-8003-9b8eb43299cd/volumes" Nov 28 13:32:58 crc kubenswrapper[4857]: I1128 13:32:58.236909 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t6lt4" event={"ID":"88424fcd-c8d4-4df4-8176-30471a90470d","Type":"ContainerDied","Data":"3b557e5ba53b519b38b97316b1a4d239ea3f9ddfbff13b89824be137e6190389"} Nov 28 13:32:58 crc kubenswrapper[4857]: I1128 13:32:58.236937 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t6lt4" event={"ID":"88424fcd-c8d4-4df4-8176-30471a90470d","Type":"ContainerStarted","Data":"d10622a59b30be4cb613118c76b8a01dca1eec778400ddcea7e592ec62d7884c"} Nov 28 13:32:58 crc 
kubenswrapper[4857]: I1128 13:32:58.255409 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-zw8sc"] Nov 28 13:32:58 crc kubenswrapper[4857]: I1128 13:32:58.260211 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zw8sc" Nov 28 13:32:58 crc kubenswrapper[4857]: I1128 13:32:58.263651 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 28 13:32:58 crc kubenswrapper[4857]: I1128 13:32:58.265824 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zw8sc"] Nov 28 13:32:58 crc kubenswrapper[4857]: I1128 13:32:58.353519 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c5ae31f7-1a7f-414c-9fd3-dc7818c66483-utilities\") pod \"redhat-operators-zw8sc\" (UID: \"c5ae31f7-1a7f-414c-9fd3-dc7818c66483\") " pod="openshift-marketplace/redhat-operators-zw8sc" Nov 28 13:32:58 crc kubenswrapper[4857]: I1128 13:32:58.353748 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c5ae31f7-1a7f-414c-9fd3-dc7818c66483-catalog-content\") pod \"redhat-operators-zw8sc\" (UID: \"c5ae31f7-1a7f-414c-9fd3-dc7818c66483\") " pod="openshift-marketplace/redhat-operators-zw8sc" Nov 28 13:32:58 crc kubenswrapper[4857]: I1128 13:32:58.354506 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7d7km\" (UniqueName: \"kubernetes.io/projected/c5ae31f7-1a7f-414c-9fd3-dc7818c66483-kube-api-access-7d7km\") pod \"redhat-operators-zw8sc\" (UID: \"c5ae31f7-1a7f-414c-9fd3-dc7818c66483\") " pod="openshift-marketplace/redhat-operators-zw8sc" Nov 28 13:32:58 crc kubenswrapper[4857]: I1128 13:32:58.455703 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7d7km\" (UniqueName: \"kubernetes.io/projected/c5ae31f7-1a7f-414c-9fd3-dc7818c66483-kube-api-access-7d7km\") pod \"redhat-operators-zw8sc\" (UID: \"c5ae31f7-1a7f-414c-9fd3-dc7818c66483\") " pod="openshift-marketplace/redhat-operators-zw8sc" Nov 28 13:32:58 crc kubenswrapper[4857]: I1128 13:32:58.455769 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c5ae31f7-1a7f-414c-9fd3-dc7818c66483-utilities\") pod \"redhat-operators-zw8sc\" (UID: \"c5ae31f7-1a7f-414c-9fd3-dc7818c66483\") " pod="openshift-marketplace/redhat-operators-zw8sc" Nov 28 13:32:58 crc kubenswrapper[4857]: I1128 13:32:58.455818 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c5ae31f7-1a7f-414c-9fd3-dc7818c66483-catalog-content\") pod \"redhat-operators-zw8sc\" (UID: \"c5ae31f7-1a7f-414c-9fd3-dc7818c66483\") " pod="openshift-marketplace/redhat-operators-zw8sc" Nov 28 13:32:58 crc kubenswrapper[4857]: I1128 13:32:58.456272 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c5ae31f7-1a7f-414c-9fd3-dc7818c66483-utilities\") pod \"redhat-operators-zw8sc\" (UID: \"c5ae31f7-1a7f-414c-9fd3-dc7818c66483\") " pod="openshift-marketplace/redhat-operators-zw8sc" Nov 28 13:32:58 crc kubenswrapper[4857]: I1128 13:32:58.456326 4857 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c5ae31f7-1a7f-414c-9fd3-dc7818c66483-catalog-content\") pod \"redhat-operators-zw8sc\" (UID: \"c5ae31f7-1a7f-414c-9fd3-dc7818c66483\") " pod="openshift-marketplace/redhat-operators-zw8sc" Nov 28 13:32:58 crc kubenswrapper[4857]: I1128 13:32:58.474833 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7d7km\" (UniqueName: \"kubernetes.io/projected/c5ae31f7-1a7f-414c-9fd3-dc7818c66483-kube-api-access-7d7km\") pod \"redhat-operators-zw8sc\" (UID: \"c5ae31f7-1a7f-414c-9fd3-dc7818c66483\") " pod="openshift-marketplace/redhat-operators-zw8sc" Nov 28 13:32:58 crc kubenswrapper[4857]: I1128 13:32:58.582864 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zw8sc" Nov 28 13:32:58 crc kubenswrapper[4857]: I1128 13:32:58.764221 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zw8sc"] Nov 28 13:32:58 crc kubenswrapper[4857]: W1128 13:32:58.767112 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc5ae31f7_1a7f_414c_9fd3_dc7818c66483.slice/crio-abd27e66f0284251c51c78f39085ec66a50acb92ef9a3758861844d7affa9d6f WatchSource:0}: Error finding container abd27e66f0284251c51c78f39085ec66a50acb92ef9a3758861844d7affa9d6f: Status 404 returned error can't find the container with id abd27e66f0284251c51c78f39085ec66a50acb92ef9a3758861844d7affa9d6f Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.241684 4857 generic.go:334] "Generic (PLEG): container finished" podID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" containerID="d4e3dbc9639e2175c33e952a51809f762d59d17a5c7ba964c4e7db506bc0e4f8" exitCode=0 Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.241972 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zw8sc" event={"ID":"c5ae31f7-1a7f-414c-9fd3-dc7818c66483","Type":"ContainerDied","Data":"d4e3dbc9639e2175c33e952a51809f762d59d17a5c7ba964c4e7db506bc0e4f8"} Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.242136 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zw8sc" event={"ID":"c5ae31f7-1a7f-414c-9fd3-dc7818c66483","Type":"ContainerStarted","Data":"abd27e66f0284251c51c78f39085ec66a50acb92ef9a3758861844d7affa9d6f"} Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.249679 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t6lt4" event={"ID":"88424fcd-c8d4-4df4-8176-30471a90470d","Type":"ContainerStarted","Data":"5c0e67229c74176d01bba2125bbdb7df96ffa85b3e5b4a354de21ceae2afb218"} Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.655063 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-t6t2s"] Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.656798 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-t6t2s" Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.659909 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.661474 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-t6t2s"] Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.771203 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tfvdz\" (UniqueName: \"kubernetes.io/projected/a45744b5-0329-43e7-834a-535eacdf9717-kube-api-access-tfvdz\") pod \"community-operators-t6t2s\" (UID: \"a45744b5-0329-43e7-834a-535eacdf9717\") " pod="openshift-marketplace/community-operators-t6t2s" Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.771415 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a45744b5-0329-43e7-834a-535eacdf9717-utilities\") pod \"community-operators-t6t2s\" (UID: \"a45744b5-0329-43e7-834a-535eacdf9717\") " pod="openshift-marketplace/community-operators-t6t2s" Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.771551 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a45744b5-0329-43e7-834a-535eacdf9717-catalog-content\") pod \"community-operators-t6t2s\" (UID: \"a45744b5-0329-43e7-834a-535eacdf9717\") " pod="openshift-marketplace/community-operators-t6t2s" Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.873083 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a45744b5-0329-43e7-834a-535eacdf9717-utilities\") pod \"community-operators-t6t2s\" (UID: \"a45744b5-0329-43e7-834a-535eacdf9717\") " pod="openshift-marketplace/community-operators-t6t2s" Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.873152 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a45744b5-0329-43e7-834a-535eacdf9717-catalog-content\") pod \"community-operators-t6t2s\" (UID: \"a45744b5-0329-43e7-834a-535eacdf9717\") " pod="openshift-marketplace/community-operators-t6t2s" Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.873183 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tfvdz\" (UniqueName: \"kubernetes.io/projected/a45744b5-0329-43e7-834a-535eacdf9717-kube-api-access-tfvdz\") pod \"community-operators-t6t2s\" (UID: \"a45744b5-0329-43e7-834a-535eacdf9717\") " pod="openshift-marketplace/community-operators-t6t2s" Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.873915 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a45744b5-0329-43e7-834a-535eacdf9717-utilities\") pod \"community-operators-t6t2s\" (UID: \"a45744b5-0329-43e7-834a-535eacdf9717\") " pod="openshift-marketplace/community-operators-t6t2s" Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.874188 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a45744b5-0329-43e7-834a-535eacdf9717-catalog-content\") pod \"community-operators-t6t2s\" (UID: 
\"a45744b5-0329-43e7-834a-535eacdf9717\") " pod="openshift-marketplace/community-operators-t6t2s" Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.892061 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tfvdz\" (UniqueName: \"kubernetes.io/projected/a45744b5-0329-43e7-834a-535eacdf9717-kube-api-access-tfvdz\") pod \"community-operators-t6t2s\" (UID: \"a45744b5-0329-43e7-834a-535eacdf9717\") " pod="openshift-marketplace/community-operators-t6t2s" Nov 28 13:32:59 crc kubenswrapper[4857]: I1128 13:32:59.983290 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-t6t2s" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.255570 4857 generic.go:334] "Generic (PLEG): container finished" podID="88424fcd-c8d4-4df4-8176-30471a90470d" containerID="5c0e67229c74176d01bba2125bbdb7df96ffa85b3e5b4a354de21ceae2afb218" exitCode=0 Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.255630 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t6lt4" event={"ID":"88424fcd-c8d4-4df4-8176-30471a90470d","Type":"ContainerDied","Data":"5c0e67229c74176d01bba2125bbdb7df96ffa85b3e5b4a354de21ceae2afb218"} Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.258729 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zw8sc" event={"ID":"c5ae31f7-1a7f-414c-9fd3-dc7818c66483","Type":"ContainerStarted","Data":"ec546d5ae7185b08abfbb757dd7c10739d043fca287768831d31f8ddb03dcba7"} Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.374699 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-t6t2s"] Nov 28 13:33:00 crc kubenswrapper[4857]: W1128 13:33:00.382857 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda45744b5_0329_43e7_834a_535eacdf9717.slice/crio-49b301bc394142d6e80316e648914d818ca8c738ac365e1bd1108349a1c7fff4 WatchSource:0}: Error finding container 49b301bc394142d6e80316e648914d818ca8c738ac365e1bd1108349a1c7fff4: Status 404 returned error can't find the container with id 49b301bc394142d6e80316e648914d818ca8c738ac365e1bd1108349a1c7fff4 Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.656256 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-z7m4v"] Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.657530 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z7m4v" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.660076 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.663389 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-z7m4v"] Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.782559 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c9b9dd6-86eb-4361-a872-9027be0c909f-utilities\") pod \"redhat-marketplace-z7m4v\" (UID: \"1c9b9dd6-86eb-4361-a872-9027be0c909f\") " pod="openshift-marketplace/redhat-marketplace-z7m4v" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.782636 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c9b9dd6-86eb-4361-a872-9027be0c909f-catalog-content\") pod \"redhat-marketplace-z7m4v\" (UID: \"1c9b9dd6-86eb-4361-a872-9027be0c909f\") " pod="openshift-marketplace/redhat-marketplace-z7m4v" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.782702 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-75snf\" (UniqueName: \"kubernetes.io/projected/1c9b9dd6-86eb-4361-a872-9027be0c909f-kube-api-access-75snf\") pod \"redhat-marketplace-z7m4v\" (UID: \"1c9b9dd6-86eb-4361-a872-9027be0c909f\") " pod="openshift-marketplace/redhat-marketplace-z7m4v" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.883697 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-75snf\" (UniqueName: \"kubernetes.io/projected/1c9b9dd6-86eb-4361-a872-9027be0c909f-kube-api-access-75snf\") pod \"redhat-marketplace-z7m4v\" (UID: \"1c9b9dd6-86eb-4361-a872-9027be0c909f\") " pod="openshift-marketplace/redhat-marketplace-z7m4v" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.883766 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c9b9dd6-86eb-4361-a872-9027be0c909f-utilities\") pod \"redhat-marketplace-z7m4v\" (UID: \"1c9b9dd6-86eb-4361-a872-9027be0c909f\") " pod="openshift-marketplace/redhat-marketplace-z7m4v" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.883827 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c9b9dd6-86eb-4361-a872-9027be0c909f-catalog-content\") pod \"redhat-marketplace-z7m4v\" (UID: \"1c9b9dd6-86eb-4361-a872-9027be0c909f\") " pod="openshift-marketplace/redhat-marketplace-z7m4v" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.884803 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c9b9dd6-86eb-4361-a872-9027be0c909f-catalog-content\") pod \"redhat-marketplace-z7m4v\" (UID: \"1c9b9dd6-86eb-4361-a872-9027be0c909f\") " pod="openshift-marketplace/redhat-marketplace-z7m4v" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.884843 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c9b9dd6-86eb-4361-a872-9027be0c909f-utilities\") pod \"redhat-marketplace-z7m4v\" (UID: 
\"1c9b9dd6-86eb-4361-a872-9027be0c909f\") " pod="openshift-marketplace/redhat-marketplace-z7m4v" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.907663 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-75snf\" (UniqueName: \"kubernetes.io/projected/1c9b9dd6-86eb-4361-a872-9027be0c909f-kube-api-access-75snf\") pod \"redhat-marketplace-z7m4v\" (UID: \"1c9b9dd6-86eb-4361-a872-9027be0c909f\") " pod="openshift-marketplace/redhat-marketplace-z7m4v" Nov 28 13:33:00 crc kubenswrapper[4857]: I1128 13:33:00.989100 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z7m4v" Nov 28 13:33:01 crc kubenswrapper[4857]: I1128 13:33:01.268966 4857 generic.go:334] "Generic (PLEG): container finished" podID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" containerID="ec546d5ae7185b08abfbb757dd7c10739d043fca287768831d31f8ddb03dcba7" exitCode=0 Nov 28 13:33:01 crc kubenswrapper[4857]: I1128 13:33:01.269303 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zw8sc" event={"ID":"c5ae31f7-1a7f-414c-9fd3-dc7818c66483","Type":"ContainerDied","Data":"ec546d5ae7185b08abfbb757dd7c10739d043fca287768831d31f8ddb03dcba7"} Nov 28 13:33:01 crc kubenswrapper[4857]: I1128 13:33:01.273741 4857 generic.go:334] "Generic (PLEG): container finished" podID="a45744b5-0329-43e7-834a-535eacdf9717" containerID="d057d540d993c8ef4efeb5aaa3c882c6513f5a748d2aae387ffcc86bcfb02720" exitCode=0 Nov 28 13:33:01 crc kubenswrapper[4857]: I1128 13:33:01.273830 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t6t2s" event={"ID":"a45744b5-0329-43e7-834a-535eacdf9717","Type":"ContainerDied","Data":"d057d540d993c8ef4efeb5aaa3c882c6513f5a748d2aae387ffcc86bcfb02720"} Nov 28 13:33:01 crc kubenswrapper[4857]: I1128 13:33:01.273860 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t6t2s" event={"ID":"a45744b5-0329-43e7-834a-535eacdf9717","Type":"ContainerStarted","Data":"49b301bc394142d6e80316e648914d818ca8c738ac365e1bd1108349a1c7fff4"} Nov 28 13:33:01 crc kubenswrapper[4857]: I1128 13:33:01.277404 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t6lt4" event={"ID":"88424fcd-c8d4-4df4-8176-30471a90470d","Type":"ContainerStarted","Data":"8d61c48bd240863a4d3a089bd3d2dfa1fc73910d57ca5aff84737944d59816d1"} Nov 28 13:33:01 crc kubenswrapper[4857]: I1128 13:33:01.324115 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-t6lt4" podStartSLOduration=1.845655254 podStartE2EDuration="4.324096667s" podCreationTimestamp="2025-11-28 13:32:57 +0000 UTC" firstStartedPulling="2025-11-28 13:32:58.238409866 +0000 UTC m=+228.362351303" lastFinishedPulling="2025-11-28 13:33:00.716851279 +0000 UTC m=+230.840792716" observedRunningTime="2025-11-28 13:33:01.322140884 +0000 UTC m=+231.446082321" watchObservedRunningTime="2025-11-28 13:33:01.324096667 +0000 UTC m=+231.448038114" Nov 28 13:33:01 crc kubenswrapper[4857]: I1128 13:33:01.378511 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-z7m4v"] Nov 28 13:33:01 crc kubenswrapper[4857]: W1128 13:33:01.383380 4857 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1c9b9dd6_86eb_4361_a872_9027be0c909f.slice/crio-852b5af1ce6de42ac38edbbbedb12077ec3fb4a61f6f9844f62c3f6ec7136848 WatchSource:0}: Error finding container 852b5af1ce6de42ac38edbbbedb12077ec3fb4a61f6f9844f62c3f6ec7136848: Status 404 returned error can't find the container with id 852b5af1ce6de42ac38edbbbedb12077ec3fb4a61f6f9844f62c3f6ec7136848
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.198196 4857 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.199373 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 13:33:02 crc kubenswrapper[4857]: E1128 13:33:02.200540 4857 file.go:109] "Unable to process watch event" err="can't process config file \"/etc/kubernetes/manifests/kube-apiserver-pod.yaml\": /etc/kubernetes/manifests/kube-apiserver-pod.yaml: couldn't parse as pod(Object 'Kind' is missing in 'null'), please check config file"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.200615 4857 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.201557 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31" gracePeriod=15
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.201713 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd" gracePeriod=15
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.201760 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46" gracePeriod=15
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.201800 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2" gracePeriod=15
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.201845 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71" gracePeriod=15
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.201902 4857 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Nov 28 13:33:02 crc kubenswrapper[4857]: E1128 13:33:02.202086 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.202104 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup"
Nov 28 13:33:02 crc kubenswrapper[4857]: E1128 13:33:02.202112 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.202119 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Nov 28 13:33:02 crc kubenswrapper[4857]: E1128 13:33:02.202129 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.202135 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Nov 28 13:33:02 crc kubenswrapper[4857]: E1128 13:33:02.202142 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.202148 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Nov 28 13:33:02 crc kubenswrapper[4857]: E1128 13:33:02.202162 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.202168 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Nov 28 13:33:02 crc kubenswrapper[4857]: E1128 13:33:02.202175 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.202180 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Nov 28 13:33:02 crc kubenswrapper[4857]: E1128 13:33:02.202189 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.202196 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.202274 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.202284 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.202292 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.202300 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.202308 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.202316 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.278217 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.300380 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zw8sc" event={"ID":"c5ae31f7-1a7f-414c-9fd3-dc7818c66483","Type":"ContainerStarted","Data":"8c60cc6d2fb177f18ec9983d6fa0a303463e52132c4f3faf516569def94b6df0"}
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.301371 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.301691 4857 status_manager.go:851] "Failed to get status for pod" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" pod="openshift-marketplace/redhat-operators-zw8sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-zw8sc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.302154 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.302199 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.302242 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.302271 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.302326 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.302364 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.302416 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.302431 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.305336 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t6t2s" event={"ID":"a45744b5-0329-43e7-834a-535eacdf9717","Type":"ContainerStarted","Data":"e9d2fb3582453cda1d291d339591ed1a565c00d5e486c990d46e15881b8e45db"}
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.306639 4857 generic.go:334] "Generic (PLEG): container finished" podID="1c9b9dd6-86eb-4361-a872-9027be0c909f" containerID="018f942b13cbe461ed784d4b79edde34482a5eb8e81d5badc1c83cc668ba6d95" exitCode=0
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.306745 4857 status_manager.go:851] "Failed to get status for pod" podUID="a45744b5-0329-43e7-834a-535eacdf9717" pod="openshift-marketplace/community-operators-t6t2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-t6t2s\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.306896 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.307067 4857 status_manager.go:851] "Failed to get status for pod" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" pod="openshift-marketplace/redhat-operators-zw8sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-zw8sc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.307270 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z7m4v" event={"ID":"1c9b9dd6-86eb-4361-a872-9027be0c909f","Type":"ContainerDied","Data":"018f942b13cbe461ed784d4b79edde34482a5eb8e81d5badc1c83cc668ba6d95"}
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.307294 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z7m4v" event={"ID":"1c9b9dd6-86eb-4361-a872-9027be0c909f","Type":"ContainerStarted","Data":"852b5af1ce6de42ac38edbbbedb12077ec3fb4a61f6f9844f62c3f6ec7136848"}
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.307651 4857 status_manager.go:851] "Failed to get status for pod" podUID="a45744b5-0329-43e7-834a-535eacdf9717" pod="openshift-marketplace/community-operators-t6t2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-t6t2s\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.307815 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.307969 4857 status_manager.go:851] "Failed to get status for pod" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" pod="openshift-marketplace/redhat-operators-zw8sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-zw8sc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.308111 4857 status_manager.go:851] "Failed to get status for pod" podUID="1c9b9dd6-86eb-4361-a872-9027be0c909f" pod="openshift-marketplace/redhat-marketplace-z7m4v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-z7m4v\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:02 crc kubenswrapper[4857]: E1128 13:33:02.325599 4857 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/events\": dial tcp 38.102.83.222:6443: connect: connection refused" event="&Event{ObjectMeta:{redhat-marketplace-z7m4v.187c2ef4a6a09086 openshift-marketplace 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-marketplace,Name:redhat-marketplace-z7m4v,UID:1c9b9dd6-86eb-4361-a872-9027be0c909f,APIVersion:v1,ResourceVersion:29419,FieldPath:spec.initContainers{extract-content},},Reason:Pulling,Message:Pulling image \"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\",Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 13:33:02.324830342 +0000 UTC m=+232.448771779,LastTimestamp:2025-11-28 13:33:02.324830342 +0000 UTC m=+232.448771779,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.403525 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.403582 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.403623 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.403645 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.403680 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.403697 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.403724 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.403752 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.404179 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.404228 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.404622 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.404643 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.404660 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.405151 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.405173 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.405215 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 13:33:02 crc kubenswrapper[4857]: I1128 13:33:02.534673 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 13:33:02 crc kubenswrapper[4857]: W1128 13:33:02.554514 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf85e55b1a89d02b0cb034b1ea31ed45a.slice/crio-b4d40c143ded11a18024de735665c1b3507bd14012150c692d3798e27579f45e WatchSource:0}: Error finding container b4d40c143ded11a18024de735665c1b3507bd14012150c692d3798e27579f45e: Status 404 returned error can't find the container with id b4d40c143ded11a18024de735665c1b3507bd14012150c692d3798e27579f45e
Nov 28 13:33:03 crc kubenswrapper[4857]: I1128 13:33:03.314847 4857 generic.go:334] "Generic (PLEG): container finished" podID="a45744b5-0329-43e7-834a-535eacdf9717" containerID="e9d2fb3582453cda1d291d339591ed1a565c00d5e486c990d46e15881b8e45db" exitCode=0
Nov 28 13:33:03 crc kubenswrapper[4857]: I1128 13:33:03.314894 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t6t2s" event={"ID":"a45744b5-0329-43e7-834a-535eacdf9717","Type":"ContainerDied","Data":"e9d2fb3582453cda1d291d339591ed1a565c00d5e486c990d46e15881b8e45db"}
Nov 28 13:33:03 crc kubenswrapper[4857]: I1128 13:33:03.315777 4857 status_manager.go:851] "Failed to get status for pod" podUID="a45744b5-0329-43e7-834a-535eacdf9717" pod="openshift-marketplace/community-operators-t6t2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-t6t2s\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:03 crc kubenswrapper[4857]: I1128 13:33:03.316618 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:03 crc kubenswrapper[4857]: I1128 13:33:03.317150 4857 status_manager.go:851] "Failed to get status for pod" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" pod="openshift-marketplace/redhat-operators-zw8sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-zw8sc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:03 crc kubenswrapper[4857]: I1128 13:33:03.317449 4857 status_manager.go:851] "Failed to get status for pod" podUID="1c9b9dd6-86eb-4361-a872-9027be0c909f" pod="openshift-marketplace/redhat-marketplace-z7m4v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-z7m4v\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:03 crc kubenswrapper[4857]: I1128 13:33:03.317800 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"76be4e3ed228cf4cbdccd07494ae0bccb4fd916b41d2e463a97a1dd5f4c01221"}
Nov 28 13:33:03 crc kubenswrapper[4857]: I1128 13:33:03.317836 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"b4d40c143ded11a18024de735665c1b3507bd14012150c692d3798e27579f45e"}
Nov 28 13:33:03 crc kubenswrapper[4857]: I1128 13:33:03.318417 4857 status_manager.go:851] "Failed to get status for pod" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" pod="openshift-marketplace/redhat-operators-zw8sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-zw8sc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:03 crc kubenswrapper[4857]: I1128 13:33:03.318813 4857 status_manager.go:851] "Failed to get status for pod" podUID="1c9b9dd6-86eb-4361-a872-9027be0c909f" pod="openshift-marketplace/redhat-marketplace-z7m4v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-z7m4v\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:03 crc kubenswrapper[4857]: I1128 13:33:03.319215 4857 status_manager.go:851] "Failed to get status for pod" podUID="a45744b5-0329-43e7-834a-535eacdf9717" pod="openshift-marketplace/community-operators-t6t2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-t6t2s\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:03 crc kubenswrapper[4857]: I1128 13:33:03.319571 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:03 crc kubenswrapper[4857]: I1128 13:33:03.321030 4857 generic.go:334] "Generic (PLEG): container finished" podID="0925aef1-cbdf-4f9b-bde9-b18b40e006d4" containerID="3a27b6b4953c2786e2ae9c288a3775d80c3eb05416fed76b9967d63d29244a7b" exitCode=0
Nov 28 13:33:03 crc kubenswrapper[4857]: I1128 13:33:03.321159 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"0925aef1-cbdf-4f9b-bde9-b18b40e006d4","Type":"ContainerDied","Data":"3a27b6b4953c2786e2ae9c288a3775d80c3eb05416fed76b9967d63d29244a7b"}
Nov 28 13:33:03 crc kubenswrapper[4857]: I1128 13:33:03.321956 4857 status_manager.go:851] "Failed to get status for pod" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" pod="openshift-marketplace/redhat-operators-zw8sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-zw8sc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:03 crc kubenswrapper[4857]: I1128 13:33:03.322280 4857 status_manager.go:851] "Failed to get status for pod" podUID="1c9b9dd6-86eb-4361-a872-9027be0c909f" pod="openshift-marketplace/redhat-marketplace-z7m4v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-z7m4v\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:03 crc kubenswrapper[4857]: I1128 13:33:03.322530 4857 status_manager.go:851] "Failed to get status for pod" podUID="0925aef1-cbdf-4f9b-bde9-b18b40e006d4" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:03 crc kubenswrapper[4857]: I1128 13:33:03.322712 4857 status_manager.go:851] "Failed to get status for pod" podUID="a45744b5-0329-43e7-834a-535eacdf9717" pod="openshift-marketplace/community-operators-t6t2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-t6t2s\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:03 crc kubenswrapper[4857]: I1128 13:33:03.322895 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:03 crc kubenswrapper[4857]: I1128 13:33:03.324503 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Nov 28 13:33:03 crc kubenswrapper[4857]: I1128 13:33:03.326846 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log"
Nov 28 13:33:03 crc kubenswrapper[4857]: I1128 13:33:03.328337 4857 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd" exitCode=0
Nov 28 13:33:03 crc kubenswrapper[4857]: I1128 13:33:03.328367 4857 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46" exitCode=0
Nov 28 13:33:03 crc kubenswrapper[4857]: I1128 13:33:03.328377 4857 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2" exitCode=0
Nov 28 13:33:03 crc kubenswrapper[4857]: I1128 13:33:03.328385 4857 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71" exitCode=2
Nov 28 13:33:03 crc kubenswrapper[4857]: I1128 13:33:03.328475 4857 scope.go:117] "RemoveContainer" containerID="ba6a0422a0c2b422a20a2d1b6d249c1208155dafb98f7c8a4b170f21cee841d1"
Nov 28 13:33:03 crc kubenswrapper[4857]: E1128 13:33:03.370548 4857 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/events\": dial tcp 38.102.83.222:6443: connect: connection refused" event="&Event{ObjectMeta:{redhat-marketplace-z7m4v.187c2ef4a6a09086 openshift-marketplace 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-marketplace,Name:redhat-marketplace-z7m4v,UID:1c9b9dd6-86eb-4361-a872-9027be0c909f,APIVersion:v1,ResourceVersion:29419,FieldPath:spec.initContainers{extract-content},},Reason:Pulling,Message:Pulling image \"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\",Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 13:33:02.324830342 +0000 UTC m=+232.448771779,LastTimestamp:2025-11-28 13:33:02.324830342 +0000 UTC m=+232.448771779,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.352906 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log"
Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.366324 4857 generic.go:334] "Generic (PLEG): container finished" podID="1c9b9dd6-86eb-4361-a872-9027be0c909f" containerID="626db5dc2c1b52c1db63ce240fb462035feaf5c6a91fbe742cd23b9dda342c2b" exitCode=0
Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.366628 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z7m4v" event={"ID":"1c9b9dd6-86eb-4361-a872-9027be0c909f","Type":"ContainerDied","Data":"626db5dc2c1b52c1db63ce240fb462035feaf5c6a91fbe742cd23b9dda342c2b"}
Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.368371 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.368583 4857 status_manager.go:851] "Failed to get status for pod" podUID="a45744b5-0329-43e7-834a-535eacdf9717" pod="openshift-marketplace/community-operators-t6t2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-t6t2s\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.368769 4857 status_manager.go:851] "Failed to get status for pod" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" pod="openshift-marketplace/redhat-operators-zw8sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-zw8sc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.368958 4857 status_manager.go:851] "Failed to get status for pod" podUID="1c9b9dd6-86eb-4361-a872-9027be0c909f" pod="openshift-marketplace/redhat-marketplace-z7m4v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-z7m4v\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.369137 4857 status_manager.go:851] "Failed to get status for pod" podUID="0925aef1-cbdf-4f9b-bde9-b18b40e006d4" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.880709 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.881430 4857 status_manager.go:851] "Failed to get status for pod" podUID="a45744b5-0329-43e7-834a-535eacdf9717" pod="openshift-marketplace/community-operators-t6t2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-t6t2s\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.881729 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.881994 4857 status_manager.go:851] "Failed to get status for pod" podUID="1c9b9dd6-86eb-4361-a872-9027be0c909f" pod="openshift-marketplace/redhat-marketplace-z7m4v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-z7m4v\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.882238 4857 status_manager.go:851] "Failed to get status for pod" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" pod="openshift-marketplace/redhat-operators-zw8sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-zw8sc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.882454 4857 status_manager.go:851] "Failed to get status for pod" podUID="0925aef1-cbdf-4f9b-bde9-b18b40e006d4" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.935845 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0925aef1-cbdf-4f9b-bde9-b18b40e006d4-kubelet-dir\") pod \"0925aef1-cbdf-4f9b-bde9-b18b40e006d4\" (UID: \"0925aef1-cbdf-4f9b-bde9-b18b40e006d4\") "
Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.935927 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0925aef1-cbdf-4f9b-bde9-b18b40e006d4-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "0925aef1-cbdf-4f9b-bde9-b18b40e006d4" (UID: "0925aef1-cbdf-4f9b-bde9-b18b40e006d4"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.936024 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/0925aef1-cbdf-4f9b-bde9-b18b40e006d4-var-lock\") pod \"0925aef1-cbdf-4f9b-bde9-b18b40e006d4\" (UID: \"0925aef1-cbdf-4f9b-bde9-b18b40e006d4\") "
Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.936052 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0925aef1-cbdf-4f9b-bde9-b18b40e006d4-kube-api-access\") pod \"0925aef1-cbdf-4f9b-bde9-b18b40e006d4\" (UID: \"0925aef1-cbdf-4f9b-bde9-b18b40e006d4\") "
Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.936092 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0925aef1-cbdf-4f9b-bde9-b18b40e006d4-var-lock" (OuterVolumeSpecName: "var-lock") pod "0925aef1-cbdf-4f9b-bde9-b18b40e006d4" (UID: "0925aef1-cbdf-4f9b-bde9-b18b40e006d4"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.936352 4857 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/0925aef1-cbdf-4f9b-bde9-b18b40e006d4-kubelet-dir\") on node \"crc\" DevicePath \"\""
Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.936377 4857 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/0925aef1-cbdf-4f9b-bde9-b18b40e006d4-var-lock\") on node \"crc\" DevicePath \"\""
Nov 28 13:33:04 crc kubenswrapper[4857]: I1128 13:33:04.944409 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0925aef1-cbdf-4f9b-bde9-b18b40e006d4-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0925aef1-cbdf-4f9b-bde9-b18b40e006d4" (UID: "0925aef1-cbdf-4f9b-bde9-b18b40e006d4"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.037658 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0925aef1-cbdf-4f9b-bde9-b18b40e006d4-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.087794 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.088488 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.089290 4857 status_manager.go:851] "Failed to get status for pod" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" pod="openshift-marketplace/redhat-operators-zw8sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-zw8sc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.090112 4857 status_manager.go:851] "Failed to get status for pod" podUID="1c9b9dd6-86eb-4361-a872-9027be0c909f" pod="openshift-marketplace/redhat-marketplace-z7m4v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-z7m4v\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.090661 4857 status_manager.go:851] "Failed to get status for pod" podUID="0925aef1-cbdf-4f9b-bde9-b18b40e006d4" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.091244 4857 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.091692 4857 status_manager.go:851] "Failed to get status for pod" podUID="a45744b5-0329-43e7-834a-535eacdf9717" pod="openshift-marketplace/community-operators-t6t2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-t6t2s\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.092075 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.138384 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") "
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.138436 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") "
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.138496 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") "
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.138549 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.138599 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.138666 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.138961 4857 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\""
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.138978 4857 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\""
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.138990 4857 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\""
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.375380 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"0925aef1-cbdf-4f9b-bde9-b18b40e006d4","Type":"ContainerDied","Data":"bf91ebd55e75dd1af147cae3e2d8284fbbad52773f07d466f89c3260893cb4cd"}
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.377319 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bf91ebd55e75dd1af147cae3e2d8284fbbad52773f07d466f89c3260893cb4cd"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.375441 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.380584 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.383575 4857 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31" exitCode=0
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.383683 4857 scope.go:117] "RemoveContainer" containerID="186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.383857 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.394732 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z7m4v" event={"ID":"1c9b9dd6-86eb-4361-a872-9027be0c909f","Type":"ContainerStarted","Data":"2d96ae691c43e0e87c7e85bd80c8094434fb91d1a245147ca07ba1ab746bb90a"}
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.395584 4857 status_manager.go:851] "Failed to get status for pod" podUID="0925aef1-cbdf-4f9b-bde9-b18b40e006d4" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.395905 4857 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.396218 4857 status_manager.go:851] "Failed to get status for pod" podUID="a45744b5-0329-43e7-834a-535eacdf9717" pod="openshift-marketplace/community-operators-t6t2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-t6t2s\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.396495 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.396864 4857 status_manager.go:851] "Failed to get status for pod" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" pod="openshift-marketplace/redhat-operators-zw8sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-zw8sc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.397415 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t6t2s" event={"ID":"a45744b5-0329-43e7-834a-535eacdf9717","Type":"ContainerStarted","Data":"2f2143955fc9176d415a6e765e33f6a5fdf28352081d64c27f5ed62bda7bbb15"}
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.398084 4857 status_manager.go:851] "Failed to get status for pod" podUID="1c9b9dd6-86eb-4361-a872-9027be0c909f" pod="openshift-marketplace/redhat-marketplace-z7m4v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-z7m4v\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.398428 4857 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.399117 4857 status_manager.go:851] "Failed to get status for pod" podUID="a45744b5-0329-43e7-834a-535eacdf9717" pod="openshift-marketplace/community-operators-t6t2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-t6t2s\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.400149 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.400556 4857 status_manager.go:851] "Failed to get status for pod" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" pod="openshift-marketplace/redhat-operators-zw8sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-zw8sc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.411380 4857 status_manager.go:851] "Failed to get status for pod" podUID="1c9b9dd6-86eb-4361-a872-9027be0c909f" pod="openshift-marketplace/redhat-marketplace-z7m4v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-z7m4v\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.411750 4857 status_manager.go:851] "Failed to get status for pod" podUID="0925aef1-cbdf-4f9b-bde9-b18b40e006d4" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.413020 4857 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.413537 4857 scope.go:117] "RemoveContainer" containerID="24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.413755 4857 status_manager.go:851] "Failed to get status for pod" podUID="a45744b5-0329-43e7-834a-535eacdf9717" pod="openshift-marketplace/community-operators-t6t2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-t6t2s\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.414105 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.414344 4857 status_manager.go:851] "Failed to get status for pod" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" pod="openshift-marketplace/redhat-operators-zw8sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-zw8sc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.414534 4857 status_manager.go:851] "Failed to get status for pod" podUID="1c9b9dd6-86eb-4361-a872-9027be0c909f" pod="openshift-marketplace/redhat-marketplace-z7m4v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-z7m4v\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.414823 4857 status_manager.go:851] "Failed to get status for pod" podUID="0925aef1-cbdf-4f9b-bde9-b18b40e006d4" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.415065 4857 status_manager.go:851] "Failed to get status for pod" podUID="a45744b5-0329-43e7-834a-535eacdf9717" pod="openshift-marketplace/community-operators-t6t2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-t6t2s\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.415308 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.415525 4857 status_manager.go:851] "Failed to get status for pod" podUID="1c9b9dd6-86eb-4361-a872-9027be0c909f" pod="openshift-marketplace/redhat-marketplace-z7m4v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-z7m4v\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.415763 4857 status_manager.go:851] "Failed to get status for pod" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" pod="openshift-marketplace/redhat-operators-zw8sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-zw8sc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.415995 4857 status_manager.go:851] "Failed to get status for pod" podUID="0925aef1-cbdf-4f9b-bde9-b18b40e006d4" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.416147 4857 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.427247 4857 scope.go:117] "RemoveContainer" containerID="1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.444303 4857 scope.go:117] "RemoveContainer" containerID="66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.460031 4857 scope.go:117] "RemoveContainer" containerID="314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.480119 4857 scope.go:117] "RemoveContainer" containerID="974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.513003 4857 scope.go:117] "RemoveContainer" containerID="186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd"
Nov 28 13:33:05 crc kubenswrapper[4857]: E1128 13:33:05.513489 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\": container with ID starting with 186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd not found: ID does not exist" containerID="186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.513549 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd"} err="failed to get container status \"186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\": rpc error: code = NotFound desc = could not find container \"186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd\": container with ID starting with 186eea92c558aefc818880fbbdf1e18815b33e863362c4335221314bac8c38fd not found: ID does not exist"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.513574 4857 scope.go:117] "RemoveContainer" containerID="24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46"
Nov 28 13:33:05 crc kubenswrapper[4857]: E1128 13:33:05.514095 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\": container with ID starting with 24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46 not found: ID does not exist" containerID="24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.514126 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46"} err="failed to get container status \"24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\": rpc error: code = NotFound desc = could not find container \"24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46\": container with ID starting with 24de286b8c58eaa2fc3072eec4fbfdff8c36df2ed84989b000a27e634c6abc46 not found: ID does not exist"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.514146 4857 scope.go:117] "RemoveContainer" containerID="1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2"
Nov 28 13:33:05 crc kubenswrapper[4857]: E1128 13:33:05.516868 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\": container with ID starting with 1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2 not found: ID does not exist" containerID="1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.517025 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2"} err="failed to get container status \"1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\": rpc error: code = NotFound desc = could not find container \"1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2\": container with ID starting with 1264f824e69907be4f6474067a56002c10aa5f9bb146c6e21846de44664d41a2 not found: ID does not exist"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.517134 4857 scope.go:117] "RemoveContainer" containerID="66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71"
Nov 28 13:33:05 crc kubenswrapper[4857]: E1128 13:33:05.517699 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\": container with ID starting with 66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71 not found: ID does not exist" containerID="66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.517756 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71"} err="failed to get container status \"66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\": rpc error: code = NotFound desc = could not find container \"66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71\": container with ID starting with 66c9c09c131bfda2feb06b04d0936ebc297fd0c8c6554c48d09b92ac7e125f71 not found: ID does not exist"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.517786 4857 scope.go:117] "RemoveContainer" containerID="314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31"
Nov 28 13:33:05 crc kubenswrapper[4857]: E1128 13:33:05.518113 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\": container with ID starting with 314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31 not found: ID does not exist" containerID="314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.518152 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31"} err="failed to get container status \"314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\": rpc error: code = NotFound desc = could not find container \"314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31\": container with ID starting with 314e76d0ad97af84a0048549314954ae789c09066cba658bd55b726ffff02c31 not found: ID does not exist"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.518180 4857 scope.go:117] "RemoveContainer" containerID="974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba"
Nov 28 13:33:05 crc kubenswrapper[4857]: E1128 13:33:05.518700 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\": container with ID starting with 974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba not found: ID does not exist" containerID="974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.518722 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba"} err="failed to get container status \"974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\": rpc error: code = NotFound desc = could not find container \"974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba\": container with ID starting with 974d22aa6b68f22be55ea809a6d5980b50c47556b22d0b4d1bf8d894e87869ba not found: ID does not exist"
Nov 28 13:33:05 crc kubenswrapper[4857]: E1128 13:33:05.857645 4857 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:05 crc kubenswrapper[4857]: E1128 13:33:05.858403 4857 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:05 crc kubenswrapper[4857]: E1128 13:33:05.858664 4857 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:05 crc kubenswrapper[4857]: E1128 13:33:05.858889 4857 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:05 crc kubenswrapper[4857]: E1128 13:33:05.859134 4857 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:05 crc kubenswrapper[4857]: I1128 13:33:05.859243 4857 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease"
Nov 28 13:33:05 crc kubenswrapper[4857]: E1128 13:33:05.859451 4857 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.222:6443: connect: connection refused" interval="200ms"
Nov 28 13:33:06 crc kubenswrapper[4857]: E1128 13:33:06.061033 4857 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.222:6443: connect: connection refused" interval="400ms"
Nov 28 13:33:06 crc kubenswrapper[4857]: I1128 13:33:06.236918 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes"
Nov 28 13:33:06 crc kubenswrapper[4857]: E1128 13:33:06.462285 4857 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.222:6443: connect: connection refused" interval="800ms"
Nov 28 13:33:07 crc kubenswrapper[4857]: E1128 13:33:07.263346 4857 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.222:6443: connect: connection refused" interval="1.6s"
Nov 28 13:33:07 crc kubenswrapper[4857]: I1128 13:33:07.582981 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-t6lt4"
Nov 28 13:33:07 crc kubenswrapper[4857]: I1128 13:33:07.583616 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-t6lt4"
Nov 28 13:33:07 crc kubenswrapper[4857]: I1128 13:33:07.624066 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-t6lt4"
Nov 28 13:33:07 crc kubenswrapper[4857]: I1128 13:33:07.624432 4857 status_manager.go:851] "Failed to get status for pod" podUID="0925aef1-cbdf-4f9b-bde9-b18b40e006d4" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:07 crc kubenswrapper[4857]: I1128 13:33:07.625141 4857 status_manager.go:851] "Failed to get status for pod" podUID="a45744b5-0329-43e7-834a-535eacdf9717" pod="openshift-marketplace/community-operators-t6t2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-t6t2s\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:07 crc kubenswrapper[4857]: I1128 13:33:07.625772 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:07 crc kubenswrapper[4857]: I1128 13:33:07.626015 4857 status_manager.go:851] "Failed to get status for pod" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" pod="openshift-marketplace/redhat-operators-zw8sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-zw8sc\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:07 crc kubenswrapper[4857]: I1128 13:33:07.626286 4857 status_manager.go:851] "Failed to get status for pod" podUID="1c9b9dd6-86eb-4361-a872-9027be0c909f" pod="openshift-marketplace/redhat-marketplace-z7m4v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-z7m4v\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:07 crc kubenswrapper[4857]: I1128 13:33:07.626700 4857 status_manager.go:851] "Failed to get status for pod" podUID="88424fcd-c8d4-4df4-8176-30471a90470d" pod="openshift-marketplace/certified-operators-t6lt4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-t6lt4\": dial tcp 38.102.83.222:6443: connect: connection refused"
Nov 28 13:33:08 crc kubenswrapper[4857]:
E1128 13:33:08.232000 4857 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openshift-image-registry/crc-image-registry-storage: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/persistentvolumeclaims/crc-image-registry-storage\": dial tcp 38.102.83.222:6443: connect: connection refused" pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" volumeName="registry-storage" Nov 28 13:33:08 crc kubenswrapper[4857]: I1128 13:33:08.458630 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-t6lt4" Nov 28 13:33:08 crc kubenswrapper[4857]: I1128 13:33:08.459580 4857 status_manager.go:851] "Failed to get status for pod" podUID="a45744b5-0329-43e7-834a-535eacdf9717" pod="openshift-marketplace/community-operators-t6t2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-t6t2s\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:08 crc kubenswrapper[4857]: I1128 13:33:08.459893 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:08 crc kubenswrapper[4857]: I1128 13:33:08.460376 4857 status_manager.go:851] "Failed to get status for pod" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" pod="openshift-marketplace/redhat-operators-zw8sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-zw8sc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:08 crc kubenswrapper[4857]: I1128 13:33:08.460647 4857 status_manager.go:851] "Failed to get status for pod" podUID="1c9b9dd6-86eb-4361-a872-9027be0c909f" pod="openshift-marketplace/redhat-marketplace-z7m4v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-z7m4v\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:08 crc kubenswrapper[4857]: I1128 13:33:08.460932 4857 status_manager.go:851] "Failed to get status for pod" podUID="88424fcd-c8d4-4df4-8176-30471a90470d" pod="openshift-marketplace/certified-operators-t6lt4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-t6lt4\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:08 crc kubenswrapper[4857]: I1128 13:33:08.461209 4857 status_manager.go:851] "Failed to get status for pod" podUID="0925aef1-cbdf-4f9b-bde9-b18b40e006d4" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:08 crc kubenswrapper[4857]: I1128 13:33:08.583894 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-zw8sc" Nov 28 13:33:08 crc kubenswrapper[4857]: I1128 13:33:08.584515 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-zw8sc" Nov 28 13:33:08 crc kubenswrapper[4857]: I1128 13:33:08.619660 4857 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="started" pod="openshift-marketplace/redhat-operators-zw8sc" Nov 28 13:33:08 crc kubenswrapper[4857]: I1128 13:33:08.620697 4857 status_manager.go:851] "Failed to get status for pod" podUID="a45744b5-0329-43e7-834a-535eacdf9717" pod="openshift-marketplace/community-operators-t6t2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-t6t2s\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:08 crc kubenswrapper[4857]: I1128 13:33:08.621183 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:08 crc kubenswrapper[4857]: I1128 13:33:08.621675 4857 status_manager.go:851] "Failed to get status for pod" podUID="1c9b9dd6-86eb-4361-a872-9027be0c909f" pod="openshift-marketplace/redhat-marketplace-z7m4v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-z7m4v\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:08 crc kubenswrapper[4857]: I1128 13:33:08.622180 4857 status_manager.go:851] "Failed to get status for pod" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" pod="openshift-marketplace/redhat-operators-zw8sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-zw8sc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:08 crc kubenswrapper[4857]: I1128 13:33:08.622522 4857 status_manager.go:851] "Failed to get status for pod" podUID="88424fcd-c8d4-4df4-8176-30471a90470d" pod="openshift-marketplace/certified-operators-t6lt4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-t6lt4\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:08 crc kubenswrapper[4857]: I1128 13:33:08.623015 4857 status_manager.go:851] "Failed to get status for pod" podUID="0925aef1-cbdf-4f9b-bde9-b18b40e006d4" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:08 crc kubenswrapper[4857]: E1128 13:33:08.864484 4857 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.222:6443: connect: connection refused" interval="3.2s" Nov 28 13:33:09 crc kubenswrapper[4857]: I1128 13:33:09.459658 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-zw8sc" Nov 28 13:33:09 crc kubenswrapper[4857]: I1128 13:33:09.460206 4857 status_manager.go:851] "Failed to get status for pod" podUID="0925aef1-cbdf-4f9b-bde9-b18b40e006d4" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:09 crc kubenswrapper[4857]: I1128 13:33:09.460658 4857 status_manager.go:851] "Failed to get status for pod" podUID="a45744b5-0329-43e7-834a-535eacdf9717" 
pod="openshift-marketplace/community-operators-t6t2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-t6t2s\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:09 crc kubenswrapper[4857]: I1128 13:33:09.461451 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:09 crc kubenswrapper[4857]: I1128 13:33:09.461743 4857 status_manager.go:851] "Failed to get status for pod" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" pod="openshift-marketplace/redhat-operators-zw8sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-zw8sc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:09 crc kubenswrapper[4857]: I1128 13:33:09.462154 4857 status_manager.go:851] "Failed to get status for pod" podUID="1c9b9dd6-86eb-4361-a872-9027be0c909f" pod="openshift-marketplace/redhat-marketplace-z7m4v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-z7m4v\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:09 crc kubenswrapper[4857]: I1128 13:33:09.462818 4857 status_manager.go:851] "Failed to get status for pod" podUID="88424fcd-c8d4-4df4-8176-30471a90470d" pod="openshift-marketplace/certified-operators-t6lt4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-t6lt4\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:09 crc kubenswrapper[4857]: I1128 13:33:09.983993 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-t6t2s" Nov 28 13:33:09 crc kubenswrapper[4857]: I1128 13:33:09.985287 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-t6t2s" Nov 28 13:33:10 crc kubenswrapper[4857]: I1128 13:33:10.026267 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-t6t2s" Nov 28 13:33:10 crc kubenswrapper[4857]: I1128 13:33:10.026943 4857 status_manager.go:851] "Failed to get status for pod" podUID="a45744b5-0329-43e7-834a-535eacdf9717" pod="openshift-marketplace/community-operators-t6t2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-t6t2s\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:10 crc kubenswrapper[4857]: I1128 13:33:10.027333 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:10 crc kubenswrapper[4857]: I1128 13:33:10.027545 4857 status_manager.go:851] "Failed to get status for pod" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" pod="openshift-marketplace/redhat-operators-zw8sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-zw8sc\": 
dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:10 crc kubenswrapper[4857]: I1128 13:33:10.027799 4857 status_manager.go:851] "Failed to get status for pod" podUID="1c9b9dd6-86eb-4361-a872-9027be0c909f" pod="openshift-marketplace/redhat-marketplace-z7m4v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-z7m4v\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:10 crc kubenswrapper[4857]: I1128 13:33:10.028026 4857 status_manager.go:851] "Failed to get status for pod" podUID="88424fcd-c8d4-4df4-8176-30471a90470d" pod="openshift-marketplace/certified-operators-t6lt4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-t6lt4\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:10 crc kubenswrapper[4857]: I1128 13:33:10.028192 4857 status_manager.go:851] "Failed to get status for pod" podUID="0925aef1-cbdf-4f9b-bde9-b18b40e006d4" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:10 crc kubenswrapper[4857]: I1128 13:33:10.231313 4857 status_manager.go:851] "Failed to get status for pod" podUID="a45744b5-0329-43e7-834a-535eacdf9717" pod="openshift-marketplace/community-operators-t6t2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-t6t2s\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:10 crc kubenswrapper[4857]: I1128 13:33:10.231830 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:10 crc kubenswrapper[4857]: I1128 13:33:10.232129 4857 status_manager.go:851] "Failed to get status for pod" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" pod="openshift-marketplace/redhat-operators-zw8sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-zw8sc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:10 crc kubenswrapper[4857]: I1128 13:33:10.232633 4857 status_manager.go:851] "Failed to get status for pod" podUID="1c9b9dd6-86eb-4361-a872-9027be0c909f" pod="openshift-marketplace/redhat-marketplace-z7m4v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-z7m4v\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:10 crc kubenswrapper[4857]: I1128 13:33:10.233309 4857 status_manager.go:851] "Failed to get status for pod" podUID="88424fcd-c8d4-4df4-8176-30471a90470d" pod="openshift-marketplace/certified-operators-t6lt4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-t6lt4\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:10 crc kubenswrapper[4857]: I1128 13:33:10.233629 4857 status_manager.go:851] "Failed to get status for pod" podUID="0925aef1-cbdf-4f9b-bde9-b18b40e006d4" pod="openshift-kube-apiserver/installer-9-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:10 crc kubenswrapper[4857]: I1128 13:33:10.489047 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-t6t2s" Nov 28 13:33:10 crc kubenswrapper[4857]: I1128 13:33:10.489595 4857 status_manager.go:851] "Failed to get status for pod" podUID="a45744b5-0329-43e7-834a-535eacdf9717" pod="openshift-marketplace/community-operators-t6t2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-t6t2s\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:10 crc kubenswrapper[4857]: I1128 13:33:10.490025 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:10 crc kubenswrapper[4857]: I1128 13:33:10.490278 4857 status_manager.go:851] "Failed to get status for pod" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" pod="openshift-marketplace/redhat-operators-zw8sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-zw8sc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:10 crc kubenswrapper[4857]: I1128 13:33:10.490792 4857 status_manager.go:851] "Failed to get status for pod" podUID="1c9b9dd6-86eb-4361-a872-9027be0c909f" pod="openshift-marketplace/redhat-marketplace-z7m4v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-z7m4v\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:10 crc kubenswrapper[4857]: I1128 13:33:10.491810 4857 status_manager.go:851] "Failed to get status for pod" podUID="88424fcd-c8d4-4df4-8176-30471a90470d" pod="openshift-marketplace/certified-operators-t6lt4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-t6lt4\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:10 crc kubenswrapper[4857]: I1128 13:33:10.492109 4857 status_manager.go:851] "Failed to get status for pod" podUID="0925aef1-cbdf-4f9b-bde9-b18b40e006d4" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:10 crc kubenswrapper[4857]: I1128 13:33:10.990124 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-z7m4v" Nov 28 13:33:10 crc kubenswrapper[4857]: I1128 13:33:10.990182 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-z7m4v" Nov 28 13:33:11 crc kubenswrapper[4857]: I1128 13:33:11.033212 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-z7m4v" Nov 28 13:33:11 crc kubenswrapper[4857]: I1128 13:33:11.033957 4857 status_manager.go:851] "Failed to get status for pod" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" pod="openshift-marketplace/redhat-operators-zw8sc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-zw8sc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:11 crc kubenswrapper[4857]: I1128 13:33:11.034397 4857 status_manager.go:851] "Failed to get status for pod" podUID="1c9b9dd6-86eb-4361-a872-9027be0c909f" pod="openshift-marketplace/redhat-marketplace-z7m4v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-z7m4v\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:11 crc kubenswrapper[4857]: I1128 13:33:11.034775 4857 status_manager.go:851] "Failed to get status for pod" podUID="88424fcd-c8d4-4df4-8176-30471a90470d" pod="openshift-marketplace/certified-operators-t6lt4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-t6lt4\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:11 crc kubenswrapper[4857]: I1128 13:33:11.035169 4857 status_manager.go:851] "Failed to get status for pod" podUID="0925aef1-cbdf-4f9b-bde9-b18b40e006d4" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:11 crc kubenswrapper[4857]: I1128 13:33:11.035574 4857 status_manager.go:851] "Failed to get status for pod" podUID="a45744b5-0329-43e7-834a-535eacdf9717" pod="openshift-marketplace/community-operators-t6t2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-t6t2s\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:11 crc kubenswrapper[4857]: I1128 13:33:11.035918 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:11 crc kubenswrapper[4857]: I1128 13:33:11.483529 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-z7m4v" Nov 28 13:33:11 crc kubenswrapper[4857]: I1128 13:33:11.484395 4857 status_manager.go:851] "Failed to get status for pod" podUID="a45744b5-0329-43e7-834a-535eacdf9717" pod="openshift-marketplace/community-operators-t6t2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-t6t2s\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:11 crc kubenswrapper[4857]: I1128 13:33:11.485431 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:11 crc kubenswrapper[4857]: I1128 13:33:11.486100 4857 status_manager.go:851] "Failed to get status for pod" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" pod="openshift-marketplace/redhat-operators-zw8sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-zw8sc\": dial tcp 38.102.83.222:6443: connect: connection 
refused" Nov 28 13:33:11 crc kubenswrapper[4857]: I1128 13:33:11.486496 4857 status_manager.go:851] "Failed to get status for pod" podUID="1c9b9dd6-86eb-4361-a872-9027be0c909f" pod="openshift-marketplace/redhat-marketplace-z7m4v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-z7m4v\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:11 crc kubenswrapper[4857]: I1128 13:33:11.486841 4857 status_manager.go:851] "Failed to get status for pod" podUID="88424fcd-c8d4-4df4-8176-30471a90470d" pod="openshift-marketplace/certified-operators-t6lt4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-t6lt4\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:11 crc kubenswrapper[4857]: I1128 13:33:11.487263 4857 status_manager.go:851] "Failed to get status for pod" podUID="0925aef1-cbdf-4f9b-bde9-b18b40e006d4" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:12 crc kubenswrapper[4857]: E1128 13:33:12.065339 4857 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.222:6443: connect: connection refused" interval="6.4s" Nov 28 13:33:13 crc kubenswrapper[4857]: E1128 13:33:13.371827 4857 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/events\": dial tcp 38.102.83.222:6443: connect: connection refused" event="&Event{ObjectMeta:{redhat-marketplace-z7m4v.187c2ef4a6a09086 openshift-marketplace 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-marketplace,Name:redhat-marketplace-z7m4v,UID:1c9b9dd6-86eb-4361-a872-9027be0c909f,APIVersion:v1,ResourceVersion:29419,FieldPath:spec.initContainers{extract-content},},Reason:Pulling,Message:Pulling image \"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\",Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-28 13:33:02.324830342 +0000 UTC m=+232.448771779,LastTimestamp:2025-11-28 13:33:02.324830342 +0000 UTC m=+232.448771779,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 28 13:33:16 crc kubenswrapper[4857]: I1128 13:33:16.228413 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:33:16 crc kubenswrapper[4857]: I1128 13:33:16.229777 4857 status_manager.go:851] "Failed to get status for pod" podUID="0925aef1-cbdf-4f9b-bde9-b18b40e006d4" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:16 crc kubenswrapper[4857]: I1128 13:33:16.230640 4857 status_manager.go:851] "Failed to get status for pod" podUID="a45744b5-0329-43e7-834a-535eacdf9717" pod="openshift-marketplace/community-operators-t6t2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-t6t2s\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:16 crc kubenswrapper[4857]: I1128 13:33:16.231154 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:16 crc kubenswrapper[4857]: I1128 13:33:16.231424 4857 status_manager.go:851] "Failed to get status for pod" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" pod="openshift-marketplace/redhat-operators-zw8sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-zw8sc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:16 crc kubenswrapper[4857]: I1128 13:33:16.232277 4857 status_manager.go:851] "Failed to get status for pod" podUID="1c9b9dd6-86eb-4361-a872-9027be0c909f" pod="openshift-marketplace/redhat-marketplace-z7m4v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-z7m4v\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:16 crc kubenswrapper[4857]: I1128 13:33:16.232628 4857 status_manager.go:851] "Failed to get status for pod" podUID="88424fcd-c8d4-4df4-8176-30471a90470d" pod="openshift-marketplace/certified-operators-t6lt4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-t6lt4\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:16 crc kubenswrapper[4857]: I1128 13:33:16.244657 4857 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="69c268e4-8cda-4a40-a67e-90e06bd93d09" Nov 28 13:33:16 crc kubenswrapper[4857]: I1128 13:33:16.244693 4857 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="69c268e4-8cda-4a40-a67e-90e06bd93d09" Nov 28 13:33:16 crc kubenswrapper[4857]: E1128 13:33:16.245160 4857 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:33:16 crc kubenswrapper[4857]: I1128 13:33:16.245485 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:33:16 crc kubenswrapper[4857]: W1128 13:33:16.264189 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-2087c0c0536547edbbe441e60ad0c475f134a4b031fcdb10558a773094162d55 WatchSource:0}: Error finding container 2087c0c0536547edbbe441e60ad0c475f134a4b031fcdb10558a773094162d55: Status 404 returned error can't find the container with id 2087c0c0536547edbbe441e60ad0c475f134a4b031fcdb10558a773094162d55 Nov 28 13:33:16 crc kubenswrapper[4857]: I1128 13:33:16.455773 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"2087c0c0536547edbbe441e60ad0c475f134a4b031fcdb10558a773094162d55"} Nov 28 13:33:20 crc kubenswrapper[4857]: I1128 13:33:20.637631 4857 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Liveness probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Nov 28 13:33:20 crc kubenswrapper[4857]: I1128 13:33:20.638073 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Nov 28 13:33:20 crc kubenswrapper[4857]: I1128 13:33:20.653272 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-m658x" podUID="7c609810-eec1-4f73-ad29-24fc190b1ffa" containerName="oauth-openshift" containerID="cri-o://deef88f314cf372c9bcefc03c25b4fb70427d4c2fa82f99f43c98c2426bd9f7b" gracePeriod=15 Nov 28 13:33:20 crc kubenswrapper[4857]: E1128 13:33:20.653636 4857 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.222:6443: connect: connection refused" interval="7s" Nov 28 13:33:20 crc kubenswrapper[4857]: I1128 13:33:20.661620 4857 status_manager.go:851] "Failed to get status for pod" podUID="a45744b5-0329-43e7-834a-535eacdf9717" pod="openshift-marketplace/community-operators-t6t2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-t6t2s\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:20 crc kubenswrapper[4857]: I1128 13:33:20.661857 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:20 crc kubenswrapper[4857]: I1128 13:33:20.662072 4857 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": 
dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:20 crc kubenswrapper[4857]: I1128 13:33:20.662283 4857 status_manager.go:851] "Failed to get status for pod" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" pod="openshift-marketplace/redhat-operators-zw8sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-zw8sc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:20 crc kubenswrapper[4857]: I1128 13:33:20.662540 4857 status_manager.go:851] "Failed to get status for pod" podUID="1c9b9dd6-86eb-4361-a872-9027be0c909f" pod="openshift-marketplace/redhat-marketplace-z7m4v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-z7m4v\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:20 crc kubenswrapper[4857]: I1128 13:33:20.663320 4857 status_manager.go:851] "Failed to get status for pod" podUID="88424fcd-c8d4-4df4-8176-30471a90470d" pod="openshift-marketplace/certified-operators-t6lt4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-t6lt4\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:20 crc kubenswrapper[4857]: I1128 13:33:20.663530 4857 status_manager.go:851] "Failed to get status for pod" podUID="0925aef1-cbdf-4f9b-bde9-b18b40e006d4" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:20 crc kubenswrapper[4857]: I1128 13:33:20.974989 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-m658x" Nov 28 13:33:20 crc kubenswrapper[4857]: I1128 13:33:20.975524 4857 status_manager.go:851] "Failed to get status for pod" podUID="0925aef1-cbdf-4f9b-bde9-b18b40e006d4" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:20 crc kubenswrapper[4857]: I1128 13:33:20.975930 4857 status_manager.go:851] "Failed to get status for pod" podUID="7c609810-eec1-4f73-ad29-24fc190b1ffa" pod="openshift-authentication/oauth-openshift-558db77b4-m658x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-m658x\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:20 crc kubenswrapper[4857]: I1128 13:33:20.976339 4857 status_manager.go:851] "Failed to get status for pod" podUID="a45744b5-0329-43e7-834a-535eacdf9717" pod="openshift-marketplace/community-operators-t6t2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-t6t2s\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:20 crc kubenswrapper[4857]: I1128 13:33:20.976601 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:20 crc kubenswrapper[4857]: I1128 13:33:20.976803 4857 status_manager.go:851] "Failed to 
get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:20 crc kubenswrapper[4857]: I1128 13:33:20.977035 4857 status_manager.go:851] "Failed to get status for pod" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" pod="openshift-marketplace/redhat-operators-zw8sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-zw8sc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:20 crc kubenswrapper[4857]: I1128 13:33:20.977293 4857 status_manager.go:851] "Failed to get status for pod" podUID="1c9b9dd6-86eb-4361-a872-9027be0c909f" pod="openshift-marketplace/redhat-marketplace-z7m4v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-z7m4v\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:20 crc kubenswrapper[4857]: I1128 13:33:20.977510 4857 status_manager.go:851] "Failed to get status for pod" podUID="88424fcd-c8d4-4df4-8176-30471a90470d" pod="openshift-marketplace/certified-operators-t6lt4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-t6lt4\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.154237 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-session\") pod \"7c609810-eec1-4f73-ad29-24fc190b1ffa\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.154621 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7c609810-eec1-4f73-ad29-24fc190b1ffa-audit-dir\") pod \"7c609810-eec1-4f73-ad29-24fc190b1ffa\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.154653 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7mbsm\" (UniqueName: \"kubernetes.io/projected/7c609810-eec1-4f73-ad29-24fc190b1ffa-kube-api-access-7mbsm\") pod \"7c609810-eec1-4f73-ad29-24fc190b1ffa\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.154674 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7c609810-eec1-4f73-ad29-24fc190b1ffa-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "7c609810-eec1-4f73-ad29-24fc190b1ffa" (UID: "7c609810-eec1-4f73-ad29-24fc190b1ffa"). InnerVolumeSpecName "audit-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.154685 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-serving-cert\") pod \"7c609810-eec1-4f73-ad29-24fc190b1ffa\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.154742 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/7c609810-eec1-4f73-ad29-24fc190b1ffa-audit-policies\") pod \"7c609810-eec1-4f73-ad29-24fc190b1ffa\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.154768 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-user-idp-0-file-data\") pod \"7c609810-eec1-4f73-ad29-24fc190b1ffa\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.154814 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-user-template-login\") pod \"7c609810-eec1-4f73-ad29-24fc190b1ffa\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.154843 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-router-certs\") pod \"7c609810-eec1-4f73-ad29-24fc190b1ffa\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.154867 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-ocp-branding-template\") pod \"7c609810-eec1-4f73-ad29-24fc190b1ffa\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.154895 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-user-template-error\") pod \"7c609810-eec1-4f73-ad29-24fc190b1ffa\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.154922 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-service-ca\") pod \"7c609810-eec1-4f73-ad29-24fc190b1ffa\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.154989 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-trusted-ca-bundle\") pod \"7c609810-eec1-4f73-ad29-24fc190b1ffa\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " Nov 28 13:33:21 crc 
kubenswrapper[4857]: I1128 13:33:21.155015 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-user-template-provider-selection\") pod \"7c609810-eec1-4f73-ad29-24fc190b1ffa\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.155040 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-cliconfig\") pod \"7c609810-eec1-4f73-ad29-24fc190b1ffa\" (UID: \"7c609810-eec1-4f73-ad29-24fc190b1ffa\") " Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.155210 4857 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7c609810-eec1-4f73-ad29-24fc190b1ffa-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.155879 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "7c609810-eec1-4f73-ad29-24fc190b1ffa" (UID: "7c609810-eec1-4f73-ad29-24fc190b1ffa"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.155980 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "7c609810-eec1-4f73-ad29-24fc190b1ffa" (UID: "7c609810-eec1-4f73-ad29-24fc190b1ffa"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.156414 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "7c609810-eec1-4f73-ad29-24fc190b1ffa" (UID: "7c609810-eec1-4f73-ad29-24fc190b1ffa"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.156412 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7c609810-eec1-4f73-ad29-24fc190b1ffa-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "7c609810-eec1-4f73-ad29-24fc190b1ffa" (UID: "7c609810-eec1-4f73-ad29-24fc190b1ffa"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.161397 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c609810-eec1-4f73-ad29-24fc190b1ffa-kube-api-access-7mbsm" (OuterVolumeSpecName: "kube-api-access-7mbsm") pod "7c609810-eec1-4f73-ad29-24fc190b1ffa" (UID: "7c609810-eec1-4f73-ad29-24fc190b1ffa"). InnerVolumeSpecName "kube-api-access-7mbsm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.162028 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "7c609810-eec1-4f73-ad29-24fc190b1ffa" (UID: "7c609810-eec1-4f73-ad29-24fc190b1ffa"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.163161 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "7c609810-eec1-4f73-ad29-24fc190b1ffa" (UID: "7c609810-eec1-4f73-ad29-24fc190b1ffa"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.163385 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "7c609810-eec1-4f73-ad29-24fc190b1ffa" (UID: "7c609810-eec1-4f73-ad29-24fc190b1ffa"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.164612 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "7c609810-eec1-4f73-ad29-24fc190b1ffa" (UID: "7c609810-eec1-4f73-ad29-24fc190b1ffa"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.164729 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "7c609810-eec1-4f73-ad29-24fc190b1ffa" (UID: "7c609810-eec1-4f73-ad29-24fc190b1ffa"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.165075 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "7c609810-eec1-4f73-ad29-24fc190b1ffa" (UID: "7c609810-eec1-4f73-ad29-24fc190b1ffa"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.165155 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "7c609810-eec1-4f73-ad29-24fc190b1ffa" (UID: "7c609810-eec1-4f73-ad29-24fc190b1ffa"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.165325 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "7c609810-eec1-4f73-ad29-24fc190b1ffa" (UID: "7c609810-eec1-4f73-ad29-24fc190b1ffa"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.256181 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.256211 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.256221 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.256236 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.256314 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.256421 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7mbsm\" (UniqueName: \"kubernetes.io/projected/7c609810-eec1-4f73-ad29-24fc190b1ffa-kube-api-access-7mbsm\") on node \"crc\" DevicePath \"\"" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.256485 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.256600 4857 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/7c609810-eec1-4f73-ad29-24fc190b1ffa-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.256642 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.256657 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 28 13:33:21 crc kubenswrapper[4857]: 
I1128 13:33:21.256670 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.256683 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.256697 4857 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/7c609810-eec1-4f73-ad29-24fc190b1ffa-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.676897 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-m658x" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.676897 4857 generic.go:334] "Generic (PLEG): container finished" podID="7c609810-eec1-4f73-ad29-24fc190b1ffa" containerID="deef88f314cf372c9bcefc03c25b4fb70427d4c2fa82f99f43c98c2426bd9f7b" exitCode=0 Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.678256 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-m658x" event={"ID":"7c609810-eec1-4f73-ad29-24fc190b1ffa","Type":"ContainerDied","Data":"deef88f314cf372c9bcefc03c25b4fb70427d4c2fa82f99f43c98c2426bd9f7b"} Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.678305 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-m658x" event={"ID":"7c609810-eec1-4f73-ad29-24fc190b1ffa","Type":"ContainerDied","Data":"2aa97746e6549ba1a69644620130772b06b5346b1743e7a6f3eb72c8e7a2232d"} Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.678325 4857 scope.go:117] "RemoveContainer" containerID="deef88f314cf372c9bcefc03c25b4fb70427d4c2fa82f99f43c98c2426bd9f7b" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.679183 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.679436 4857 status_manager.go:851] "Failed to get status for pod" podUID="7c609810-eec1-4f73-ad29-24fc190b1ffa" pod="openshift-authentication/oauth-openshift-558db77b4-m658x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-m658x\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.679755 4857 status_manager.go:851] "Failed to get status for pod" podUID="a45744b5-0329-43e7-834a-535eacdf9717" pod="openshift-marketplace/community-operators-t6t2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-t6t2s\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.680094 4857 status_manager.go:851] "Failed to get status for pod" 
podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.680407 4857 status_manager.go:851] "Failed to get status for pod" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" pod="openshift-marketplace/redhat-operators-zw8sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-zw8sc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.681245 4857 status_manager.go:851] "Failed to get status for pod" podUID="1c9b9dd6-86eb-4361-a872-9027be0c909f" pod="openshift-marketplace/redhat-marketplace-z7m4v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-z7m4v\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.681730 4857 status_manager.go:851] "Failed to get status for pod" podUID="88424fcd-c8d4-4df4-8176-30471a90470d" pod="openshift-marketplace/certified-operators-t6lt4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-t6lt4\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.681847 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.681883 4857 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0" exitCode=1 Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.681930 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0"} Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.682202 4857 status_manager.go:851] "Failed to get status for pod" podUID="0925aef1-cbdf-4f9b-bde9-b18b40e006d4" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.682496 4857 scope.go:117] "RemoveContainer" containerID="669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.682489 4857 status_manager.go:851] "Failed to get status for pod" podUID="7c609810-eec1-4f73-ad29-24fc190b1ffa" pod="openshift-authentication/oauth-openshift-558db77b4-m658x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-m658x\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.682926 4857 status_manager.go:851] "Failed to get status for pod" podUID="a45744b5-0329-43e7-834a-535eacdf9717" pod="openshift-marketplace/community-operators-t6t2s" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-t6t2s\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.683457 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.684063 4857 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.684333 4857 status_manager.go:851] "Failed to get status for pod" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" pod="openshift-marketplace/redhat-operators-zw8sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-zw8sc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.684556 4857 status_manager.go:851] "Failed to get status for pod" podUID="1c9b9dd6-86eb-4361-a872-9027be0c909f" pod="openshift-marketplace/redhat-marketplace-z7m4v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-z7m4v\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.684621 4857 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="f3a5d18a04dfd66cbe3bf048c94c9741be59257489ddc645e5a70877e27e91d7" exitCode=0 Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.684680 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"f3a5d18a04dfd66cbe3bf048c94c9741be59257489ddc645e5a70877e27e91d7"} Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.684745 4857 status_manager.go:851] "Failed to get status for pod" podUID="88424fcd-c8d4-4df4-8176-30471a90470d" pod="openshift-marketplace/certified-operators-t6lt4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-t6lt4\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.685020 4857 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="69c268e4-8cda-4a40-a67e-90e06bd93d09" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.685046 4857 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="69c268e4-8cda-4a40-a67e-90e06bd93d09" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.685111 4857 status_manager.go:851] "Failed to get status for pod" podUID="0925aef1-cbdf-4f9b-bde9-b18b40e006d4" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:21 crc 
kubenswrapper[4857]: E1128 13:33:21.685472 4857 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.685555 4857 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.686049 4857 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.686367 4857 status_manager.go:851] "Failed to get status for pod" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" pod="openshift-marketplace/redhat-operators-zw8sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-zw8sc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.686663 4857 status_manager.go:851] "Failed to get status for pod" podUID="1c9b9dd6-86eb-4361-a872-9027be0c909f" pod="openshift-marketplace/redhat-marketplace-z7m4v" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-z7m4v\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.687008 4857 status_manager.go:851] "Failed to get status for pod" podUID="88424fcd-c8d4-4df4-8176-30471a90470d" pod="openshift-marketplace/certified-operators-t6lt4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-t6lt4\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.687252 4857 status_manager.go:851] "Failed to get status for pod" podUID="0925aef1-cbdf-4f9b-bde9-b18b40e006d4" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.687493 4857 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.687738 4857 status_manager.go:851] "Failed to get status for pod" podUID="7c609810-eec1-4f73-ad29-24fc190b1ffa" pod="openshift-authentication/oauth-openshift-558db77b4-m658x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-m658x\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 
13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.688065 4857 status_manager.go:851] "Failed to get status for pod" podUID="a45744b5-0329-43e7-834a-535eacdf9717" pod="openshift-marketplace/community-operators-t6t2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-t6t2s\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.688360 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.693716 4857 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.693887 4857 status_manager.go:851] "Failed to get status for pod" podUID="7c609810-eec1-4f73-ad29-24fc190b1ffa" pod="openshift-authentication/oauth-openshift-558db77b4-m658x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-m658x\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.694250 4857 status_manager.go:851] "Failed to get status for pod" podUID="a45744b5-0329-43e7-834a-535eacdf9717" pod="openshift-marketplace/community-operators-t6t2s" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-t6t2s\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.694765 4857 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.695380 4857 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.695981 4857 status_manager.go:851] "Failed to get status for pod" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" pod="openshift-marketplace/redhat-operators-zw8sc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-zw8sc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.696811 4857 status_manager.go:851] "Failed to get status for pod" podUID="1c9b9dd6-86eb-4361-a872-9027be0c909f" pod="openshift-marketplace/redhat-marketplace-z7m4v" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-z7m4v\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.697068 4857 status_manager.go:851] "Failed to get status for pod" podUID="88424fcd-c8d4-4df4-8176-30471a90470d" pod="openshift-marketplace/certified-operators-t6lt4" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-t6lt4\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.697399 4857 status_manager.go:851] "Failed to get status for pod" podUID="0925aef1-cbdf-4f9b-bde9-b18b40e006d4" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.222:6443: connect: connection refused" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.700347 4857 scope.go:117] "RemoveContainer" containerID="deef88f314cf372c9bcefc03c25b4fb70427d4c2fa82f99f43c98c2426bd9f7b" Nov 28 13:33:21 crc kubenswrapper[4857]: E1128 13:33:21.700838 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"deef88f314cf372c9bcefc03c25b4fb70427d4c2fa82f99f43c98c2426bd9f7b\": container with ID starting with deef88f314cf372c9bcefc03c25b4fb70427d4c2fa82f99f43c98c2426bd9f7b not found: ID does not exist" containerID="deef88f314cf372c9bcefc03c25b4fb70427d4c2fa82f99f43c98c2426bd9f7b" Nov 28 13:33:21 crc kubenswrapper[4857]: I1128 13:33:21.700872 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"deef88f314cf372c9bcefc03c25b4fb70427d4c2fa82f99f43c98c2426bd9f7b"} err="failed to get container status \"deef88f314cf372c9bcefc03c25b4fb70427d4c2fa82f99f43c98c2426bd9f7b\": rpc error: code = NotFound desc = could not find container \"deef88f314cf372c9bcefc03c25b4fb70427d4c2fa82f99f43c98c2426bd9f7b\": container with ID starting with deef88f314cf372c9bcefc03c25b4fb70427d4c2fa82f99f43c98c2426bd9f7b not found: ID does not exist" Nov 28 13:33:22 crc kubenswrapper[4857]: I1128 13:33:22.702823 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 28 13:33:22 crc kubenswrapper[4857]: I1128 13:33:22.703258 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"64d9a46039ca93b2fdd84dfed721e48ee0a3e7ec02d62e36c70b77f91d756a8a"} Nov 28 13:33:22 crc kubenswrapper[4857]: I1128 13:33:22.708761 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"20112e5846989642a88d20e5d1db7b12a2c1e8e87e8a3944b0dda6ff8200400f"} Nov 28 13:33:22 crc kubenswrapper[4857]: I1128 13:33:22.708808 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"dd184b5c741133b532fe0e87594b8fc919c0550d35235b037843c22116d53eaf"} Nov 28 13:33:22 crc kubenswrapper[4857]: I1128 13:33:22.708820 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"00a86d523f6733bbbd3dd55526d758f14bf34586013dbfe2a950be237b389904"} Nov 28 13:33:22 crc kubenswrapper[4857]: I1128 13:33:22.708831 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"a55d0c65aa7cb0f2585467e03f09f86971f29cddcdedb8ee5aa0698308f71363"} Nov 28 13:33:23 crc kubenswrapper[4857]: I1128 13:33:23.715541 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"296435303fd09ac6e78f404785edb1aaf0717e71b6f1e179f4efa62b2628bc70"} Nov 28 13:33:23 crc kubenswrapper[4857]: I1128 13:33:23.715887 4857 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="69c268e4-8cda-4a40-a67e-90e06bd93d09" Nov 28 13:33:23 crc kubenswrapper[4857]: I1128 13:33:23.715902 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:33:23 crc kubenswrapper[4857]: I1128 13:33:23.715912 4857 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="69c268e4-8cda-4a40-a67e-90e06bd93d09" Nov 28 13:33:25 crc kubenswrapper[4857]: I1128 13:33:25.714153 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 13:33:26 crc kubenswrapper[4857]: I1128 13:33:26.245854 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:33:26 crc kubenswrapper[4857]: I1128 13:33:26.246183 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:33:26 crc kubenswrapper[4857]: I1128 13:33:26.251091 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:33:27 crc kubenswrapper[4857]: I1128 13:33:27.970885 4857 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Nov 28 13:33:27 crc kubenswrapper[4857]: I1128 13:33:27.971621 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Nov 28 13:33:27 crc kubenswrapper[4857]: I1128 13:33:27.970912 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 13:33:28 crc kubenswrapper[4857]: I1128 13:33:28.723329 4857 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:33:28 crc kubenswrapper[4857]: I1128 13:33:28.741491 4857 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="69c268e4-8cda-4a40-a67e-90e06bd93d09" Nov 28 
13:33:28 crc kubenswrapper[4857]: I1128 13:33:28.741724 4857 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="69c268e4-8cda-4a40-a67e-90e06bd93d09" Nov 28 13:33:28 crc kubenswrapper[4857]: I1128 13:33:28.744976 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:33:28 crc kubenswrapper[4857]: I1128 13:33:28.747053 4857 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="c4cd0cff-4778-41b5-b099-924080823167" Nov 28 13:33:29 crc kubenswrapper[4857]: I1128 13:33:29.747928 4857 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="69c268e4-8cda-4a40-a67e-90e06bd93d09" Nov 28 13:33:29 crc kubenswrapper[4857]: I1128 13:33:29.748021 4857 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="69c268e4-8cda-4a40-a67e-90e06bd93d09" Nov 28 13:33:30 crc kubenswrapper[4857]: I1128 13:33:30.251500 4857 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="c4cd0cff-4778-41b5-b099-924080823167" Nov 28 13:33:37 crc kubenswrapper[4857]: I1128 13:33:37.718927 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 28 13:33:37 crc kubenswrapper[4857]: I1128 13:33:37.971877 4857 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Nov 28 13:33:37 crc kubenswrapper[4857]: I1128 13:33:37.972040 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Nov 28 13:33:38 crc kubenswrapper[4857]: I1128 13:33:38.373125 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 28 13:33:38 crc kubenswrapper[4857]: I1128 13:33:38.782448 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 28 13:33:39 crc kubenswrapper[4857]: I1128 13:33:39.463219 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 28 13:33:40 crc kubenswrapper[4857]: I1128 13:33:40.592478 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 28 13:33:40 crc kubenswrapper[4857]: I1128 13:33:40.821240 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 28 13:33:41 crc kubenswrapper[4857]: I1128 13:33:41.229878 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 28 13:33:41 crc kubenswrapper[4857]: I1128 13:33:41.231872 4857 reflector.go:368] 
Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 28 13:33:41 crc kubenswrapper[4857]: I1128 13:33:41.274418 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 28 13:33:41 crc kubenswrapper[4857]: I1128 13:33:41.312831 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 28 13:33:41 crc kubenswrapper[4857]: I1128 13:33:41.616123 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 28 13:33:41 crc kubenswrapper[4857]: I1128 13:33:41.734305 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 28 13:33:41 crc kubenswrapper[4857]: I1128 13:33:41.768935 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 28 13:33:42 crc kubenswrapper[4857]: I1128 13:33:42.002308 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 28 13:33:42 crc kubenswrapper[4857]: I1128 13:33:42.217464 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 28 13:33:42 crc kubenswrapper[4857]: I1128 13:33:42.227312 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 28 13:33:42 crc kubenswrapper[4857]: I1128 13:33:42.366829 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 28 13:33:42 crc kubenswrapper[4857]: I1128 13:33:42.521218 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 28 13:33:42 crc kubenswrapper[4857]: I1128 13:33:42.555931 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 28 13:33:42 crc kubenswrapper[4857]: I1128 13:33:42.772132 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 28 13:33:42 crc kubenswrapper[4857]: I1128 13:33:42.955270 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 28 13:33:42 crc kubenswrapper[4857]: I1128 13:33:42.956696 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 28 13:33:43 crc kubenswrapper[4857]: I1128 13:33:43.120102 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 28 13:33:43 crc kubenswrapper[4857]: I1128 13:33:43.186815 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 28 13:33:43 crc kubenswrapper[4857]: I1128 13:33:43.285458 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 28 13:33:43 crc kubenswrapper[4857]: I1128 13:33:43.338284 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 28 13:33:43 crc kubenswrapper[4857]: I1128 13:33:43.371299 4857 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 28 13:33:43 crc kubenswrapper[4857]: I1128 13:33:43.383049 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 28 13:33:43 crc kubenswrapper[4857]: I1128 13:33:43.550293 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 28 13:33:43 crc kubenswrapper[4857]: I1128 13:33:43.559470 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 28 13:33:43 crc kubenswrapper[4857]: I1128 13:33:43.559886 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 28 13:33:43 crc kubenswrapper[4857]: I1128 13:33:43.612834 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 28 13:33:43 crc kubenswrapper[4857]: I1128 13:33:43.638590 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 28 13:33:43 crc kubenswrapper[4857]: I1128 13:33:43.698144 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 28 13:33:43 crc kubenswrapper[4857]: I1128 13:33:43.708227 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 28 13:33:44 crc kubenswrapper[4857]: I1128 13:33:44.025469 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 28 13:33:44 crc kubenswrapper[4857]: I1128 13:33:44.061162 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 28 13:33:44 crc kubenswrapper[4857]: I1128 13:33:44.288897 4857 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 28 13:33:44 crc kubenswrapper[4857]: I1128 13:33:44.341888 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 28 13:33:44 crc kubenswrapper[4857]: I1128 13:33:44.553850 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 28 13:33:44 crc kubenswrapper[4857]: I1128 13:33:44.636411 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 28 13:33:44 crc kubenswrapper[4857]: I1128 13:33:44.711676 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 28 13:33:44 crc kubenswrapper[4857]: I1128 13:33:44.721457 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 28 13:33:44 crc kubenswrapper[4857]: I1128 13:33:44.749548 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 28 13:33:44 crc kubenswrapper[4857]: I1128 13:33:44.895270 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 28 13:33:44 crc kubenswrapper[4857]: I1128 13:33:44.984487 4857 reflector.go:368] Caches 
populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 28 13:33:45 crc kubenswrapper[4857]: I1128 13:33:45.300102 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 28 13:33:45 crc kubenswrapper[4857]: I1128 13:33:45.375249 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 28 13:33:45 crc kubenswrapper[4857]: I1128 13:33:45.744330 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 28 13:33:45 crc kubenswrapper[4857]: I1128 13:33:45.813359 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 28 13:33:45 crc kubenswrapper[4857]: I1128 13:33:45.862772 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 28 13:33:45 crc kubenswrapper[4857]: I1128 13:33:45.879536 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 28 13:33:46 crc kubenswrapper[4857]: I1128 13:33:46.091175 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 28 13:33:46 crc kubenswrapper[4857]: I1128 13:33:46.412916 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 28 13:33:46 crc kubenswrapper[4857]: I1128 13:33:46.588770 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 28 13:33:46 crc kubenswrapper[4857]: I1128 13:33:46.900685 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 28 13:33:46 crc kubenswrapper[4857]: I1128 13:33:46.958053 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 28 13:33:46 crc kubenswrapper[4857]: I1128 13:33:46.963121 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 28 13:33:47 crc kubenswrapper[4857]: I1128 13:33:47.148914 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 28 13:33:47 crc kubenswrapper[4857]: I1128 13:33:47.199571 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 28 13:33:47 crc kubenswrapper[4857]: I1128 13:33:47.343737 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 28 13:33:47 crc kubenswrapper[4857]: I1128 13:33:47.349751 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 28 13:33:47 crc kubenswrapper[4857]: I1128 13:33:47.479178 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 28 13:33:47 crc kubenswrapper[4857]: I1128 13:33:47.499247 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 28 13:33:47 crc kubenswrapper[4857]: I1128 13:33:47.510202 4857 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 28 13:33:47 crc kubenswrapper[4857]: I1128 13:33:47.526073 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 28 13:33:47 crc kubenswrapper[4857]: I1128 13:33:47.578426 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 28 13:33:47 crc kubenswrapper[4857]: I1128 13:33:47.627999 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 28 13:33:47 crc kubenswrapper[4857]: I1128 13:33:47.770359 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 28 13:33:47 crc kubenswrapper[4857]: I1128 13:33:47.957575 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 28 13:33:47 crc kubenswrapper[4857]: I1128 13:33:47.971291 4857 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Nov 28 13:33:47 crc kubenswrapper[4857]: I1128 13:33:47.971346 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Nov 28 13:33:47 crc kubenswrapper[4857]: I1128 13:33:47.971397 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 28 13:33:47 crc kubenswrapper[4857]: I1128 13:33:47.971974 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="kube-controller-manager" containerStatusID={"Type":"cri-o","ID":"64d9a46039ca93b2fdd84dfed721e48ee0a3e7ec02d62e36c70b77f91d756a8a"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container kube-controller-manager failed startup probe, will be restarted" Nov 28 13:33:47 crc kubenswrapper[4857]: I1128 13:33:47.972074 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" containerID="cri-o://64d9a46039ca93b2fdd84dfed721e48ee0a3e7ec02d62e36c70b77f91d756a8a" gracePeriod=30 Nov 28 13:33:48 crc kubenswrapper[4857]: I1128 13:33:48.012836 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 28 13:33:48 crc kubenswrapper[4857]: I1128 13:33:48.075313 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 28 13:33:48 crc kubenswrapper[4857]: I1128 13:33:48.157575 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 28 13:33:48 crc kubenswrapper[4857]: I1128 13:33:48.166222 4857 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 28 13:33:48 crc kubenswrapper[4857]: I1128 13:33:48.242003 4857 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 28 13:33:48 crc kubenswrapper[4857]: I1128 13:33:48.243594 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-zw8sc" podStartSLOduration=47.521740034 podStartE2EDuration="50.243574778s" podCreationTimestamp="2025-11-28 13:32:58 +0000 UTC" firstStartedPulling="2025-11-28 13:32:59.243730326 +0000 UTC m=+229.367671763" lastFinishedPulling="2025-11-28 13:33:01.96556507 +0000 UTC m=+232.089506507" observedRunningTime="2025-11-28 13:33:28.408093349 +0000 UTC m=+258.532034796" watchObservedRunningTime="2025-11-28 13:33:48.243574778 +0000 UTC m=+278.367516215" Nov 28 13:33:48 crc kubenswrapper[4857]: I1128 13:33:48.244250 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-z7m4v" podStartSLOduration=45.629753252 podStartE2EDuration="48.244243136s" podCreationTimestamp="2025-11-28 13:33:00 +0000 UTC" firstStartedPulling="2025-11-28 13:33:02.324825282 +0000 UTC m=+232.448766719" lastFinishedPulling="2025-11-28 13:33:04.939315166 +0000 UTC m=+235.063256603" observedRunningTime="2025-11-28 13:33:28.427089158 +0000 UTC m=+258.551030595" watchObservedRunningTime="2025-11-28 13:33:48.244243136 +0000 UTC m=+278.368184573" Nov 28 13:33:48 crc kubenswrapper[4857]: I1128 13:33:48.244829 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=46.244820741 podStartE2EDuration="46.244820741s" podCreationTimestamp="2025-11-28 13:33:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:33:28.367243175 +0000 UTC m=+258.491184612" watchObservedRunningTime="2025-11-28 13:33:48.244820741 +0000 UTC m=+278.368762178" Nov 28 13:33:48 crc kubenswrapper[4857]: I1128 13:33:48.245009 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-t6t2s" podStartSLOduration=46.3020468 podStartE2EDuration="49.245002286s" podCreationTimestamp="2025-11-28 13:32:59 +0000 UTC" firstStartedPulling="2025-11-28 13:33:01.275106161 +0000 UTC m=+231.399047598" lastFinishedPulling="2025-11-28 13:33:04.218061647 +0000 UTC m=+234.342003084" observedRunningTime="2025-11-28 13:33:28.34909734 +0000 UTC m=+258.473038777" watchObservedRunningTime="2025-11-28 13:33:48.245002286 +0000 UTC m=+278.368943723" Nov 28 13:33:48 crc kubenswrapper[4857]: I1128 13:33:48.246781 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-authentication/oauth-openshift-558db77b4-m658x"] Nov 28 13:33:48 crc kubenswrapper[4857]: I1128 13:33:48.246833 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 28 13:33:48 crc kubenswrapper[4857]: I1128 13:33:48.254440 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 28 13:33:48 crc kubenswrapper[4857]: I1128 13:33:48.270925 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 28 13:33:48 crc kubenswrapper[4857]: I1128 
13:33:48.271599 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=20.271581306 podStartE2EDuration="20.271581306s" podCreationTimestamp="2025-11-28 13:33:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:33:48.26730872 +0000 UTC m=+278.391250167" watchObservedRunningTime="2025-11-28 13:33:48.271581306 +0000 UTC m=+278.395522743" Nov 28 13:33:48 crc kubenswrapper[4857]: I1128 13:33:48.810781 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 28 13:33:48 crc kubenswrapper[4857]: I1128 13:33:48.878679 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 28 13:33:48 crc kubenswrapper[4857]: I1128 13:33:48.878903 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 28 13:33:49 crc kubenswrapper[4857]: I1128 13:33:49.033450 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 28 13:33:49 crc kubenswrapper[4857]: I1128 13:33:49.050964 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 28 13:33:49 crc kubenswrapper[4857]: I1128 13:33:49.065114 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 28 13:33:49 crc kubenswrapper[4857]: I1128 13:33:49.082316 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 28 13:33:49 crc kubenswrapper[4857]: I1128 13:33:49.193100 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 28 13:33:49 crc kubenswrapper[4857]: I1128 13:33:49.331673 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 28 13:33:49 crc kubenswrapper[4857]: I1128 13:33:49.355138 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 28 13:33:49 crc kubenswrapper[4857]: I1128 13:33:49.631963 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 28 13:33:49 crc kubenswrapper[4857]: I1128 13:33:49.759413 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 28 13:33:49 crc kubenswrapper[4857]: I1128 13:33:49.805764 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 28 13:33:49 crc kubenswrapper[4857]: I1128 13:33:49.831877 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 28 13:33:49 crc kubenswrapper[4857]: I1128 13:33:49.900591 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 28 13:33:49 crc kubenswrapper[4857]: I1128 13:33:49.927440 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 28 13:33:49 crc kubenswrapper[4857]: 
I1128 13:33:49.957434 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-d878cb77-rsxww"] Nov 28 13:33:49 crc kubenswrapper[4857]: E1128 13:33:49.957672 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0925aef1-cbdf-4f9b-bde9-b18b40e006d4" containerName="installer" Nov 28 13:33:49 crc kubenswrapper[4857]: I1128 13:33:49.957687 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="0925aef1-cbdf-4f9b-bde9-b18b40e006d4" containerName="installer" Nov 28 13:33:49 crc kubenswrapper[4857]: E1128 13:33:49.957705 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c609810-eec1-4f73-ad29-24fc190b1ffa" containerName="oauth-openshift" Nov 28 13:33:49 crc kubenswrapper[4857]: I1128 13:33:49.957713 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c609810-eec1-4f73-ad29-24fc190b1ffa" containerName="oauth-openshift" Nov 28 13:33:49 crc kubenswrapper[4857]: I1128 13:33:49.957814 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c609810-eec1-4f73-ad29-24fc190b1ffa" containerName="oauth-openshift" Nov 28 13:33:49 crc kubenswrapper[4857]: I1128 13:33:49.957836 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="0925aef1-cbdf-4f9b-bde9-b18b40e006d4" containerName="installer" Nov 28 13:33:49 crc kubenswrapper[4857]: I1128 13:33:49.958276 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:49 crc kubenswrapper[4857]: I1128 13:33:49.960461 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 28 13:33:49 crc kubenswrapper[4857]: I1128 13:33:49.960966 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 28 13:33:49 crc kubenswrapper[4857]: I1128 13:33:49.960967 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 28 13:33:49 crc kubenswrapper[4857]: I1128 13:33:49.961917 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 28 13:33:49 crc kubenswrapper[4857]: I1128 13:33:49.961969 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 28 13:33:49 crc kubenswrapper[4857]: I1128 13:33:49.962006 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 28 13:33:49 crc kubenswrapper[4857]: I1128 13:33:49.963025 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 28 13:33:49 crc kubenswrapper[4857]: I1128 13:33:49.964539 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 28 13:33:49 crc kubenswrapper[4857]: I1128 13:33:49.964551 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 28 13:33:49 crc kubenswrapper[4857]: I1128 13:33:49.975427 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 28 13:33:49 crc kubenswrapper[4857]: I1128 13:33:49.976252 4857 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-authentication"/"openshift-service-ca.crt" Nov 28 13:33:49 crc kubenswrapper[4857]: I1128 13:33:49.976351 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 28 13:33:49 crc kubenswrapper[4857]: I1128 13:33:49.982784 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 28 13:33:49 crc kubenswrapper[4857]: I1128 13:33:49.990040 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-d878cb77-rsxww"] Nov 28 13:33:49 crc kubenswrapper[4857]: I1128 13:33:49.999181 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.001462 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.007315 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/0515d4e1-de37-44db-92c1-afe760bb699c-v4-0-config-system-cliconfig\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.007384 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/0515d4e1-de37-44db-92c1-afe760bb699c-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.007661 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/0515d4e1-de37-44db-92c1-afe760bb699c-v4-0-config-system-serving-cert\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.007721 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0515d4e1-de37-44db-92c1-afe760bb699c-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.007751 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0515d4e1-de37-44db-92c1-afe760bb699c-audit-policies\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.007793 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: 
\"kubernetes.io/secret/0515d4e1-de37-44db-92c1-afe760bb699c-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.007821 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/0515d4e1-de37-44db-92c1-afe760bb699c-v4-0-config-user-template-login\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.007852 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/0515d4e1-de37-44db-92c1-afe760bb699c-v4-0-config-system-session\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.007875 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0515d4e1-de37-44db-92c1-afe760bb699c-audit-dir\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.007898 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/0515d4e1-de37-44db-92c1-afe760bb699c-v4-0-config-system-service-ca\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.007921 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/0515d4e1-de37-44db-92c1-afe760bb699c-v4-0-config-user-template-error\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.007976 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/0515d4e1-de37-44db-92c1-afe760bb699c-v4-0-config-system-router-certs\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.109078 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/0515d4e1-de37-44db-92c1-afe760bb699c-v4-0-config-system-cliconfig\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.109142 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/0515d4e1-de37-44db-92c1-afe760bb699c-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.109163 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6rjtw\" (UniqueName: \"kubernetes.io/projected/0515d4e1-de37-44db-92c1-afe760bb699c-kube-api-access-6rjtw\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.109181 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/0515d4e1-de37-44db-92c1-afe760bb699c-v4-0-config-system-serving-cert\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.109206 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0515d4e1-de37-44db-92c1-afe760bb699c-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.109222 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0515d4e1-de37-44db-92c1-afe760bb699c-audit-policies\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.109244 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/0515d4e1-de37-44db-92c1-afe760bb699c-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.109262 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/0515d4e1-de37-44db-92c1-afe760bb699c-v4-0-config-user-template-login\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.109283 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/0515d4e1-de37-44db-92c1-afe760bb699c-v4-0-config-system-session\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.109299 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" 
(UniqueName: \"kubernetes.io/host-path/0515d4e1-de37-44db-92c1-afe760bb699c-audit-dir\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.109315 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/0515d4e1-de37-44db-92c1-afe760bb699c-v4-0-config-system-service-ca\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.109331 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/0515d4e1-de37-44db-92c1-afe760bb699c-v4-0-config-user-template-error\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.109360 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/0515d4e1-de37-44db-92c1-afe760bb699c-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.109385 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/0515d4e1-de37-44db-92c1-afe760bb699c-v4-0-config-system-router-certs\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.109971 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/0515d4e1-de37-44db-92c1-afe760bb699c-v4-0-config-system-cliconfig\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.110225 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0515d4e1-de37-44db-92c1-afe760bb699c-audit-dir\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.110849 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0515d4e1-de37-44db-92c1-afe760bb699c-audit-policies\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.110929 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/0515d4e1-de37-44db-92c1-afe760bb699c-v4-0-config-system-service-ca\") pod \"oauth-openshift-d878cb77-rsxww\" 
(UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.111673 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0515d4e1-de37-44db-92c1-afe760bb699c-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.115044 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/0515d4e1-de37-44db-92c1-afe760bb699c-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.115088 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/0515d4e1-de37-44db-92c1-afe760bb699c-v4-0-config-system-session\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.115147 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/0515d4e1-de37-44db-92c1-afe760bb699c-v4-0-config-system-serving-cert\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.115331 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/0515d4e1-de37-44db-92c1-afe760bb699c-v4-0-config-user-template-error\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.115831 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/0515d4e1-de37-44db-92c1-afe760bb699c-v4-0-config-system-router-certs\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.115990 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/0515d4e1-de37-44db-92c1-afe760bb699c-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.117738 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/0515d4e1-de37-44db-92c1-afe760bb699c-v4-0-config-user-template-login\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " 
pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.143992 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.210446 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6rjtw\" (UniqueName: \"kubernetes.io/projected/0515d4e1-de37-44db-92c1-afe760bb699c-kube-api-access-6rjtw\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.210536 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/0515d4e1-de37-44db-92c1-afe760bb699c-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.214174 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/0515d4e1-de37-44db-92c1-afe760bb699c-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.226463 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6rjtw\" (UniqueName: \"kubernetes.io/projected/0515d4e1-de37-44db-92c1-afe760bb699c-kube-api-access-6rjtw\") pod \"oauth-openshift-d878cb77-rsxww\" (UID: \"0515d4e1-de37-44db-92c1-afe760bb699c\") " pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.235658 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7c609810-eec1-4f73-ad29-24fc190b1ffa" path="/var/lib/kubelet/pods/7c609810-eec1-4f73-ad29-24fc190b1ffa/volumes" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.280118 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.570240 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.657574 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.788877 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.939192 4857 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 28 13:33:50 crc kubenswrapper[4857]: I1128 13:33:50.939460 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://76be4e3ed228cf4cbdccd07494ae0bccb4fd916b41d2e463a97a1dd5f4c01221" gracePeriod=5 Nov 28 13:33:51 crc kubenswrapper[4857]: I1128 13:33:51.151934 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 28 13:33:51 crc kubenswrapper[4857]: I1128 13:33:51.196575 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 28 13:33:51 crc kubenswrapper[4857]: I1128 13:33:51.291172 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 28 13:33:51 crc kubenswrapper[4857]: I1128 13:33:51.536719 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 28 13:33:51 crc kubenswrapper[4857]: I1128 13:33:51.600179 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 28 13:33:51 crc kubenswrapper[4857]: I1128 13:33:51.713306 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 28 13:33:51 crc kubenswrapper[4857]: I1128 13:33:51.789419 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 28 13:33:51 crc kubenswrapper[4857]: I1128 13:33:51.872705 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 28 13:33:51 crc kubenswrapper[4857]: I1128 13:33:51.894018 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 28 13:33:51 crc kubenswrapper[4857]: I1128 13:33:51.952140 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 28 13:33:51 crc kubenswrapper[4857]: I1128 13:33:51.994597 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 28 13:33:52 crc kubenswrapper[4857]: I1128 13:33:52.009003 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 28 13:33:52 crc kubenswrapper[4857]: I1128 13:33:52.283053 4857 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 28 13:33:52 crc kubenswrapper[4857]: I1128 13:33:52.579536 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 28 13:33:52 crc kubenswrapper[4857]: I1128 13:33:52.591273 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 28 13:33:52 crc kubenswrapper[4857]: I1128 13:33:52.681154 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 28 13:33:52 crc kubenswrapper[4857]: I1128 13:33:52.722882 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 28 13:33:52 crc kubenswrapper[4857]: I1128 13:33:52.746829 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 28 13:33:52 crc kubenswrapper[4857]: I1128 13:33:52.749504 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 28 13:33:52 crc kubenswrapper[4857]: I1128 13:33:52.986033 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 28 13:33:53 crc kubenswrapper[4857]: I1128 13:33:53.031121 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 28 13:33:53 crc kubenswrapper[4857]: I1128 13:33:53.073930 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 28 13:33:53 crc kubenswrapper[4857]: E1128 13:33:53.179236 4857 log.go:32] "RunPodSandbox from runtime service failed" err=< Nov 28 13:33:53 crc kubenswrapper[4857]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-d878cb77-rsxww_openshift-authentication_0515d4e1-de37-44db-92c1-afe760bb699c_0(48972c9434cd4e8bbc925e5f9ceddbdf52d3e98a3fba4ff4f5483445c4a0e59c): error adding pod openshift-authentication_oauth-openshift-d878cb77-rsxww to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"48972c9434cd4e8bbc925e5f9ceddbdf52d3e98a3fba4ff4f5483445c4a0e59c" Netns:"/var/run/netns/29107426-ce72-4957-b623-c7169cf68439" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-d878cb77-rsxww;K8S_POD_INFRA_CONTAINER_ID=48972c9434cd4e8bbc925e5f9ceddbdf52d3e98a3fba4ff4f5483445c4a0e59c;K8S_POD_UID=0515d4e1-de37-44db-92c1-afe760bb699c" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-d878cb77-rsxww] networking: Multus: [openshift-authentication/oauth-openshift-d878cb77-rsxww/0515d4e1-de37-44db-92c1-afe760bb699c]: error setting the networks status, pod was already deleted: SetPodNetworkStatusAnnotation: failed to query the pod oauth-openshift-d878cb77-rsxww in out of cluster comm: pod "oauth-openshift-d878cb77-rsxww" not found Nov 28 13:33:53 crc kubenswrapper[4857]: ': StdinData: 
{"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 28 13:33:53 crc kubenswrapper[4857]: > Nov 28 13:33:53 crc kubenswrapper[4857]: E1128 13:33:53.179299 4857 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err=< Nov 28 13:33:53 crc kubenswrapper[4857]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-d878cb77-rsxww_openshift-authentication_0515d4e1-de37-44db-92c1-afe760bb699c_0(48972c9434cd4e8bbc925e5f9ceddbdf52d3e98a3fba4ff4f5483445c4a0e59c): error adding pod openshift-authentication_oauth-openshift-d878cb77-rsxww to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"48972c9434cd4e8bbc925e5f9ceddbdf52d3e98a3fba4ff4f5483445c4a0e59c" Netns:"/var/run/netns/29107426-ce72-4957-b623-c7169cf68439" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-d878cb77-rsxww;K8S_POD_INFRA_CONTAINER_ID=48972c9434cd4e8bbc925e5f9ceddbdf52d3e98a3fba4ff4f5483445c4a0e59c;K8S_POD_UID=0515d4e1-de37-44db-92c1-afe760bb699c" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-d878cb77-rsxww] networking: Multus: [openshift-authentication/oauth-openshift-d878cb77-rsxww/0515d4e1-de37-44db-92c1-afe760bb699c]: error setting the networks status, pod was already deleted: SetPodNetworkStatusAnnotation: failed to query the pod oauth-openshift-d878cb77-rsxww in out of cluster comm: pod "oauth-openshift-d878cb77-rsxww" not found Nov 28 13:33:53 crc kubenswrapper[4857]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 28 13:33:53 crc kubenswrapper[4857]: > pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:53 crc kubenswrapper[4857]: E1128 13:33:53.179317 4857 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err=< Nov 28 13:33:53 crc kubenswrapper[4857]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-d878cb77-rsxww_openshift-authentication_0515d4e1-de37-44db-92c1-afe760bb699c_0(48972c9434cd4e8bbc925e5f9ceddbdf52d3e98a3fba4ff4f5483445c4a0e59c): error adding pod openshift-authentication_oauth-openshift-d878cb77-rsxww to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"48972c9434cd4e8bbc925e5f9ceddbdf52d3e98a3fba4ff4f5483445c4a0e59c" Netns:"/var/run/netns/29107426-ce72-4957-b623-c7169cf68439" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-d878cb77-rsxww;K8S_POD_INFRA_CONTAINER_ID=48972c9434cd4e8bbc925e5f9ceddbdf52d3e98a3fba4ff4f5483445c4a0e59c;K8S_POD_UID=0515d4e1-de37-44db-92c1-afe760bb699c" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-d878cb77-rsxww] 
networking: Multus: [openshift-authentication/oauth-openshift-d878cb77-rsxww/0515d4e1-de37-44db-92c1-afe760bb699c]: error setting the networks status, pod was already deleted: SetPodNetworkStatusAnnotation: failed to query the pod oauth-openshift-d878cb77-rsxww in out of cluster comm: pod "oauth-openshift-d878cb77-rsxww" not found Nov 28 13:33:53 crc kubenswrapper[4857]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 28 13:33:53 crc kubenswrapper[4857]: > pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:53 crc kubenswrapper[4857]: E1128 13:33:53.179374 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"oauth-openshift-d878cb77-rsxww_openshift-authentication(0515d4e1-de37-44db-92c1-afe760bb699c)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"oauth-openshift-d878cb77-rsxww_openshift-authentication(0515d4e1-de37-44db-92c1-afe760bb699c)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-d878cb77-rsxww_openshift-authentication_0515d4e1-de37-44db-92c1-afe760bb699c_0(48972c9434cd4e8bbc925e5f9ceddbdf52d3e98a3fba4ff4f5483445c4a0e59c): error adding pod openshift-authentication_oauth-openshift-d878cb77-rsxww to CNI network \\\"multus-cni-network\\\": plugin type=\\\"multus-shim\\\" name=\\\"multus-cni-network\\\" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:\\\"48972c9434cd4e8bbc925e5f9ceddbdf52d3e98a3fba4ff4f5483445c4a0e59c\\\" Netns:\\\"/var/run/netns/29107426-ce72-4957-b623-c7169cf68439\\\" IfName:\\\"eth0\\\" Args:\\\"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-d878cb77-rsxww;K8S_POD_INFRA_CONTAINER_ID=48972c9434cd4e8bbc925e5f9ceddbdf52d3e98a3fba4ff4f5483445c4a0e59c;K8S_POD_UID=0515d4e1-de37-44db-92c1-afe760bb699c\\\" Path:\\\"\\\" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-d878cb77-rsxww] networking: Multus: [openshift-authentication/oauth-openshift-d878cb77-rsxww/0515d4e1-de37-44db-92c1-afe760bb699c]: error setting the networks status, pod was already deleted: SetPodNetworkStatusAnnotation: failed to query the pod oauth-openshift-d878cb77-rsxww in out of cluster comm: pod \\\"oauth-openshift-d878cb77-rsxww\\\" not found\\n': StdinData: {\\\"binDir\\\":\\\"/var/lib/cni/bin\\\",\\\"clusterNetwork\\\":\\\"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf\\\",\\\"cniVersion\\\":\\\"0.3.1\\\",\\\"daemonSocketDir\\\":\\\"/run/multus/socket\\\",\\\"globalNamespaces\\\":\\\"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv\\\",\\\"logLevel\\\":\\\"verbose\\\",\\\"logToStderr\\\":true,\\\"name\\\":\\\"multus-cni-network\\\",\\\"namespaceIsolation\\\":true,\\\"type\\\":\\\"multus-shim\\\"}\"" pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" podUID="0515d4e1-de37-44db-92c1-afe760bb699c" Nov 28 13:33:53 crc kubenswrapper[4857]: I1128 13:33:53.335418 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 28 13:33:53 crc kubenswrapper[4857]: I1128 13:33:53.382613 4857 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 28 13:33:53 crc kubenswrapper[4857]: I1128 13:33:53.733857 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 28 13:33:53 crc kubenswrapper[4857]: I1128 13:33:53.834990 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 28 13:33:53 crc kubenswrapper[4857]: I1128 13:33:53.868070 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:53 crc kubenswrapper[4857]: I1128 13:33:53.868704 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:54 crc kubenswrapper[4857]: I1128 13:33:54.057361 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 28 13:33:54 crc kubenswrapper[4857]: I1128 13:33:54.064007 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 28 13:33:54 crc kubenswrapper[4857]: I1128 13:33:54.203473 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 28 13:33:54 crc kubenswrapper[4857]: I1128 13:33:54.335820 4857 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.557059 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.558115 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.703748 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.703880 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.703967 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.704010 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.704180 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.704196 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.704236 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.704277 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.704304 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.704776 4857 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.704800 4857 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.704836 4857 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.704849 4857 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.720234 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:33:56 crc kubenswrapper[4857]: E1128 13:33:56.736389 4857 log.go:32] "RunPodSandbox from runtime service failed" err=< Nov 28 13:33:56 crc kubenswrapper[4857]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-d878cb77-rsxww_openshift-authentication_0515d4e1-de37-44db-92c1-afe760bb699c_0(14a804cbe0f609638e9b084d79357be4b0ecb9a256f7ac366781a89211fece8c): error adding pod openshift-authentication_oauth-openshift-d878cb77-rsxww to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"14a804cbe0f609638e9b084d79357be4b0ecb9a256f7ac366781a89211fece8c" Netns:"/var/run/netns/1ac3bee2-4080-4073-bb9e-5b6243005e1c" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-d878cb77-rsxww;K8S_POD_INFRA_CONTAINER_ID=14a804cbe0f609638e9b084d79357be4b0ecb9a256f7ac366781a89211fece8c;K8S_POD_UID=0515d4e1-de37-44db-92c1-afe760bb699c" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-d878cb77-rsxww] networking: Multus: [openshift-authentication/oauth-openshift-d878cb77-rsxww/0515d4e1-de37-44db-92c1-afe760bb699c]: error setting the networks status, pod was already deleted: SetPodNetworkStatusAnnotation: failed to query the pod oauth-openshift-d878cb77-rsxww in out of cluster comm: pod "oauth-openshift-d878cb77-rsxww" not found Nov 28 13:33:56 crc kubenswrapper[4857]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 28 13:33:56 crc kubenswrapper[4857]: > Nov 28 13:33:56 crc kubenswrapper[4857]: E1128 13:33:56.736454 4857 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err=< Nov 28 13:33:56 crc kubenswrapper[4857]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-d878cb77-rsxww_openshift-authentication_0515d4e1-de37-44db-92c1-afe760bb699c_0(14a804cbe0f609638e9b084d79357be4b0ecb9a256f7ac366781a89211fece8c): error adding pod openshift-authentication_oauth-openshift-d878cb77-rsxww to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"14a804cbe0f609638e9b084d79357be4b0ecb9a256f7ac366781a89211fece8c" Netns:"/var/run/netns/1ac3bee2-4080-4073-bb9e-5b6243005e1c" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-d878cb77-rsxww;K8S_POD_INFRA_CONTAINER_ID=14a804cbe0f609638e9b084d79357be4b0ecb9a256f7ac366781a89211fece8c;K8S_POD_UID=0515d4e1-de37-44db-92c1-afe760bb699c" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-d878cb77-rsxww] networking: Multus: [openshift-authentication/oauth-openshift-d878cb77-rsxww/0515d4e1-de37-44db-92c1-afe760bb699c]: error setting the networks status, pod was already deleted: SetPodNetworkStatusAnnotation: failed to query the pod oauth-openshift-d878cb77-rsxww in out of cluster comm: pod "oauth-openshift-d878cb77-rsxww" not found Nov 28 13:33:56 crc kubenswrapper[4857]: ': StdinData: 
{"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 28 13:33:56 crc kubenswrapper[4857]: > pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:56 crc kubenswrapper[4857]: E1128 13:33:56.736474 4857 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err=< Nov 28 13:33:56 crc kubenswrapper[4857]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-d878cb77-rsxww_openshift-authentication_0515d4e1-de37-44db-92c1-afe760bb699c_0(14a804cbe0f609638e9b084d79357be4b0ecb9a256f7ac366781a89211fece8c): error adding pod openshift-authentication_oauth-openshift-d878cb77-rsxww to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"14a804cbe0f609638e9b084d79357be4b0ecb9a256f7ac366781a89211fece8c" Netns:"/var/run/netns/1ac3bee2-4080-4073-bb9e-5b6243005e1c" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-d878cb77-rsxww;K8S_POD_INFRA_CONTAINER_ID=14a804cbe0f609638e9b084d79357be4b0ecb9a256f7ac366781a89211fece8c;K8S_POD_UID=0515d4e1-de37-44db-92c1-afe760bb699c" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-d878cb77-rsxww] networking: Multus: [openshift-authentication/oauth-openshift-d878cb77-rsxww/0515d4e1-de37-44db-92c1-afe760bb699c]: error setting the networks status, pod was already deleted: SetPodNetworkStatusAnnotation: failed to query the pod oauth-openshift-d878cb77-rsxww in out of cluster comm: pod "oauth-openshift-d878cb77-rsxww" not found Nov 28 13:33:56 crc kubenswrapper[4857]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 28 13:33:56 crc kubenswrapper[4857]: > pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:33:56 crc kubenswrapper[4857]: E1128 13:33:56.736522 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"oauth-openshift-d878cb77-rsxww_openshift-authentication(0515d4e1-de37-44db-92c1-afe760bb699c)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"oauth-openshift-d878cb77-rsxww_openshift-authentication(0515d4e1-de37-44db-92c1-afe760bb699c)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-d878cb77-rsxww_openshift-authentication_0515d4e1-de37-44db-92c1-afe760bb699c_0(14a804cbe0f609638e9b084d79357be4b0ecb9a256f7ac366781a89211fece8c): error adding pod openshift-authentication_oauth-openshift-d878cb77-rsxww to CNI network \\\"multus-cni-network\\\": plugin type=\\\"multus-shim\\\" name=\\\"multus-cni-network\\\" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:\\\"14a804cbe0f609638e9b084d79357be4b0ecb9a256f7ac366781a89211fece8c\\\" Netns:\\\"/var/run/netns/1ac3bee2-4080-4073-bb9e-5b6243005e1c\\\" IfName:\\\"eth0\\\" 
Args:\\\"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-d878cb77-rsxww;K8S_POD_INFRA_CONTAINER_ID=14a804cbe0f609638e9b084d79357be4b0ecb9a256f7ac366781a89211fece8c;K8S_POD_UID=0515d4e1-de37-44db-92c1-afe760bb699c\\\" Path:\\\"\\\" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-d878cb77-rsxww] networking: Multus: [openshift-authentication/oauth-openshift-d878cb77-rsxww/0515d4e1-de37-44db-92c1-afe760bb699c]: error setting the networks status, pod was already deleted: SetPodNetworkStatusAnnotation: failed to query the pod oauth-openshift-d878cb77-rsxww in out of cluster comm: pod \\\"oauth-openshift-d878cb77-rsxww\\\" not found\\n': StdinData: {\\\"binDir\\\":\\\"/var/lib/cni/bin\\\",\\\"clusterNetwork\\\":\\\"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf\\\",\\\"cniVersion\\\":\\\"0.3.1\\\",\\\"daemonSocketDir\\\":\\\"/run/multus/socket\\\",\\\"globalNamespaces\\\":\\\"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv\\\",\\\"logLevel\\\":\\\"verbose\\\",\\\"logToStderr\\\":true,\\\"name\\\":\\\"multus-cni-network\\\",\\\"namespaceIsolation\\\":true,\\\"type\\\":\\\"multus-shim\\\"}\"" pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" podUID="0515d4e1-de37-44db-92c1-afe760bb699c" Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.806180 4857 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.888400 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.888547 4857 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="76be4e3ed228cf4cbdccd07494ae0bccb4fd916b41d2e463a97a1dd5f4c01221" exitCode=137 Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.888615 4857 scope.go:117] "RemoveContainer" containerID="76be4e3ed228cf4cbdccd07494ae0bccb4fd916b41d2e463a97a1dd5f4c01221" Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.888623 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.907756 4857 scope.go:117] "RemoveContainer" containerID="76be4e3ed228cf4cbdccd07494ae0bccb4fd916b41d2e463a97a1dd5f4c01221"
Nov 28 13:33:56 crc kubenswrapper[4857]: E1128 13:33:56.908340 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"76be4e3ed228cf4cbdccd07494ae0bccb4fd916b41d2e463a97a1dd5f4c01221\": container with ID starting with 76be4e3ed228cf4cbdccd07494ae0bccb4fd916b41d2e463a97a1dd5f4c01221 not found: ID does not exist" containerID="76be4e3ed228cf4cbdccd07494ae0bccb4fd916b41d2e463a97a1dd5f4c01221"
Nov 28 13:33:56 crc kubenswrapper[4857]: I1128 13:33:56.908393 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"76be4e3ed228cf4cbdccd07494ae0bccb4fd916b41d2e463a97a1dd5f4c01221"} err="failed to get container status \"76be4e3ed228cf4cbdccd07494ae0bccb4fd916b41d2e463a97a1dd5f4c01221\": rpc error: code = NotFound desc = could not find container \"76be4e3ed228cf4cbdccd07494ae0bccb4fd916b41d2e463a97a1dd5f4c01221\": container with ID starting with 76be4e3ed228cf4cbdccd07494ae0bccb4fd916b41d2e463a97a1dd5f4c01221 not found: ID does not exist"
Nov 28 13:33:58 crc kubenswrapper[4857]: I1128 13:33:58.238515 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes"
Nov 28 13:33:58 crc kubenswrapper[4857]: I1128 13:33:58.239484 4857 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID=""
Nov 28 13:33:58 crc kubenswrapper[4857]: I1128 13:33:58.251662 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Nov 28 13:33:58 crc kubenswrapper[4857]: I1128 13:33:58.251706 4857 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="a66b7126-a619-470a-95a0-dc7cfa80a097"
Nov 28 13:33:58 crc kubenswrapper[4857]: I1128 13:33:58.256528 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Nov 28 13:33:58 crc kubenswrapper[4857]: I1128 13:33:58.256582 4857 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="a66b7126-a619-470a-95a0-dc7cfa80a097"
Nov 28 13:34:00 crc kubenswrapper[4857]: I1128 13:34:00.561193 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z"
Nov 28 13:34:00 crc kubenswrapper[4857]: I1128 13:34:00.958054 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config"
Nov 28 13:34:01 crc kubenswrapper[4857]: I1128 13:34:01.843151 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert"
Nov 28 13:34:04 crc kubenswrapper[4857]: I1128 13:34:04.379872 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7"
Nov 28 13:34:04 crc kubenswrapper[4857]: I1128 13:34:04.672510 4857 reflector.go:368] Caches populated for *v1.Secret from
object-"openshift-ingress"/"router-metrics-certs-default" Nov 28 13:34:04 crc kubenswrapper[4857]: I1128 13:34:04.892798 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 28 13:34:04 crc kubenswrapper[4857]: I1128 13:34:04.927151 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 28 13:34:05 crc kubenswrapper[4857]: I1128 13:34:05.083903 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 28 13:34:05 crc kubenswrapper[4857]: I1128 13:34:05.181118 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 28 13:34:05 crc kubenswrapper[4857]: I1128 13:34:05.230303 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 28 13:34:07 crc kubenswrapper[4857]: I1128 13:34:07.064144 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 28 13:34:07 crc kubenswrapper[4857]: I1128 13:34:07.454214 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 28 13:34:07 crc kubenswrapper[4857]: I1128 13:34:07.622342 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 28 13:34:07 crc kubenswrapper[4857]: I1128 13:34:07.749278 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 28 13:34:08 crc kubenswrapper[4857]: I1128 13:34:08.183182 4857 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 28 13:34:08 crc kubenswrapper[4857]: I1128 13:34:08.479876 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 28 13:34:08 crc kubenswrapper[4857]: I1128 13:34:08.503734 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 28 13:34:08 crc kubenswrapper[4857]: I1128 13:34:08.813221 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 28 13:34:08 crc kubenswrapper[4857]: I1128 13:34:08.841830 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 28 13:34:08 crc kubenswrapper[4857]: I1128 13:34:08.994404 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 28 13:34:09 crc kubenswrapper[4857]: I1128 13:34:09.446218 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 28 13:34:10 crc kubenswrapper[4857]: I1128 13:34:10.013526 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 28 13:34:10 crc kubenswrapper[4857]: I1128 13:34:10.889411 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 28 13:34:11 crc kubenswrapper[4857]: I1128 13:34:11.227996 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:34:11 crc kubenswrapper[4857]: I1128 13:34:11.228495 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:34:11 crc kubenswrapper[4857]: I1128 13:34:11.308463 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 28 13:34:11 crc kubenswrapper[4857]: I1128 13:34:11.905293 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 28 13:34:12 crc kubenswrapper[4857]: I1128 13:34:12.324586 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 28 13:34:12 crc kubenswrapper[4857]: I1128 13:34:12.515115 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 28 13:34:12 crc kubenswrapper[4857]: I1128 13:34:12.541532 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 28 13:34:12 crc kubenswrapper[4857]: I1128 13:34:12.692682 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 28 13:34:13 crc kubenswrapper[4857]: I1128 13:34:13.013776 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 28 13:34:13 crc kubenswrapper[4857]: I1128 13:34:13.173487 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 28 13:34:13 crc kubenswrapper[4857]: I1128 13:34:13.351229 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 28 13:34:14 crc kubenswrapper[4857]: E1128 13:34:14.070907 4857 log.go:32] "RunPodSandbox from runtime service failed" err=< Nov 28 13:34:14 crc kubenswrapper[4857]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-d878cb77-rsxww_openshift-authentication_0515d4e1-de37-44db-92c1-afe760bb699c_0(9edc6c5629d77e30c3b65e0df12cb5a84222b473f304c484c6c83bc6ccc228ee): error adding pod openshift-authentication_oauth-openshift-d878cb77-rsxww to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"9edc6c5629d77e30c3b65e0df12cb5a84222b473f304c484c6c83bc6ccc228ee" Netns:"/var/run/netns/a55b2090-d5ab-4f53-8ec6-03cef9d640e2" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-d878cb77-rsxww;K8S_POD_INFRA_CONTAINER_ID=9edc6c5629d77e30c3b65e0df12cb5a84222b473f304c484c6c83bc6ccc228ee;K8S_POD_UID=0515d4e1-de37-44db-92c1-afe760bb699c" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-d878cb77-rsxww] networking: Multus: [openshift-authentication/oauth-openshift-d878cb77-rsxww/0515d4e1-de37-44db-92c1-afe760bb699c]: error setting the networks status, pod was already deleted: SetPodNetworkStatusAnnotation: failed to query the pod oauth-openshift-d878cb77-rsxww in out of cluster comm: pod "oauth-openshift-d878cb77-rsxww" not found Nov 28 13:34:14 crc kubenswrapper[4857]: ': StdinData: 
{"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 28 13:34:14 crc kubenswrapper[4857]: > Nov 28 13:34:14 crc kubenswrapper[4857]: E1128 13:34:14.071353 4857 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err=< Nov 28 13:34:14 crc kubenswrapper[4857]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-d878cb77-rsxww_openshift-authentication_0515d4e1-de37-44db-92c1-afe760bb699c_0(9edc6c5629d77e30c3b65e0df12cb5a84222b473f304c484c6c83bc6ccc228ee): error adding pod openshift-authentication_oauth-openshift-d878cb77-rsxww to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"9edc6c5629d77e30c3b65e0df12cb5a84222b473f304c484c6c83bc6ccc228ee" Netns:"/var/run/netns/a55b2090-d5ab-4f53-8ec6-03cef9d640e2" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-d878cb77-rsxww;K8S_POD_INFRA_CONTAINER_ID=9edc6c5629d77e30c3b65e0df12cb5a84222b473f304c484c6c83bc6ccc228ee;K8S_POD_UID=0515d4e1-de37-44db-92c1-afe760bb699c" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-d878cb77-rsxww] networking: Multus: [openshift-authentication/oauth-openshift-d878cb77-rsxww/0515d4e1-de37-44db-92c1-afe760bb699c]: error setting the networks status, pod was already deleted: SetPodNetworkStatusAnnotation: failed to query the pod oauth-openshift-d878cb77-rsxww in out of cluster comm: pod "oauth-openshift-d878cb77-rsxww" not found Nov 28 13:34:14 crc kubenswrapper[4857]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 28 13:34:14 crc kubenswrapper[4857]: > pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:34:14 crc kubenswrapper[4857]: E1128 13:34:14.071375 4857 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err=< Nov 28 13:34:14 crc kubenswrapper[4857]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-d878cb77-rsxww_openshift-authentication_0515d4e1-de37-44db-92c1-afe760bb699c_0(9edc6c5629d77e30c3b65e0df12cb5a84222b473f304c484c6c83bc6ccc228ee): error adding pod openshift-authentication_oauth-openshift-d878cb77-rsxww to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"9edc6c5629d77e30c3b65e0df12cb5a84222b473f304c484c6c83bc6ccc228ee" Netns:"/var/run/netns/a55b2090-d5ab-4f53-8ec6-03cef9d640e2" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-d878cb77-rsxww;K8S_POD_INFRA_CONTAINER_ID=9edc6c5629d77e30c3b65e0df12cb5a84222b473f304c484c6c83bc6ccc228ee;K8S_POD_UID=0515d4e1-de37-44db-92c1-afe760bb699c" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-d878cb77-rsxww] 
networking: Multus: [openshift-authentication/oauth-openshift-d878cb77-rsxww/0515d4e1-de37-44db-92c1-afe760bb699c]: error setting the networks status, pod was already deleted: SetPodNetworkStatusAnnotation: failed to query the pod oauth-openshift-d878cb77-rsxww in out of cluster comm: pod "oauth-openshift-d878cb77-rsxww" not found Nov 28 13:34:14 crc kubenswrapper[4857]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 28 13:34:14 crc kubenswrapper[4857]: > pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" Nov 28 13:34:14 crc kubenswrapper[4857]: E1128 13:34:14.071434 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"oauth-openshift-d878cb77-rsxww_openshift-authentication(0515d4e1-de37-44db-92c1-afe760bb699c)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"oauth-openshift-d878cb77-rsxww_openshift-authentication(0515d4e1-de37-44db-92c1-afe760bb699c)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-d878cb77-rsxww_openshift-authentication_0515d4e1-de37-44db-92c1-afe760bb699c_0(9edc6c5629d77e30c3b65e0df12cb5a84222b473f304c484c6c83bc6ccc228ee): error adding pod openshift-authentication_oauth-openshift-d878cb77-rsxww to CNI network \\\"multus-cni-network\\\": plugin type=\\\"multus-shim\\\" name=\\\"multus-cni-network\\\" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:\\\"9edc6c5629d77e30c3b65e0df12cb5a84222b473f304c484c6c83bc6ccc228ee\\\" Netns:\\\"/var/run/netns/a55b2090-d5ab-4f53-8ec6-03cef9d640e2\\\" IfName:\\\"eth0\\\" Args:\\\"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-d878cb77-rsxww;K8S_POD_INFRA_CONTAINER_ID=9edc6c5629d77e30c3b65e0df12cb5a84222b473f304c484c6c83bc6ccc228ee;K8S_POD_UID=0515d4e1-de37-44db-92c1-afe760bb699c\\\" Path:\\\"\\\" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-d878cb77-rsxww] networking: Multus: [openshift-authentication/oauth-openshift-d878cb77-rsxww/0515d4e1-de37-44db-92c1-afe760bb699c]: error setting the networks status, pod was already deleted: SetPodNetworkStatusAnnotation: failed to query the pod oauth-openshift-d878cb77-rsxww in out of cluster comm: pod \\\"oauth-openshift-d878cb77-rsxww\\\" not found\\n': StdinData: {\\\"binDir\\\":\\\"/var/lib/cni/bin\\\",\\\"clusterNetwork\\\":\\\"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf\\\",\\\"cniVersion\\\":\\\"0.3.1\\\",\\\"daemonSocketDir\\\":\\\"/run/multus/socket\\\",\\\"globalNamespaces\\\":\\\"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv\\\",\\\"logLevel\\\":\\\"verbose\\\",\\\"logToStderr\\\":true,\\\"name\\\":\\\"multus-cni-network\\\",\\\"namespaceIsolation\\\":true,\\\"type\\\":\\\"multus-shim\\\"}\"" pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" podUID="0515d4e1-de37-44db-92c1-afe760bb699c" Nov 28 13:34:14 crc kubenswrapper[4857]: I1128 13:34:14.108458 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 28 13:34:14 crc kubenswrapper[4857]: I1128 13:34:14.192481 4857 reflector.go:368] Caches populated for *v1.Secret from 
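[Editor's note] The StdinData blob that recurs in the errors above is the multus-shim CNI configuration handed to the plugin on stdin. A minimal Go sketch of decoding it; MultusShimConf is a hypothetical struct whose field names are taken straight from the JSON keys in the log:

package main

import (
	"encoding/json"
	"fmt"
)

// MultusShimConf mirrors the StdinData JSON in the log above.
// Illustrative only; the field names come from those JSON keys.
type MultusShimConf struct {
	BinDir             string `json:"binDir"`
	ClusterNetwork     string `json:"clusterNetwork"`
	CNIVersion         string `json:"cniVersion"`
	DaemonSocketDir    string `json:"daemonSocketDir"`
	GlobalNamespaces   string `json:"globalNamespaces"`
	LogLevel           string `json:"logLevel"`
	LogToStderr        bool   `json:"logToStderr"`
	Name               string `json:"name"`
	NamespaceIsolation bool   `json:"namespaceIsolation"`
	Type               string `json:"type"`
}

func main() {
	stdinData := []byte(`{"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"}`)
	var conf MultusShimConf
	if err := json.Unmarshal(stdinData, &conf); err != nil {
		panic(err)
	}
	// The shim talks to the multus daemon via daemonSocketDir and
	// delegates pod networking to the clusterNetwork (OVN-Kubernetes) conf.
	fmt.Printf("%s (%s) delegating to %s\n", conf.Name, conf.Type, conf.ClusterNetwork)
}

Note the 400 from CmdAdd is not a configuration problem: multus failed to set the network-status annotation because the oauth-openshift pod object had already been deleted, and the kubelet simply retries with a fresh sandbox at 13:34:29 below.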
object-"openshift-dns-operator"/"metrics-tls" Nov 28 13:34:14 crc kubenswrapper[4857]: I1128 13:34:14.682004 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 28 13:34:15 crc kubenswrapper[4857]: I1128 13:34:15.363490 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 28 13:34:15 crc kubenswrapper[4857]: I1128 13:34:15.365530 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 28 13:34:15 crc kubenswrapper[4857]: I1128 13:34:15.476698 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 28 13:34:15 crc kubenswrapper[4857]: I1128 13:34:15.551920 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 28 13:34:15 crc kubenswrapper[4857]: I1128 13:34:15.638975 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 28 13:34:16 crc kubenswrapper[4857]: I1128 13:34:16.159197 4857 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 28 13:34:16 crc kubenswrapper[4857]: I1128 13:34:16.730906 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 28 13:34:17 crc kubenswrapper[4857]: I1128 13:34:17.261238 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 28 13:34:17 crc kubenswrapper[4857]: I1128 13:34:17.542202 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 28 13:34:17 crc kubenswrapper[4857]: I1128 13:34:17.617502 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 28 13:34:18 crc kubenswrapper[4857]: I1128 13:34:18.353608 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 28 13:34:18 crc kubenswrapper[4857]: I1128 13:34:18.918512 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 28 13:34:19 crc kubenswrapper[4857]: I1128 13:34:19.010278 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Nov 28 13:34:19 crc kubenswrapper[4857]: I1128 13:34:19.012344 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 28 13:34:19 crc kubenswrapper[4857]: I1128 13:34:19.012404 4857 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="64d9a46039ca93b2fdd84dfed721e48ee0a3e7ec02d62e36c70b77f91d756a8a" exitCode=137 Nov 28 13:34:19 crc kubenswrapper[4857]: I1128 13:34:19.012455 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"64d9a46039ca93b2fdd84dfed721e48ee0a3e7ec02d62e36c70b77f91d756a8a"} Nov 28 13:34:19 crc 
Nov 28 13:34:19 crc kubenswrapper[4857]: I1128 13:34:19.012490 4857 scope.go:117] "RemoveContainer" containerID="669301387e20fdba765e20950b1d62685d72e3b7ccf059400428b9a26c38a7f0"
Nov 28 13:34:19 crc kubenswrapper[4857]: I1128 13:34:19.113739 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt"
Nov 28 13:34:19 crc kubenswrapper[4857]: I1128 13:34:19.197913 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt"
Nov 28 13:34:19 crc kubenswrapper[4857]: I1128 13:34:19.585603 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config"
Nov 28 13:34:19 crc kubenswrapper[4857]: I1128 13:34:19.590688 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert"
Nov 28 13:34:19 crc kubenswrapper[4857]: I1128 13:34:19.948855 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy"
Nov 28 13:34:20 crc kubenswrapper[4857]: I1128 13:34:20.020901 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log"
Nov 28 13:34:20 crc kubenswrapper[4857]: I1128 13:34:20.021988 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"716eb8b252736de99d0d845dfde1801de125ef29816637c13eedab283ac92308"}
Nov 28 13:34:20 crc kubenswrapper[4857]: I1128 13:34:20.117896 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config"
Nov 28 13:34:20 crc kubenswrapper[4857]: I1128 13:34:20.180684 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt"
Nov 28 13:34:20 crc kubenswrapper[4857]: I1128 13:34:20.367668 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca"
Nov 28 13:34:20 crc kubenswrapper[4857]: I1128 13:34:20.595174 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Nov 28 13:34:20 crc kubenswrapper[4857]: I1128 13:34:20.800083 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd"
Nov 28 13:34:20 crc kubenswrapper[4857]: I1128 13:34:20.974382 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx"
Nov 28 13:34:21 crc kubenswrapper[4857]: I1128 13:34:21.223356 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key"
Nov 28 13:34:21 crc kubenswrapper[4857]: I1128 13:34:21.444696 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images"
Nov 28 13:34:21 crc kubenswrapper[4857]: I1128 13:34:21.456749 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls"
Nov 28 13:34:21 crc kubenswrapper[4857]: I1128 13:34:21.488602 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d"
Nov 28 13:34:21 crc kubenswrapper[4857]: I1128 13:34:21.670182 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj"
Nov 28 13:34:21 crc kubenswrapper[4857]: I1128 13:34:21.829977 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1"
Nov 28 13:34:22 crc kubenswrapper[4857]: I1128 13:34:22.046602 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets"
Nov 28 13:34:22 crc kubenswrapper[4857]: I1128 13:34:22.501896 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert"
Nov 28 13:34:22 crc kubenswrapper[4857]: I1128 13:34:22.881763 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Nov 28 13:34:22 crc kubenswrapper[4857]: I1128 13:34:22.960098 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn"
Nov 28 13:34:23 crc kubenswrapper[4857]: I1128 13:34:23.041164 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls"
Nov 28 13:34:23 crc kubenswrapper[4857]: I1128 13:34:23.178910 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert"
Nov 28 13:34:23 crc kubenswrapper[4857]: I1128 13:34:23.246199 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx"
Nov 28 13:34:23 crc kubenswrapper[4857]: I1128 13:34:23.718645 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Nov 28 13:34:23 crc kubenswrapper[4857]: I1128 13:34:23.959102 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt"
Nov 28 13:34:24 crc kubenswrapper[4857]: I1128 13:34:24.294128 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt"
Nov 28 13:34:24 crc kubenswrapper[4857]: I1128 13:34:24.633209 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt"
Nov 28 13:34:24 crc kubenswrapper[4857]: I1128 13:34:24.673304 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw"
Nov 28 13:34:24 crc kubenswrapper[4857]: I1128 13:34:24.682901 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert"
Nov 28 13:34:24 crc kubenswrapper[4857]: I1128 13:34:24.765135 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt"
Nov 28 13:34:24 crc kubenswrapper[4857]: I1128 13:34:24.781881 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl"
Nov 28 13:34:24 crc kubenswrapper[4857]: I1128 13:34:24.924749 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config"
Nov 28 13:34:25 crc kubenswrapper[4857]: I1128 13:34:25.006986 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt"
Nov 28 13:34:25 crc kubenswrapper[4857]: I1128 13:34:25.027014 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert"
Nov 28 13:34:25 crc kubenswrapper[4857]: I1128 13:34:25.240682 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt"
Nov 28 13:34:25 crc kubenswrapper[4857]: I1128 13:34:25.464039 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt"
Nov 28 13:34:25 crc kubenswrapper[4857]: I1128 13:34:25.555354 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt"
Nov 28 13:34:25 crc kubenswrapper[4857]: I1128 13:34:25.713657 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 13:34:26 crc kubenswrapper[4857]: I1128 13:34:26.079969 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Nov 28 13:34:26 crc kubenswrapper[4857]: I1128 13:34:26.200891 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Nov 28 13:34:26 crc kubenswrapper[4857]: I1128 13:34:26.248102 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Nov 28 13:34:26 crc kubenswrapper[4857]: I1128 13:34:26.602579 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Nov 28 13:34:27 crc kubenswrapper[4857]: I1128 13:34:27.060056 4857 generic.go:334] "Generic (PLEG): container finished" podID="de8315dd-951d-4fe6-a8a9-bc4dd3094743" containerID="2e95b2d97bc8cd52a9710f0be7eebcda78f01e35a0358077b4001bb6a52e2ac6" exitCode=0
Nov 28 13:34:27 crc kubenswrapper[4857]: I1128 13:34:27.060108 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-nx7ct" event={"ID":"de8315dd-951d-4fe6-a8a9-bc4dd3094743","Type":"ContainerDied","Data":"2e95b2d97bc8cd52a9710f0be7eebcda78f01e35a0358077b4001bb6a52e2ac6"}
Nov 28 13:34:27 crc kubenswrapper[4857]: I1128 13:34:27.060563 4857 scope.go:117] "RemoveContainer" containerID="2e95b2d97bc8cd52a9710f0be7eebcda78f01e35a0358077b4001bb6a52e2ac6"
Nov 28 13:34:27 crc kubenswrapper[4857]: I1128 13:34:27.216730 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt"
Nov 28 13:34:27 crc kubenswrapper[4857]: I1128 13:34:27.457899 4857 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Nov 28 13:34:27 crc kubenswrapper[4857]: I1128 13:34:27.467524 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert"
Nov 28 13:34:27 crc kubenswrapper[4857]: I1128 13:34:27.970787 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 13:34:27 crc kubenswrapper[4857]: I1128 13:34:27.975260 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.066883 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-nx7ct" event={"ID":"de8315dd-951d-4fe6-a8a9-bc4dd3094743","Type":"ContainerStarted","Data":"44347af9a835a3f5f3e86e0ea9846fc04acef0c3f502950bf5960044eaa00c80"}
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.067610 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-nx7ct"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.069441 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-nx7ct"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.145410 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.270490 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.547406 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.579784 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt"
Nov 28 13:34:28 crc kubenswrapper[4857]: I1128 13:34:28.970350 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt"
Nov 28 13:34:29 crc kubenswrapper[4857]: I1128 13:34:29.117058 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt"
Nov 28 13:34:29 crc kubenswrapper[4857]: I1128 13:34:29.168783 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw"
Nov 28 13:34:29 crc kubenswrapper[4857]: I1128 13:34:29.228594 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-d878cb77-rsxww"
Nov 28 13:34:29 crc kubenswrapper[4857]: I1128 13:34:29.229059 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-d878cb77-rsxww"
Nov 28 13:34:29 crc kubenswrapper[4857]: I1128 13:34:29.423239 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-d878cb77-rsxww"]
Nov 28 13:34:29 crc kubenswrapper[4857]: I1128 13:34:29.664178 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config"
Nov 28 13:34:29 crc kubenswrapper[4857]: I1128 13:34:29.735279 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt"
Nov 28 13:34:29 crc kubenswrapper[4857]: I1128 13:34:29.924224 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg"
Nov 28 13:34:30 crc kubenswrapper[4857]: I1128 13:34:30.077391 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" event={"ID":"0515d4e1-de37-44db-92c1-afe760bb699c","Type":"ContainerStarted","Data":"5e63049349267049c0cf64d2fc7a9f1020448ce39b8aaa8ae70922ca7f983c8f"}
Nov 28 13:34:30 crc kubenswrapper[4857]: I1128 13:34:30.077823 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" event={"ID":"0515d4e1-de37-44db-92c1-afe760bb699c","Type":"ContainerStarted","Data":"3717a945383700ee48991e7b6eaad3b573ae8a0e1593779f91c14dad3d179f08"}
Nov 28 13:34:30 crc kubenswrapper[4857]: I1128 13:34:30.098914 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-d878cb77-rsxww" podStartSLOduration=95.098896539 podStartE2EDuration="1m35.098896539s" podCreationTimestamp="2025-11-28 13:32:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:34:30.095174918 +0000 UTC m=+320.219116345" watchObservedRunningTime="2025-11-28 13:34:30.098896539 +0000 UTC m=+320.222837966"
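[Editor's note] The podStartSLOduration above is consistent with watchObservedRunningTime minus podCreationTimestamp, with no image-pull time to subtract since firstStartedPulling is the zero time. A sketch of that arithmetic, inferred from the logged fields rather than taken from pod_startup_latency_tracker.go:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Values copied from the tracker line above.
	created, _ := time.Parse(time.RFC3339, "2025-11-28T13:32:55Z")
	running, _ := time.Parse(time.RFC3339Nano, "2025-11-28T13:34:30.098896539Z")
	// Prints 1m35.098896539s, matching podStartE2EDuration and the
	// podStartSLOduration=95.098896539 in the log.
	fmt.Println(running.Sub(created))
}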
Nov 28 13:34:30 crc kubenswrapper[4857]: I1128 13:34:30.280739 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-d878cb77-rsxww"
Nov 28 13:34:30 crc kubenswrapper[4857]: I1128 13:34:30.286352 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-d878cb77-rsxww"
Nov 28 13:34:30 crc kubenswrapper[4857]: I1128 13:34:30.314090 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle"
Nov 28 13:34:30 crc kubenswrapper[4857]: I1128 13:34:30.388963 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Nov 28 13:34:30 crc kubenswrapper[4857]: I1128 13:34:30.780972 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls"
Nov 28 13:34:31 crc kubenswrapper[4857]: I1128 13:34:31.114871 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Nov 28 13:34:31 crc kubenswrapper[4857]: I1128 13:34:31.320086 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret"
Nov 28 13:34:32 crc kubenswrapper[4857]: I1128 13:34:32.065535 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt"
Nov 28 13:34:34 crc kubenswrapper[4857]: I1128 13:34:34.701062 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt"
Nov 28 13:34:35 crc kubenswrapper[4857]: I1128 13:34:35.135884 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq"
Nov 28 13:34:35 crc kubenswrapper[4857]: I1128 13:34:35.717377 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 28 13:34:36 crc kubenswrapper[4857]: I1128 13:34:36.037633 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client"
Nov 28 13:34:46 crc kubenswrapper[4857]: I1128 13:34:46.697209 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-cvb79"]
Nov 28 13:34:46 crc kubenswrapper[4857]: I1128 13:34:46.697993 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-cvb79" podUID="7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558" containerName="controller-manager" containerID="cri-o://c046b374876ab426bd4bdcf4f7a1af86bae6e3ccceb33ad14f3fc2d593fd2887" gracePeriod=30
Nov 28 13:34:46 crc kubenswrapper[4857]: I1128 13:34:46.831472 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-m9bfw"]
Nov 28 13:34:46 crc kubenswrapper[4857]: I1128 13:34:46.831729 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-m9bfw" podUID="844ce19a-9c82-4a37-8170-db724fabc85c" containerName="route-controller-manager" containerID="cri-o://78003f48e7796089770df9e21e3f3293c7b547bdb1246368e6b5f786178d3fee" gracePeriod=30
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.063897 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-cvb79"
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.136033 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558-proxy-ca-bundles\") pod \"7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558\" (UID: \"7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558\") "
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.136152 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558-serving-cert\") pod \"7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558\" (UID: \"7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558\") "
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.136209 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558-client-ca\") pod \"7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558\" (UID: \"7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558\") "
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.136238 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558-config\") pod \"7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558\" (UID: \"7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558\") "
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.136283 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dmjnv\" (UniqueName: \"kubernetes.io/projected/7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558-kube-api-access-dmjnv\") pod \"7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558\" (UID: \"7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558\") "
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.137207 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558" (UID: "7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.137474 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558-config" (OuterVolumeSpecName: "config") pod "7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558" (UID: "7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.141572 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558-client-ca" (OuterVolumeSpecName: "client-ca") pod "7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558" (UID: "7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.142653 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558-kube-api-access-dmjnv" (OuterVolumeSpecName: "kube-api-access-dmjnv") pod "7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558" (UID: "7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558"). InnerVolumeSpecName "kube-api-access-dmjnv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.148207 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558" (UID: "7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.162308 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-m9bfw"
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.170774 4857 generic.go:334] "Generic (PLEG): container finished" podID="7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558" containerID="c046b374876ab426bd4bdcf4f7a1af86bae6e3ccceb33ad14f3fc2d593fd2887" exitCode=0
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.170879 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-cvb79" event={"ID":"7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558","Type":"ContainerDied","Data":"c046b374876ab426bd4bdcf4f7a1af86bae6e3ccceb33ad14f3fc2d593fd2887"}
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.170927 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-cvb79" event={"ID":"7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558","Type":"ContainerDied","Data":"efbb52fbe69a9f85b7e950dee6a37a78c9bd15fa14fcc36abcec73c2c9a8da17"}
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.170974 4857 scope.go:117] "RemoveContainer" containerID="c046b374876ab426bd4bdcf4f7a1af86bae6e3ccceb33ad14f3fc2d593fd2887"
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.171153 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-cvb79"
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.173457 4857 generic.go:334] "Generic (PLEG): container finished" podID="844ce19a-9c82-4a37-8170-db724fabc85c" containerID="78003f48e7796089770df9e21e3f3293c7b547bdb1246368e6b5f786178d3fee" exitCode=0
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.173552 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-m9bfw" event={"ID":"844ce19a-9c82-4a37-8170-db724fabc85c","Type":"ContainerDied","Data":"78003f48e7796089770df9e21e3f3293c7b547bdb1246368e6b5f786178d3fee"}
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.173611 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-m9bfw" event={"ID":"844ce19a-9c82-4a37-8170-db724fabc85c","Type":"ContainerDied","Data":"28eb68cc2b1d3c120c5ffaae328293910313ad150c80bc7249163cef1a0f40c3"}
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.173833 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-m9bfw"
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.190202 4857 scope.go:117] "RemoveContainer" containerID="c046b374876ab426bd4bdcf4f7a1af86bae6e3ccceb33ad14f3fc2d593fd2887"
Nov 28 13:34:47 crc kubenswrapper[4857]: E1128 13:34:47.191471 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c046b374876ab426bd4bdcf4f7a1af86bae6e3ccceb33ad14f3fc2d593fd2887\": container with ID starting with c046b374876ab426bd4bdcf4f7a1af86bae6e3ccceb33ad14f3fc2d593fd2887 not found: ID does not exist" containerID="c046b374876ab426bd4bdcf4f7a1af86bae6e3ccceb33ad14f3fc2d593fd2887"
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.191535 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c046b374876ab426bd4bdcf4f7a1af86bae6e3ccceb33ad14f3fc2d593fd2887"} err="failed to get container status \"c046b374876ab426bd4bdcf4f7a1af86bae6e3ccceb33ad14f3fc2d593fd2887\": rpc error: code = NotFound desc = could not find container \"c046b374876ab426bd4bdcf4f7a1af86bae6e3ccceb33ad14f3fc2d593fd2887\": container with ID starting with c046b374876ab426bd4bdcf4f7a1af86bae6e3ccceb33ad14f3fc2d593fd2887 not found: ID does not exist"
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.191570 4857 scope.go:117] "RemoveContainer" containerID="78003f48e7796089770df9e21e3f3293c7b547bdb1246368e6b5f786178d3fee"
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.219440 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-cvb79"]
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.223502 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-cvb79"]
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.225169 4857 scope.go:117] "RemoveContainer" containerID="78003f48e7796089770df9e21e3f3293c7b547bdb1246368e6b5f786178d3fee"
Nov 28 13:34:47 crc kubenswrapper[4857]: E1128 13:34:47.225549 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"78003f48e7796089770df9e21e3f3293c7b547bdb1246368e6b5f786178d3fee\": container with ID starting with 78003f48e7796089770df9e21e3f3293c7b547bdb1246368e6b5f786178d3fee not found: ID does not exist" containerID="78003f48e7796089770df9e21e3f3293c7b547bdb1246368e6b5f786178d3fee"
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.225589 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"78003f48e7796089770df9e21e3f3293c7b547bdb1246368e6b5f786178d3fee"} err="failed to get container status \"78003f48e7796089770df9e21e3f3293c7b547bdb1246368e6b5f786178d3fee\": rpc error: code = NotFound desc = could not find container \"78003f48e7796089770df9e21e3f3293c7b547bdb1246368e6b5f786178d3fee\": container with ID starting with 78003f48e7796089770df9e21e3f3293c7b547bdb1246368e6b5f786178d3fee not found: ID does not exist"
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.237902 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g5bbd\" (UniqueName: \"kubernetes.io/projected/844ce19a-9c82-4a37-8170-db724fabc85c-kube-api-access-g5bbd\") pod \"844ce19a-9c82-4a37-8170-db724fabc85c\" (UID: \"844ce19a-9c82-4a37-8170-db724fabc85c\") "
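[Editor's note] Note the pattern in the RemoveContainer errors above: the runtime answers rpc code NotFound for a container that is already gone, and the kubelet logs it and carries on, making container deletion effectively idempotent. A hedged sketch of that check using the grpc status package; removeContainer is a hypothetical stand-in, not the kubelet's actual code:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// removeContainer stands in for the CRI runtime call; here it always
// reports NotFound, mimicking the errors in the log above.
func removeContainer(id string) error {
	return status.Errorf(codes.NotFound, "could not find container %q", id)
}

func main() {
	err := removeContainer("c046b374876a")
	if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
		// Already gone: treat the delete as a success and move on.
		fmt.Println("container already removed:", s.Message())
		return
	}
	if err != nil {
		panic(err)
	}
}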
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.238016 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/844ce19a-9c82-4a37-8170-db724fabc85c-serving-cert\") pod \"844ce19a-9c82-4a37-8170-db724fabc85c\" (UID: \"844ce19a-9c82-4a37-8170-db724fabc85c\") "
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.238148 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/844ce19a-9c82-4a37-8170-db724fabc85c-config\") pod \"844ce19a-9c82-4a37-8170-db724fabc85c\" (UID: \"844ce19a-9c82-4a37-8170-db724fabc85c\") "
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.238262 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/844ce19a-9c82-4a37-8170-db724fabc85c-client-ca\") pod \"844ce19a-9c82-4a37-8170-db724fabc85c\" (UID: \"844ce19a-9c82-4a37-8170-db724fabc85c\") "
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.238991 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dmjnv\" (UniqueName: \"kubernetes.io/projected/7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558-kube-api-access-dmjnv\") on node \"crc\" DevicePath \"\""
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.239021 4857 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.239059 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.239074 4857 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558-client-ca\") on node \"crc\" DevicePath \"\""
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.239087 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558-config\") on node \"crc\" DevicePath \"\""
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.239338 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/844ce19a-9c82-4a37-8170-db724fabc85c-config" (OuterVolumeSpecName: "config") pod "844ce19a-9c82-4a37-8170-db724fabc85c" (UID: "844ce19a-9c82-4a37-8170-db724fabc85c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.239861 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/844ce19a-9c82-4a37-8170-db724fabc85c-client-ca" (OuterVolumeSpecName: "client-ca") pod "844ce19a-9c82-4a37-8170-db724fabc85c" (UID: "844ce19a-9c82-4a37-8170-db724fabc85c"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.242490 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/844ce19a-9c82-4a37-8170-db724fabc85c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "844ce19a-9c82-4a37-8170-db724fabc85c" (UID: "844ce19a-9c82-4a37-8170-db724fabc85c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.242525 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/844ce19a-9c82-4a37-8170-db724fabc85c-kube-api-access-g5bbd" (OuterVolumeSpecName: "kube-api-access-g5bbd") pod "844ce19a-9c82-4a37-8170-db724fabc85c" (UID: "844ce19a-9c82-4a37-8170-db724fabc85c"). InnerVolumeSpecName "kube-api-access-g5bbd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.339916 4857 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/844ce19a-9c82-4a37-8170-db724fabc85c-client-ca\") on node \"crc\" DevicePath \"\""
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.340044 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g5bbd\" (UniqueName: \"kubernetes.io/projected/844ce19a-9c82-4a37-8170-db724fabc85c-kube-api-access-g5bbd\") on node \"crc\" DevicePath \"\""
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.340061 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/844ce19a-9c82-4a37-8170-db724fabc85c-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.340073 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/844ce19a-9c82-4a37-8170-db724fabc85c-config\") on node \"crc\" DevicePath \"\""
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.500453 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-m9bfw"]
Nov 28 13:34:47 crc kubenswrapper[4857]: I1128 13:34:47.504654 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-m9bfw"]
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.239358 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558" path="/var/lib/kubelet/pods/7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558/volumes"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.240382 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="844ce19a-9c82-4a37-8170-db724fabc85c" path="/var/lib/kubelet/pods/844ce19a-9c82-4a37-8170-db724fabc85c/volumes"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.454512 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-76ffc9c5fb-9rb9m"]
Nov 28 13:34:48 crc kubenswrapper[4857]: E1128 13:34:48.454738 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558" containerName="controller-manager"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.454752 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558" containerName="controller-manager"
Nov 28 13:34:48 crc kubenswrapper[4857]: E1128 13:34:48.454779 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="844ce19a-9c82-4a37-8170-db724fabc85c" containerName="route-controller-manager"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.454786 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="844ce19a-9c82-4a37-8170-db724fabc85c" containerName="route-controller-manager"
Nov 28 13:34:48 crc kubenswrapper[4857]: E1128 13:34:48.454796 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.454801 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.454902 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="7cab1b93-9ab5-4fdb-b6f6-d86a1a50b558" containerName="controller-manager"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.454912 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.454926 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="844ce19a-9c82-4a37-8170-db724fabc85c" containerName="route-controller-manager"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.455350 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-76ffc9c5fb-9rb9m"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.457563 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-8dd6874d8-8ncsj"]
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.457867 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.458134 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-8dd6874d8-8ncsj"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.458572 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.458760 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.459924 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.460192 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.460439 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.460658 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.460813 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.460907 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.462541 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.463766 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.463993 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.471006 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-76ffc9c5fb-9rb9m"]
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.471632 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.475648 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-8dd6874d8-8ncsj"]
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.553084 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/14dcc1b3-de6c-46f8-be96-5d5f4053add0-client-ca\") pod \"controller-manager-8dd6874d8-8ncsj\" (UID: \"14dcc1b3-de6c-46f8-be96-5d5f4053add0\") " pod="openshift-controller-manager/controller-manager-8dd6874d8-8ncsj"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.553476 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/14dcc1b3-de6c-46f8-be96-5d5f4053add0-proxy-ca-bundles\") pod \"controller-manager-8dd6874d8-8ncsj\" (UID: \"14dcc1b3-de6c-46f8-be96-5d5f4053add0\") " pod="openshift-controller-manager/controller-manager-8dd6874d8-8ncsj"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.553612 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/14dcc1b3-de6c-46f8-be96-5d5f4053add0-serving-cert\") pod \"controller-manager-8dd6874d8-8ncsj\" (UID: \"14dcc1b3-de6c-46f8-be96-5d5f4053add0\") " pod="openshift-controller-manager/controller-manager-8dd6874d8-8ncsj"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.553727 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/60cf556d-3dab-42f4-b924-b55ac0826dcc-client-ca\") pod \"route-controller-manager-76ffc9c5fb-9rb9m\" (UID: \"60cf556d-3dab-42f4-b924-b55ac0826dcc\") " pod="openshift-route-controller-manager/route-controller-manager-76ffc9c5fb-9rb9m"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.553856 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8gqtr\" (UniqueName: \"kubernetes.io/projected/60cf556d-3dab-42f4-b924-b55ac0826dcc-kube-api-access-8gqtr\") pod \"route-controller-manager-76ffc9c5fb-9rb9m\" (UID: \"60cf556d-3dab-42f4-b924-b55ac0826dcc\") " pod="openshift-route-controller-manager/route-controller-manager-76ffc9c5fb-9rb9m"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.553991 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/60cf556d-3dab-42f4-b924-b55ac0826dcc-config\") pod \"route-controller-manager-76ffc9c5fb-9rb9m\" (UID: \"60cf556d-3dab-42f4-b924-b55ac0826dcc\") " pod="openshift-route-controller-manager/route-controller-manager-76ffc9c5fb-9rb9m"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.554105 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/60cf556d-3dab-42f4-b924-b55ac0826dcc-serving-cert\") pod \"route-controller-manager-76ffc9c5fb-9rb9m\" (UID: \"60cf556d-3dab-42f4-b924-b55ac0826dcc\") " pod="openshift-route-controller-manager/route-controller-manager-76ffc9c5fb-9rb9m"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.554203 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14dcc1b3-de6c-46f8-be96-5d5f4053add0-config\") pod \"controller-manager-8dd6874d8-8ncsj\" (UID: \"14dcc1b3-de6c-46f8-be96-5d5f4053add0\") " pod="openshift-controller-manager/controller-manager-8dd6874d8-8ncsj"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.554321 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fgvjp\" (UniqueName: \"kubernetes.io/projected/14dcc1b3-de6c-46f8-be96-5d5f4053add0-kube-api-access-fgvjp\") pod \"controller-manager-8dd6874d8-8ncsj\" (UID: \"14dcc1b3-de6c-46f8-be96-5d5f4053add0\") " pod="openshift-controller-manager/controller-manager-8dd6874d8-8ncsj"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.655662 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/60cf556d-3dab-42f4-b924-b55ac0826dcc-client-ca\") pod \"route-controller-manager-76ffc9c5fb-9rb9m\" (UID: \"60cf556d-3dab-42f4-b924-b55ac0826dcc\") " pod="openshift-route-controller-manager/route-controller-manager-76ffc9c5fb-9rb9m"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.655985 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/14dcc1b3-de6c-46f8-be96-5d5f4053add0-serving-cert\") pod \"controller-manager-8dd6874d8-8ncsj\" (UID: \"14dcc1b3-de6c-46f8-be96-5d5f4053add0\") " pod="openshift-controller-manager/controller-manager-8dd6874d8-8ncsj"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.656133 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8gqtr\" (UniqueName: \"kubernetes.io/projected/60cf556d-3dab-42f4-b924-b55ac0826dcc-kube-api-access-8gqtr\") pod \"route-controller-manager-76ffc9c5fb-9rb9m\" (UID: \"60cf556d-3dab-42f4-b924-b55ac0826dcc\") " pod="openshift-route-controller-manager/route-controller-manager-76ffc9c5fb-9rb9m"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.656284 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/60cf556d-3dab-42f4-b924-b55ac0826dcc-config\") pod \"route-controller-manager-76ffc9c5fb-9rb9m\" (UID: \"60cf556d-3dab-42f4-b924-b55ac0826dcc\") " pod="openshift-route-controller-manager/route-controller-manager-76ffc9c5fb-9rb9m"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.656412 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/60cf556d-3dab-42f4-b924-b55ac0826dcc-serving-cert\") pod \"route-controller-manager-76ffc9c5fb-9rb9m\" (UID: \"60cf556d-3dab-42f4-b924-b55ac0826dcc\") " pod="openshift-route-controller-manager/route-controller-manager-76ffc9c5fb-9rb9m"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.656545 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14dcc1b3-de6c-46f8-be96-5d5f4053add0-config\") pod \"controller-manager-8dd6874d8-8ncsj\" (UID: \"14dcc1b3-de6c-46f8-be96-5d5f4053add0\") " pod="openshift-controller-manager/controller-manager-8dd6874d8-8ncsj"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.656653 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fgvjp\" (UniqueName: \"kubernetes.io/projected/14dcc1b3-de6c-46f8-be96-5d5f4053add0-kube-api-access-fgvjp\") pod \"controller-manager-8dd6874d8-8ncsj\" (UID: \"14dcc1b3-de6c-46f8-be96-5d5f4053add0\") " pod="openshift-controller-manager/controller-manager-8dd6874d8-8ncsj"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.656780 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/14dcc1b3-de6c-46f8-be96-5d5f4053add0-client-ca\") pod \"controller-manager-8dd6874d8-8ncsj\" (UID: \"14dcc1b3-de6c-46f8-be96-5d5f4053add0\") " pod="openshift-controller-manager/controller-manager-8dd6874d8-8ncsj"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.656898 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/14dcc1b3-de6c-46f8-be96-5d5f4053add0-proxy-ca-bundles\") pod \"controller-manager-8dd6874d8-8ncsj\" (UID: \"14dcc1b3-de6c-46f8-be96-5d5f4053add0\") " pod="openshift-controller-manager/controller-manager-8dd6874d8-8ncsj"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.656973 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/60cf556d-3dab-42f4-b924-b55ac0826dcc-client-ca\") pod \"route-controller-manager-76ffc9c5fb-9rb9m\" (UID: \"60cf556d-3dab-42f4-b924-b55ac0826dcc\") " pod="openshift-route-controller-manager/route-controller-manager-76ffc9c5fb-9rb9m"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.658181 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/60cf556d-3dab-42f4-b924-b55ac0826dcc-config\") pod \"route-controller-manager-76ffc9c5fb-9rb9m\" (UID: \"60cf556d-3dab-42f4-b924-b55ac0826dcc\") " pod="openshift-route-controller-manager/route-controller-manager-76ffc9c5fb-9rb9m"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.658372 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/14dcc1b3-de6c-46f8-be96-5d5f4053add0-client-ca\") pod \"controller-manager-8dd6874d8-8ncsj\" (UID: \"14dcc1b3-de6c-46f8-be96-5d5f4053add0\") " pod="openshift-controller-manager/controller-manager-8dd6874d8-8ncsj"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.658879 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/14dcc1b3-de6c-46f8-be96-5d5f4053add0-proxy-ca-bundles\") pod \"controller-manager-8dd6874d8-8ncsj\" (UID: \"14dcc1b3-de6c-46f8-be96-5d5f4053add0\") " pod="openshift-controller-manager/controller-manager-8dd6874d8-8ncsj"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.660267 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/60cf556d-3dab-42f4-b924-b55ac0826dcc-serving-cert\") pod \"route-controller-manager-76ffc9c5fb-9rb9m\" (UID: \"60cf556d-3dab-42f4-b924-b55ac0826dcc\") " pod="openshift-route-controller-manager/route-controller-manager-76ffc9c5fb-9rb9m"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.661398 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/14dcc1b3-de6c-46f8-be96-5d5f4053add0-serving-cert\") pod \"controller-manager-8dd6874d8-8ncsj\" (UID: \"14dcc1b3-de6c-46f8-be96-5d5f4053add0\") " pod="openshift-controller-manager/controller-manager-8dd6874d8-8ncsj"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.669294 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14dcc1b3-de6c-46f8-be96-5d5f4053add0-config\") pod \"controller-manager-8dd6874d8-8ncsj\" (UID: \"14dcc1b3-de6c-46f8-be96-5d5f4053add0\") " pod="openshift-controller-manager/controller-manager-8dd6874d8-8ncsj"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.687714 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8gqtr\" (UniqueName: \"kubernetes.io/projected/60cf556d-3dab-42f4-b924-b55ac0826dcc-kube-api-access-8gqtr\") pod \"route-controller-manager-76ffc9c5fb-9rb9m\" (UID: \"60cf556d-3dab-42f4-b924-b55ac0826dcc\") " pod="openshift-route-controller-manager/route-controller-manager-76ffc9c5fb-9rb9m"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.687916 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fgvjp\" (UniqueName: \"kubernetes.io/projected/14dcc1b3-de6c-46f8-be96-5d5f4053add0-kube-api-access-fgvjp\") pod \"controller-manager-8dd6874d8-8ncsj\" (UID: \"14dcc1b3-de6c-46f8-be96-5d5f4053add0\") " pod="openshift-controller-manager/controller-manager-8dd6874d8-8ncsj"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.775561 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-76ffc9c5fb-9rb9m"
Nov 28 13:34:48 crc kubenswrapper[4857]: I1128 13:34:48.792739 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-8dd6874d8-8ncsj"
Nov 28 13:34:49 crc kubenswrapper[4857]: I1128 13:34:49.058332 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-76ffc9c5fb-9rb9m"]
Nov 28 13:34:49 crc kubenswrapper[4857]: I1128 13:34:49.125204 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-8dd6874d8-8ncsj"]
Nov 28 13:34:49 crc kubenswrapper[4857]: I1128 13:34:49.189755 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-76ffc9c5fb-9rb9m" event={"ID":"60cf556d-3dab-42f4-b924-b55ac0826dcc","Type":"ContainerStarted","Data":"a7f6e9c6fb3b5aa46aff81cfa9d1676b64ece971a40097db8fe36767e4bc83e6"}
Nov 28 13:34:49 crc kubenswrapper[4857]: I1128 13:34:49.191065 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-8dd6874d8-8ncsj" event={"ID":"14dcc1b3-de6c-46f8-be96-5d5f4053add0","Type":"ContainerStarted","Data":"75e99119a8a3297beb396fa395dc677492de7332ed8601f3af0a4e7b546246f8"}
Nov 28 13:34:50 crc kubenswrapper[4857]: I1128 13:34:50.199559 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-8dd6874d8-8ncsj" event={"ID":"14dcc1b3-de6c-46f8-be96-5d5f4053add0","Type":"ContainerStarted","Data":"6ed9e017c871780ef6177acbe886a74c9ee6a1da2b0375ea94b790351ed31085"}
Nov 28 13:34:50 crc kubenswrapper[4857]: I1128 13:34:50.201378 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-8dd6874d8-8ncsj"
Nov 28 13:34:50 crc kubenswrapper[4857]: I1128 13:34:50.202394 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-76ffc9c5fb-9rb9m" event={"ID":"60cf556d-3dab-42f4-b924-b55ac0826dcc","Type":"ContainerStarted","Data":"453ab6a0b271b9881b374001e308e6b8a3174fd9101d17d6de5b0ef731b035bb"}
Nov 28 13:34:50 crc kubenswrapper[4857]: I1128 13:34:50.203116 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-76ffc9c5fb-9rb9m"
Nov 28 13:34:50 crc kubenswrapper[4857]: I1128 13:34:50.204711 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-8dd6874d8-8ncsj"
Nov 28 13:34:50 crc kubenswrapper[4857]: I1128 13:34:50.208118 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-76ffc9c5fb-9rb9m"
Nov 28 13:34:50 crc kubenswrapper[4857]: I1128 13:34:50.222923 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-8dd6874d8-8ncsj" podStartSLOduration=4.222896127 podStartE2EDuration="4.222896127s" podCreationTimestamp="2025-11-28 13:34:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:34:50.222052483 +0000 UTC m=+340.345993940" watchObservedRunningTime="2025-11-28 13:34:50.222896127 +0000 UTC m=+340.346837554"
Nov 28 13:34:50 crc kubenswrapper[4857]: I1128 13:34:50.260167 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-76ffc9c5fb-9rb9m"
podStartSLOduration=4.260147463 podStartE2EDuration="4.260147463s" podCreationTimestamp="2025-11-28 13:34:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:34:50.257582739 +0000 UTC m=+340.381524186" watchObservedRunningTime="2025-11-28 13:34:50.260147463 +0000 UTC m=+340.384088900" Nov 28 13:34:51 crc kubenswrapper[4857]: I1128 13:34:51.960436 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-8dd6874d8-8ncsj"] Nov 28 13:34:51 crc kubenswrapper[4857]: I1128 13:34:51.985406 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-76ffc9c5fb-9rb9m"] Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.216182 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-76ffc9c5fb-9rb9m" podUID="60cf556d-3dab-42f4-b924-b55ac0826dcc" containerName="route-controller-manager" containerID="cri-o://453ab6a0b271b9881b374001e308e6b8a3174fd9101d17d6de5b0ef731b035bb" gracePeriod=30 Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.216391 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-8dd6874d8-8ncsj" podUID="14dcc1b3-de6c-46f8-be96-5d5f4053add0" containerName="controller-manager" containerID="cri-o://6ed9e017c871780ef6177acbe886a74c9ee6a1da2b0375ea94b790351ed31085" gracePeriod=30 Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.605014 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-76ffc9c5fb-9rb9m" Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.627614 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8gqtr\" (UniqueName: \"kubernetes.io/projected/60cf556d-3dab-42f4-b924-b55ac0826dcc-kube-api-access-8gqtr\") pod \"60cf556d-3dab-42f4-b924-b55ac0826dcc\" (UID: \"60cf556d-3dab-42f4-b924-b55ac0826dcc\") " Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.627919 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/60cf556d-3dab-42f4-b924-b55ac0826dcc-config\") pod \"60cf556d-3dab-42f4-b924-b55ac0826dcc\" (UID: \"60cf556d-3dab-42f4-b924-b55ac0826dcc\") " Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.627964 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/60cf556d-3dab-42f4-b924-b55ac0826dcc-serving-cert\") pod \"60cf556d-3dab-42f4-b924-b55ac0826dcc\" (UID: \"60cf556d-3dab-42f4-b924-b55ac0826dcc\") " Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.628006 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/60cf556d-3dab-42f4-b924-b55ac0826dcc-client-ca\") pod \"60cf556d-3dab-42f4-b924-b55ac0826dcc\" (UID: \"60cf556d-3dab-42f4-b924-b55ac0826dcc\") " Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.629039 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/60cf556d-3dab-42f4-b924-b55ac0826dcc-client-ca" (OuterVolumeSpecName: "client-ca") pod "60cf556d-3dab-42f4-b924-b55ac0826dcc" (UID: "60cf556d-3dab-42f4-b924-b55ac0826dcc"). 
InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.629454 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/60cf556d-3dab-42f4-b924-b55ac0826dcc-config" (OuterVolumeSpecName: "config") pod "60cf556d-3dab-42f4-b924-b55ac0826dcc" (UID: "60cf556d-3dab-42f4-b924-b55ac0826dcc"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.638361 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/60cf556d-3dab-42f4-b924-b55ac0826dcc-kube-api-access-8gqtr" (OuterVolumeSpecName: "kube-api-access-8gqtr") pod "60cf556d-3dab-42f4-b924-b55ac0826dcc" (UID: "60cf556d-3dab-42f4-b924-b55ac0826dcc"). InnerVolumeSpecName "kube-api-access-8gqtr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.639815 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/60cf556d-3dab-42f4-b924-b55ac0826dcc-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "60cf556d-3dab-42f4-b924-b55ac0826dcc" (UID: "60cf556d-3dab-42f4-b924-b55ac0826dcc"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.657548 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d8d569866-n674c"] Nov 28 13:34:53 crc kubenswrapper[4857]: E1128 13:34:53.658238 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60cf556d-3dab-42f4-b924-b55ac0826dcc" containerName="route-controller-manager" Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.658259 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="60cf556d-3dab-42f4-b924-b55ac0826dcc" containerName="route-controller-manager" Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.658365 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="60cf556d-3dab-42f4-b924-b55ac0826dcc" containerName="route-controller-manager" Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.658760 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-n674c" Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.660690 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d8d569866-n674c"] Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.709090 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-8dd6874d8-8ncsj" Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.730660 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/14dcc1b3-de6c-46f8-be96-5d5f4053add0-proxy-ca-bundles\") pod \"14dcc1b3-de6c-46f8-be96-5d5f4053add0\" (UID: \"14dcc1b3-de6c-46f8-be96-5d5f4053add0\") " Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.730733 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/14dcc1b3-de6c-46f8-be96-5d5f4053add0-client-ca\") pod \"14dcc1b3-de6c-46f8-be96-5d5f4053add0\" (UID: \"14dcc1b3-de6c-46f8-be96-5d5f4053add0\") " Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.730781 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14dcc1b3-de6c-46f8-be96-5d5f4053add0-config\") pod \"14dcc1b3-de6c-46f8-be96-5d5f4053add0\" (UID: \"14dcc1b3-de6c-46f8-be96-5d5f4053add0\") " Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.731728 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/14dcc1b3-de6c-46f8-be96-5d5f4053add0-client-ca" (OuterVolumeSpecName: "client-ca") pod "14dcc1b3-de6c-46f8-be96-5d5f4053add0" (UID: "14dcc1b3-de6c-46f8-be96-5d5f4053add0"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.731842 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/14dcc1b3-de6c-46f8-be96-5d5f4053add0-config" (OuterVolumeSpecName: "config") pod "14dcc1b3-de6c-46f8-be96-5d5f4053add0" (UID: "14dcc1b3-de6c-46f8-be96-5d5f4053add0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.732420 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/14dcc1b3-de6c-46f8-be96-5d5f4053add0-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "14dcc1b3-de6c-46f8-be96-5d5f4053add0" (UID: "14dcc1b3-de6c-46f8-be96-5d5f4053add0"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.733038 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/14dcc1b3-de6c-46f8-be96-5d5f4053add0-serving-cert\") pod \"14dcc1b3-de6c-46f8-be96-5d5f4053add0\" (UID: \"14dcc1b3-de6c-46f8-be96-5d5f4053add0\") " Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.733546 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fgvjp\" (UniqueName: \"kubernetes.io/projected/14dcc1b3-de6c-46f8-be96-5d5f4053add0-kube-api-access-fgvjp\") pod \"14dcc1b3-de6c-46f8-be96-5d5f4053add0\" (UID: \"14dcc1b3-de6c-46f8-be96-5d5f4053add0\") " Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.734795 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00320753-df8b-4bf5-abd3-87a8b848852b-config\") pod \"route-controller-manager-7d8d569866-n674c\" (UID: \"00320753-df8b-4bf5-abd3-87a8b848852b\") " pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-n674c" Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.734886 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kf9sw\" (UniqueName: \"kubernetes.io/projected/00320753-df8b-4bf5-abd3-87a8b848852b-kube-api-access-kf9sw\") pod \"route-controller-manager-7d8d569866-n674c\" (UID: \"00320753-df8b-4bf5-abd3-87a8b848852b\") " pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-n674c" Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.736684 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14dcc1b3-de6c-46f8-be96-5d5f4053add0-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "14dcc1b3-de6c-46f8-be96-5d5f4053add0" (UID: "14dcc1b3-de6c-46f8-be96-5d5f4053add0"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.737835 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/14dcc1b3-de6c-46f8-be96-5d5f4053add0-kube-api-access-fgvjp" (OuterVolumeSpecName: "kube-api-access-fgvjp") pod "14dcc1b3-de6c-46f8-be96-5d5f4053add0" (UID: "14dcc1b3-de6c-46f8-be96-5d5f4053add0"). InnerVolumeSpecName "kube-api-access-fgvjp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.737997 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/00320753-df8b-4bf5-abd3-87a8b848852b-serving-cert\") pod \"route-controller-manager-7d8d569866-n674c\" (UID: \"00320753-df8b-4bf5-abd3-87a8b848852b\") " pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-n674c" Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.738032 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/00320753-df8b-4bf5-abd3-87a8b848852b-client-ca\") pod \"route-controller-manager-7d8d569866-n674c\" (UID: \"00320753-df8b-4bf5-abd3-87a8b848852b\") " pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-n674c" Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.738291 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/14dcc1b3-de6c-46f8-be96-5d5f4053add0-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.738316 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fgvjp\" (UniqueName: \"kubernetes.io/projected/14dcc1b3-de6c-46f8-be96-5d5f4053add0-kube-api-access-fgvjp\") on node \"crc\" DevicePath \"\"" Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.738330 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8gqtr\" (UniqueName: \"kubernetes.io/projected/60cf556d-3dab-42f4-b924-b55ac0826dcc-kube-api-access-8gqtr\") on node \"crc\" DevicePath \"\"" Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.738342 4857 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/14dcc1b3-de6c-46f8-be96-5d5f4053add0-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.738355 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/60cf556d-3dab-42f4-b924-b55ac0826dcc-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.738364 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/60cf556d-3dab-42f4-b924-b55ac0826dcc-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.738375 4857 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/14dcc1b3-de6c-46f8-be96-5d5f4053add0-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.738385 4857 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/60cf556d-3dab-42f4-b924-b55ac0826dcc-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.738394 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/14dcc1b3-de6c-46f8-be96-5d5f4053add0-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.840155 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/00320753-df8b-4bf5-abd3-87a8b848852b-config\") pod \"route-controller-manager-7d8d569866-n674c\" (UID: \"00320753-df8b-4bf5-abd3-87a8b848852b\") " pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-n674c" Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.840228 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kf9sw\" (UniqueName: \"kubernetes.io/projected/00320753-df8b-4bf5-abd3-87a8b848852b-kube-api-access-kf9sw\") pod \"route-controller-manager-7d8d569866-n674c\" (UID: \"00320753-df8b-4bf5-abd3-87a8b848852b\") " pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-n674c" Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.840259 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/00320753-df8b-4bf5-abd3-87a8b848852b-serving-cert\") pod \"route-controller-manager-7d8d569866-n674c\" (UID: \"00320753-df8b-4bf5-abd3-87a8b848852b\") " pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-n674c" Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.840283 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/00320753-df8b-4bf5-abd3-87a8b848852b-client-ca\") pod \"route-controller-manager-7d8d569866-n674c\" (UID: \"00320753-df8b-4bf5-abd3-87a8b848852b\") " pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-n674c" Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.841605 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/00320753-df8b-4bf5-abd3-87a8b848852b-client-ca\") pod \"route-controller-manager-7d8d569866-n674c\" (UID: \"00320753-df8b-4bf5-abd3-87a8b848852b\") " pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-n674c" Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.842088 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00320753-df8b-4bf5-abd3-87a8b848852b-config\") pod \"route-controller-manager-7d8d569866-n674c\" (UID: \"00320753-df8b-4bf5-abd3-87a8b848852b\") " pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-n674c" Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.848867 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/00320753-df8b-4bf5-abd3-87a8b848852b-serving-cert\") pod \"route-controller-manager-7d8d569866-n674c\" (UID: \"00320753-df8b-4bf5-abd3-87a8b848852b\") " pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-n674c" Nov 28 13:34:53 crc kubenswrapper[4857]: I1128 13:34:53.858156 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kf9sw\" (UniqueName: \"kubernetes.io/projected/00320753-df8b-4bf5-abd3-87a8b848852b-kube-api-access-kf9sw\") pod \"route-controller-manager-7d8d569866-n674c\" (UID: \"00320753-df8b-4bf5-abd3-87a8b848852b\") " pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-n674c" Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.006264 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-n674c" Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.224934 4857 generic.go:334] "Generic (PLEG): container finished" podID="14dcc1b3-de6c-46f8-be96-5d5f4053add0" containerID="6ed9e017c871780ef6177acbe886a74c9ee6a1da2b0375ea94b790351ed31085" exitCode=0 Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.225162 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-8dd6874d8-8ncsj" event={"ID":"14dcc1b3-de6c-46f8-be96-5d5f4053add0","Type":"ContainerDied","Data":"6ed9e017c871780ef6177acbe886a74c9ee6a1da2b0375ea94b790351ed31085"} Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.225730 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-8dd6874d8-8ncsj" event={"ID":"14dcc1b3-de6c-46f8-be96-5d5f4053add0","Type":"ContainerDied","Data":"75e99119a8a3297beb396fa395dc677492de7332ed8601f3af0a4e7b546246f8"} Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.225754 4857 scope.go:117] "RemoveContainer" containerID="6ed9e017c871780ef6177acbe886a74c9ee6a1da2b0375ea94b790351ed31085" Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.225246 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-8dd6874d8-8ncsj" Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.230135 4857 generic.go:334] "Generic (PLEG): container finished" podID="60cf556d-3dab-42f4-b924-b55ac0826dcc" containerID="453ab6a0b271b9881b374001e308e6b8a3174fd9101d17d6de5b0ef731b035bb" exitCode=0 Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.230196 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-76ffc9c5fb-9rb9m" Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.239564 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-76ffc9c5fb-9rb9m" event={"ID":"60cf556d-3dab-42f4-b924-b55ac0826dcc","Type":"ContainerDied","Data":"453ab6a0b271b9881b374001e308e6b8a3174fd9101d17d6de5b0ef731b035bb"} Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.239606 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-76ffc9c5fb-9rb9m" event={"ID":"60cf556d-3dab-42f4-b924-b55ac0826dcc","Type":"ContainerDied","Data":"a7f6e9c6fb3b5aa46aff81cfa9d1676b64ece971a40097db8fe36767e4bc83e6"} Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.252211 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d8d569866-n674c"] Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.259869 4857 scope.go:117] "RemoveContainer" containerID="6ed9e017c871780ef6177acbe886a74c9ee6a1da2b0375ea94b790351ed31085" Nov 28 13:34:54 crc kubenswrapper[4857]: E1128 13:34:54.260399 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6ed9e017c871780ef6177acbe886a74c9ee6a1da2b0375ea94b790351ed31085\": container with ID starting with 6ed9e017c871780ef6177acbe886a74c9ee6a1da2b0375ea94b790351ed31085 not found: ID does not exist" containerID="6ed9e017c871780ef6177acbe886a74c9ee6a1da2b0375ea94b790351ed31085" Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.260442 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ed9e017c871780ef6177acbe886a74c9ee6a1da2b0375ea94b790351ed31085"} err="failed to get container status \"6ed9e017c871780ef6177acbe886a74c9ee6a1da2b0375ea94b790351ed31085\": rpc error: code = NotFound desc = could not find container \"6ed9e017c871780ef6177acbe886a74c9ee6a1da2b0375ea94b790351ed31085\": container with ID starting with 6ed9e017c871780ef6177acbe886a74c9ee6a1da2b0375ea94b790351ed31085 not found: ID does not exist" Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.260491 4857 scope.go:117] "RemoveContainer" containerID="453ab6a0b271b9881b374001e308e6b8a3174fd9101d17d6de5b0ef731b035bb" Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.263722 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-8dd6874d8-8ncsj"] Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.270493 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-8dd6874d8-8ncsj"] Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.273430 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-76ffc9c5fb-9rb9m"] Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.276646 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-76ffc9c5fb-9rb9m"] Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.278010 4857 scope.go:117] "RemoveContainer" containerID="453ab6a0b271b9881b374001e308e6b8a3174fd9101d17d6de5b0ef731b035bb" Nov 28 13:34:54 crc kubenswrapper[4857]: E1128 13:34:54.278426 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = 
could not find container \"453ab6a0b271b9881b374001e308e6b8a3174fd9101d17d6de5b0ef731b035bb\": container with ID starting with 453ab6a0b271b9881b374001e308e6b8a3174fd9101d17d6de5b0ef731b035bb not found: ID does not exist" containerID="453ab6a0b271b9881b374001e308e6b8a3174fd9101d17d6de5b0ef731b035bb" Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.278464 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"453ab6a0b271b9881b374001e308e6b8a3174fd9101d17d6de5b0ef731b035bb"} err="failed to get container status \"453ab6a0b271b9881b374001e308e6b8a3174fd9101d17d6de5b0ef731b035bb\": rpc error: code = NotFound desc = could not find container \"453ab6a0b271b9881b374001e308e6b8a3174fd9101d17d6de5b0ef731b035bb\": container with ID starting with 453ab6a0b271b9881b374001e308e6b8a3174fd9101d17d6de5b0ef731b035bb not found: ID does not exist" Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.779728 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d8d569866-n674c"] Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.786180 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-7c5cccfd57-rpr9c"] Nov 28 13:34:54 crc kubenswrapper[4857]: E1128 13:34:54.786380 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14dcc1b3-de6c-46f8-be96-5d5f4053add0" containerName="controller-manager" Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.786396 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="14dcc1b3-de6c-46f8-be96-5d5f4053add0" containerName="controller-manager" Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.786490 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="14dcc1b3-de6c-46f8-be96-5d5f4053add0" containerName="controller-manager" Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.786890 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-7c5cccfd57-rpr9c" Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.791344 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.791373 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.791849 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.791930 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.791940 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.799475 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.803067 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7c5cccfd57-rpr9c"] Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.806047 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.958659 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/b58e4d8b-2bef-4363-a2ad-5467b7a5b11b-proxy-ca-bundles\") pod \"controller-manager-7c5cccfd57-rpr9c\" (UID: \"b58e4d8b-2bef-4363-a2ad-5467b7a5b11b\") " pod="openshift-controller-manager/controller-manager-7c5cccfd57-rpr9c" Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.959069 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b58e4d8b-2bef-4363-a2ad-5467b7a5b11b-serving-cert\") pod \"controller-manager-7c5cccfd57-rpr9c\" (UID: \"b58e4d8b-2bef-4363-a2ad-5467b7a5b11b\") " pod="openshift-controller-manager/controller-manager-7c5cccfd57-rpr9c" Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.959102 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n2m6n\" (UniqueName: \"kubernetes.io/projected/b58e4d8b-2bef-4363-a2ad-5467b7a5b11b-kube-api-access-n2m6n\") pod \"controller-manager-7c5cccfd57-rpr9c\" (UID: \"b58e4d8b-2bef-4363-a2ad-5467b7a5b11b\") " pod="openshift-controller-manager/controller-manager-7c5cccfd57-rpr9c" Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.959149 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b58e4d8b-2bef-4363-a2ad-5467b7a5b11b-client-ca\") pod \"controller-manager-7c5cccfd57-rpr9c\" (UID: \"b58e4d8b-2bef-4363-a2ad-5467b7a5b11b\") " pod="openshift-controller-manager/controller-manager-7c5cccfd57-rpr9c" Nov 28 13:34:54 crc kubenswrapper[4857]: I1128 13:34:54.959176 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/b58e4d8b-2bef-4363-a2ad-5467b7a5b11b-config\") pod \"controller-manager-7c5cccfd57-rpr9c\" (UID: \"b58e4d8b-2bef-4363-a2ad-5467b7a5b11b\") " pod="openshift-controller-manager/controller-manager-7c5cccfd57-rpr9c" Nov 28 13:34:55 crc kubenswrapper[4857]: I1128 13:34:55.060752 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/b58e4d8b-2bef-4363-a2ad-5467b7a5b11b-proxy-ca-bundles\") pod \"controller-manager-7c5cccfd57-rpr9c\" (UID: \"b58e4d8b-2bef-4363-a2ad-5467b7a5b11b\") " pod="openshift-controller-manager/controller-manager-7c5cccfd57-rpr9c" Nov 28 13:34:55 crc kubenswrapper[4857]: I1128 13:34:55.060928 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b58e4d8b-2bef-4363-a2ad-5467b7a5b11b-serving-cert\") pod \"controller-manager-7c5cccfd57-rpr9c\" (UID: \"b58e4d8b-2bef-4363-a2ad-5467b7a5b11b\") " pod="openshift-controller-manager/controller-manager-7c5cccfd57-rpr9c" Nov 28 13:34:55 crc kubenswrapper[4857]: I1128 13:34:55.061021 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n2m6n\" (UniqueName: \"kubernetes.io/projected/b58e4d8b-2bef-4363-a2ad-5467b7a5b11b-kube-api-access-n2m6n\") pod \"controller-manager-7c5cccfd57-rpr9c\" (UID: \"b58e4d8b-2bef-4363-a2ad-5467b7a5b11b\") " pod="openshift-controller-manager/controller-manager-7c5cccfd57-rpr9c" Nov 28 13:34:55 crc kubenswrapper[4857]: I1128 13:34:55.061179 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b58e4d8b-2bef-4363-a2ad-5467b7a5b11b-client-ca\") pod \"controller-manager-7c5cccfd57-rpr9c\" (UID: \"b58e4d8b-2bef-4363-a2ad-5467b7a5b11b\") " pod="openshift-controller-manager/controller-manager-7c5cccfd57-rpr9c" Nov 28 13:34:55 crc kubenswrapper[4857]: I1128 13:34:55.061295 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b58e4d8b-2bef-4363-a2ad-5467b7a5b11b-config\") pod \"controller-manager-7c5cccfd57-rpr9c\" (UID: \"b58e4d8b-2bef-4363-a2ad-5467b7a5b11b\") " pod="openshift-controller-manager/controller-manager-7c5cccfd57-rpr9c" Nov 28 13:34:55 crc kubenswrapper[4857]: I1128 13:34:55.062175 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b58e4d8b-2bef-4363-a2ad-5467b7a5b11b-client-ca\") pod \"controller-manager-7c5cccfd57-rpr9c\" (UID: \"b58e4d8b-2bef-4363-a2ad-5467b7a5b11b\") " pod="openshift-controller-manager/controller-manager-7c5cccfd57-rpr9c" Nov 28 13:34:55 crc kubenswrapper[4857]: I1128 13:34:55.062824 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b58e4d8b-2bef-4363-a2ad-5467b7a5b11b-config\") pod \"controller-manager-7c5cccfd57-rpr9c\" (UID: \"b58e4d8b-2bef-4363-a2ad-5467b7a5b11b\") " pod="openshift-controller-manager/controller-manager-7c5cccfd57-rpr9c" Nov 28 13:34:55 crc kubenswrapper[4857]: I1128 13:34:55.062991 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/b58e4d8b-2bef-4363-a2ad-5467b7a5b11b-proxy-ca-bundles\") pod \"controller-manager-7c5cccfd57-rpr9c\" (UID: \"b58e4d8b-2bef-4363-a2ad-5467b7a5b11b\") " pod="openshift-controller-manager/controller-manager-7c5cccfd57-rpr9c" 
Nov 28 13:34:55 crc kubenswrapper[4857]: I1128 13:34:55.067006 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b58e4d8b-2bef-4363-a2ad-5467b7a5b11b-serving-cert\") pod \"controller-manager-7c5cccfd57-rpr9c\" (UID: \"b58e4d8b-2bef-4363-a2ad-5467b7a5b11b\") " pod="openshift-controller-manager/controller-manager-7c5cccfd57-rpr9c" Nov 28 13:34:55 crc kubenswrapper[4857]: I1128 13:34:55.086360 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n2m6n\" (UniqueName: \"kubernetes.io/projected/b58e4d8b-2bef-4363-a2ad-5467b7a5b11b-kube-api-access-n2m6n\") pod \"controller-manager-7c5cccfd57-rpr9c\" (UID: \"b58e4d8b-2bef-4363-a2ad-5467b7a5b11b\") " pod="openshift-controller-manager/controller-manager-7c5cccfd57-rpr9c" Nov 28 13:34:55 crc kubenswrapper[4857]: I1128 13:34:55.103176 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7c5cccfd57-rpr9c" Nov 28 13:34:55 crc kubenswrapper[4857]: I1128 13:34:55.240288 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-n674c" event={"ID":"00320753-df8b-4bf5-abd3-87a8b848852b","Type":"ContainerStarted","Data":"5a260cab254abf2d4c80c0172732a3b8ed42de721106b5d34d6ca6aa30955fe2"} Nov 28 13:34:55 crc kubenswrapper[4857]: I1128 13:34:55.341393 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7c5cccfd57-rpr9c"] Nov 28 13:34:55 crc kubenswrapper[4857]: W1128 13:34:55.349145 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb58e4d8b_2bef_4363_a2ad_5467b7a5b11b.slice/crio-4c1ea47a71e097f20b269ff703f450cb714058d61e35f0ac154629b5254fe702 WatchSource:0}: Error finding container 4c1ea47a71e097f20b269ff703f450cb714058d61e35f0ac154629b5254fe702: Status 404 returned error can't find the container with id 4c1ea47a71e097f20b269ff703f450cb714058d61e35f0ac154629b5254fe702 Nov 28 13:34:56 crc kubenswrapper[4857]: I1128 13:34:56.235827 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="14dcc1b3-de6c-46f8-be96-5d5f4053add0" path="/var/lib/kubelet/pods/14dcc1b3-de6c-46f8-be96-5d5f4053add0/volumes" Nov 28 13:34:56 crc kubenswrapper[4857]: I1128 13:34:56.237067 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="60cf556d-3dab-42f4-b924-b55ac0826dcc" path="/var/lib/kubelet/pods/60cf556d-3dab-42f4-b924-b55ac0826dcc/volumes" Nov 28 13:34:56 crc kubenswrapper[4857]: I1128 13:34:56.247537 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7c5cccfd57-rpr9c" event={"ID":"b58e4d8b-2bef-4363-a2ad-5467b7a5b11b","Type":"ContainerStarted","Data":"4c1ea47a71e097f20b269ff703f450cb714058d61e35f0ac154629b5254fe702"} Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.254701 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-n674c" event={"ID":"00320753-df8b-4bf5-abd3-87a8b848852b","Type":"ContainerStarted","Data":"ab3c46b27ee3e2fe41399e8930e42fcc0ddfe210b5948b7e73b581a064efec81"} Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.254786 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-n674c" 
podUID="00320753-df8b-4bf5-abd3-87a8b848852b" containerName="route-controller-manager" containerID="cri-o://ab3c46b27ee3e2fe41399e8930e42fcc0ddfe210b5948b7e73b581a064efec81" gracePeriod=30 Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.255124 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-n674c" Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.256554 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7c5cccfd57-rpr9c" event={"ID":"b58e4d8b-2bef-4363-a2ad-5467b7a5b11b","Type":"ContainerStarted","Data":"8262d522277fa60f591d3e224fe4ac0e0c8ed38dc7f3711331862cdd16e276fd"} Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.256825 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-7c5cccfd57-rpr9c" Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.261272 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-n674c" Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.267084 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-7c5cccfd57-rpr9c" Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.276812 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-n674c" podStartSLOduration=6.276796414 podStartE2EDuration="6.276796414s" podCreationTimestamp="2025-11-28 13:34:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:34:57.274463867 +0000 UTC m=+347.398405324" watchObservedRunningTime="2025-11-28 13:34:57.276796414 +0000 UTC m=+347.400737851" Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.332864 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-7c5cccfd57-rpr9c" podStartSLOduration=3.332845448 podStartE2EDuration="3.332845448s" podCreationTimestamp="2025-11-28 13:34:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:34:57.332159038 +0000 UTC m=+347.456100495" watchObservedRunningTime="2025-11-28 13:34:57.332845448 +0000 UTC m=+347.456786875" Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.622177 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-n674c" Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.650472 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-864bf79549-gg8wl"] Nov 28 13:34:57 crc kubenswrapper[4857]: E1128 13:34:57.650742 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00320753-df8b-4bf5-abd3-87a8b848852b" containerName="route-controller-manager" Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.650757 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="00320753-df8b-4bf5-abd3-87a8b848852b" containerName="route-controller-manager" Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.650857 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="00320753-df8b-4bf5-abd3-87a8b848852b" containerName="route-controller-manager" Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.651267 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-864bf79549-gg8wl" Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.658539 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-864bf79549-gg8wl"] Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.789433 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kf9sw\" (UniqueName: \"kubernetes.io/projected/00320753-df8b-4bf5-abd3-87a8b848852b-kube-api-access-kf9sw\") pod \"00320753-df8b-4bf5-abd3-87a8b848852b\" (UID: \"00320753-df8b-4bf5-abd3-87a8b848852b\") " Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.791029 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/00320753-df8b-4bf5-abd3-87a8b848852b-client-ca\") pod \"00320753-df8b-4bf5-abd3-87a8b848852b\" (UID: \"00320753-df8b-4bf5-abd3-87a8b848852b\") " Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.791185 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/00320753-df8b-4bf5-abd3-87a8b848852b-serving-cert\") pod \"00320753-df8b-4bf5-abd3-87a8b848852b\" (UID: \"00320753-df8b-4bf5-abd3-87a8b848852b\") " Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.791320 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00320753-df8b-4bf5-abd3-87a8b848852b-config\") pod \"00320753-df8b-4bf5-abd3-87a8b848852b\" (UID: \"00320753-df8b-4bf5-abd3-87a8b848852b\") " Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.791538 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-25lph\" (UniqueName: \"kubernetes.io/projected/bf0aa6ad-65ab-400f-9e98-2e2e82daeb51-kube-api-access-25lph\") pod \"route-controller-manager-864bf79549-gg8wl\" (UID: \"bf0aa6ad-65ab-400f-9e98-2e2e82daeb51\") " pod="openshift-route-controller-manager/route-controller-manager-864bf79549-gg8wl" Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.791659 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bf0aa6ad-65ab-400f-9e98-2e2e82daeb51-serving-cert\") pod \"route-controller-manager-864bf79549-gg8wl\" (UID: 
\"bf0aa6ad-65ab-400f-9e98-2e2e82daeb51\") " pod="openshift-route-controller-manager/route-controller-manager-864bf79549-gg8wl" Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.791835 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bf0aa6ad-65ab-400f-9e98-2e2e82daeb51-client-ca\") pod \"route-controller-manager-864bf79549-gg8wl\" (UID: \"bf0aa6ad-65ab-400f-9e98-2e2e82daeb51\") " pod="openshift-route-controller-manager/route-controller-manager-864bf79549-gg8wl" Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.791705 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/00320753-df8b-4bf5-abd3-87a8b848852b-client-ca" (OuterVolumeSpecName: "client-ca") pod "00320753-df8b-4bf5-abd3-87a8b848852b" (UID: "00320753-df8b-4bf5-abd3-87a8b848852b"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.792010 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf0aa6ad-65ab-400f-9e98-2e2e82daeb51-config\") pod \"route-controller-manager-864bf79549-gg8wl\" (UID: \"bf0aa6ad-65ab-400f-9e98-2e2e82daeb51\") " pod="openshift-route-controller-manager/route-controller-manager-864bf79549-gg8wl" Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.792052 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/00320753-df8b-4bf5-abd3-87a8b848852b-config" (OuterVolumeSpecName: "config") pod "00320753-df8b-4bf5-abd3-87a8b848852b" (UID: "00320753-df8b-4bf5-abd3-87a8b848852b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.792182 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00320753-df8b-4bf5-abd3-87a8b848852b-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.792204 4857 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/00320753-df8b-4bf5-abd3-87a8b848852b-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.796092 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00320753-df8b-4bf5-abd3-87a8b848852b-kube-api-access-kf9sw" (OuterVolumeSpecName: "kube-api-access-kf9sw") pod "00320753-df8b-4bf5-abd3-87a8b848852b" (UID: "00320753-df8b-4bf5-abd3-87a8b848852b"). InnerVolumeSpecName "kube-api-access-kf9sw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.798758 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00320753-df8b-4bf5-abd3-87a8b848852b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "00320753-df8b-4bf5-abd3-87a8b848852b" (UID: "00320753-df8b-4bf5-abd3-87a8b848852b"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.893035 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-25lph\" (UniqueName: \"kubernetes.io/projected/bf0aa6ad-65ab-400f-9e98-2e2e82daeb51-kube-api-access-25lph\") pod \"route-controller-manager-864bf79549-gg8wl\" (UID: \"bf0aa6ad-65ab-400f-9e98-2e2e82daeb51\") " pod="openshift-route-controller-manager/route-controller-manager-864bf79549-gg8wl" Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.893083 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bf0aa6ad-65ab-400f-9e98-2e2e82daeb51-serving-cert\") pod \"route-controller-manager-864bf79549-gg8wl\" (UID: \"bf0aa6ad-65ab-400f-9e98-2e2e82daeb51\") " pod="openshift-route-controller-manager/route-controller-manager-864bf79549-gg8wl" Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.893112 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bf0aa6ad-65ab-400f-9e98-2e2e82daeb51-client-ca\") pod \"route-controller-manager-864bf79549-gg8wl\" (UID: \"bf0aa6ad-65ab-400f-9e98-2e2e82daeb51\") " pod="openshift-route-controller-manager/route-controller-manager-864bf79549-gg8wl" Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.893170 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf0aa6ad-65ab-400f-9e98-2e2e82daeb51-config\") pod \"route-controller-manager-864bf79549-gg8wl\" (UID: \"bf0aa6ad-65ab-400f-9e98-2e2e82daeb51\") " pod="openshift-route-controller-manager/route-controller-manager-864bf79549-gg8wl" Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.893234 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kf9sw\" (UniqueName: \"kubernetes.io/projected/00320753-df8b-4bf5-abd3-87a8b848852b-kube-api-access-kf9sw\") on node \"crc\" DevicePath \"\"" Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.893249 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/00320753-df8b-4bf5-abd3-87a8b848852b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.894624 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bf0aa6ad-65ab-400f-9e98-2e2e82daeb51-client-ca\") pod \"route-controller-manager-864bf79549-gg8wl\" (UID: \"bf0aa6ad-65ab-400f-9e98-2e2e82daeb51\") " pod="openshift-route-controller-manager/route-controller-manager-864bf79549-gg8wl" Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.894718 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf0aa6ad-65ab-400f-9e98-2e2e82daeb51-config\") pod \"route-controller-manager-864bf79549-gg8wl\" (UID: \"bf0aa6ad-65ab-400f-9e98-2e2e82daeb51\") " pod="openshift-route-controller-manager/route-controller-manager-864bf79549-gg8wl" Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.897704 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bf0aa6ad-65ab-400f-9e98-2e2e82daeb51-serving-cert\") pod \"route-controller-manager-864bf79549-gg8wl\" (UID: \"bf0aa6ad-65ab-400f-9e98-2e2e82daeb51\") " 
pod="openshift-route-controller-manager/route-controller-manager-864bf79549-gg8wl" Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.909444 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-25lph\" (UniqueName: \"kubernetes.io/projected/bf0aa6ad-65ab-400f-9e98-2e2e82daeb51-kube-api-access-25lph\") pod \"route-controller-manager-864bf79549-gg8wl\" (UID: \"bf0aa6ad-65ab-400f-9e98-2e2e82daeb51\") " pod="openshift-route-controller-manager/route-controller-manager-864bf79549-gg8wl" Nov 28 13:34:57 crc kubenswrapper[4857]: I1128 13:34:57.967521 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-864bf79549-gg8wl" Nov 28 13:34:58 crc kubenswrapper[4857]: I1128 13:34:58.261799 4857 generic.go:334] "Generic (PLEG): container finished" podID="00320753-df8b-4bf5-abd3-87a8b848852b" containerID="ab3c46b27ee3e2fe41399e8930e42fcc0ddfe210b5948b7e73b581a064efec81" exitCode=0 Nov 28 13:34:58 crc kubenswrapper[4857]: I1128 13:34:58.261823 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-n674c" Nov 28 13:34:58 crc kubenswrapper[4857]: I1128 13:34:58.261864 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-n674c" event={"ID":"00320753-df8b-4bf5-abd3-87a8b848852b","Type":"ContainerDied","Data":"ab3c46b27ee3e2fe41399e8930e42fcc0ddfe210b5948b7e73b581a064efec81"} Nov 28 13:34:58 crc kubenswrapper[4857]: I1128 13:34:58.261894 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-n674c" event={"ID":"00320753-df8b-4bf5-abd3-87a8b848852b","Type":"ContainerDied","Data":"5a260cab254abf2d4c80c0172732a3b8ed42de721106b5d34d6ca6aa30955fe2"} Nov 28 13:34:58 crc kubenswrapper[4857]: I1128 13:34:58.261910 4857 scope.go:117] "RemoveContainer" containerID="ab3c46b27ee3e2fe41399e8930e42fcc0ddfe210b5948b7e73b581a064efec81" Nov 28 13:34:58 crc kubenswrapper[4857]: I1128 13:34:58.278841 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d8d569866-n674c"] Nov 28 13:34:58 crc kubenswrapper[4857]: I1128 13:34:58.281739 4857 scope.go:117] "RemoveContainer" containerID="ab3c46b27ee3e2fe41399e8930e42fcc0ddfe210b5948b7e73b581a064efec81" Nov 28 13:34:58 crc kubenswrapper[4857]: I1128 13:34:58.282109 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d8d569866-n674c"] Nov 28 13:34:58 crc kubenswrapper[4857]: E1128 13:34:58.282168 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ab3c46b27ee3e2fe41399e8930e42fcc0ddfe210b5948b7e73b581a064efec81\": container with ID starting with ab3c46b27ee3e2fe41399e8930e42fcc0ddfe210b5948b7e73b581a064efec81 not found: ID does not exist" containerID="ab3c46b27ee3e2fe41399e8930e42fcc0ddfe210b5948b7e73b581a064efec81" Nov 28 13:34:58 crc kubenswrapper[4857]: I1128 13:34:58.282206 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab3c46b27ee3e2fe41399e8930e42fcc0ddfe210b5948b7e73b581a064efec81"} err="failed to get container status \"ab3c46b27ee3e2fe41399e8930e42fcc0ddfe210b5948b7e73b581a064efec81\": rpc error: code = NotFound desc = could not find 
container \"ab3c46b27ee3e2fe41399e8930e42fcc0ddfe210b5948b7e73b581a064efec81\": container with ID starting with ab3c46b27ee3e2fe41399e8930e42fcc0ddfe210b5948b7e73b581a064efec81 not found: ID does not exist" Nov 28 13:34:58 crc kubenswrapper[4857]: I1128 13:34:58.368174 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-864bf79549-gg8wl"] Nov 28 13:34:59 crc kubenswrapper[4857]: I1128 13:34:59.270203 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-864bf79549-gg8wl" event={"ID":"bf0aa6ad-65ab-400f-9e98-2e2e82daeb51","Type":"ContainerStarted","Data":"acb8ed74be1e47c4fd179f6166d95c6ca27d3d1af5f318bc232fb03cc466e574"} Nov 28 13:34:59 crc kubenswrapper[4857]: I1128 13:34:59.270250 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-864bf79549-gg8wl" event={"ID":"bf0aa6ad-65ab-400f-9e98-2e2e82daeb51","Type":"ContainerStarted","Data":"9000da396f21991d51e9b69fdb847e9c478557cc35e412456f31d1a1de13a143"} Nov 28 13:34:59 crc kubenswrapper[4857]: I1128 13:34:59.270456 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-864bf79549-gg8wl" Nov 28 13:34:59 crc kubenswrapper[4857]: I1128 13:34:59.277744 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-864bf79549-gg8wl" Nov 28 13:34:59 crc kubenswrapper[4857]: I1128 13:34:59.306206 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-864bf79549-gg8wl" podStartSLOduration=5.306187219 podStartE2EDuration="5.306187219s" podCreationTimestamp="2025-11-28 13:34:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:34:59.305121568 +0000 UTC m=+349.429063005" watchObservedRunningTime="2025-11-28 13:34:59.306187219 +0000 UTC m=+349.430128656" Nov 28 13:35:00 crc kubenswrapper[4857]: I1128 13:35:00.235599 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00320753-df8b-4bf5-abd3-87a8b848852b" path="/var/lib/kubelet/pods/00320753-df8b-4bf5-abd3-87a8b848852b/volumes" Nov 28 13:35:07 crc kubenswrapper[4857]: I1128 13:35:07.164503 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-7c5cccfd57-rpr9c"] Nov 28 13:35:07 crc kubenswrapper[4857]: I1128 13:35:07.165386 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-7c5cccfd57-rpr9c" podUID="b58e4d8b-2bef-4363-a2ad-5467b7a5b11b" containerName="controller-manager" containerID="cri-o://8262d522277fa60f591d3e224fe4ac0e0c8ed38dc7f3711331862cdd16e276fd" gracePeriod=30 Nov 28 13:35:07 crc kubenswrapper[4857]: I1128 13:35:07.314262 4857 generic.go:334] "Generic (PLEG): container finished" podID="b58e4d8b-2bef-4363-a2ad-5467b7a5b11b" containerID="8262d522277fa60f591d3e224fe4ac0e0c8ed38dc7f3711331862cdd16e276fd" exitCode=0 Nov 28 13:35:07 crc kubenswrapper[4857]: I1128 13:35:07.314314 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7c5cccfd57-rpr9c" 
event={"ID":"b58e4d8b-2bef-4363-a2ad-5467b7a5b11b","Type":"ContainerDied","Data":"8262d522277fa60f591d3e224fe4ac0e0c8ed38dc7f3711331862cdd16e276fd"} Nov 28 13:35:07 crc kubenswrapper[4857]: I1128 13:35:07.722237 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7c5cccfd57-rpr9c" Nov 28 13:35:07 crc kubenswrapper[4857]: I1128 13:35:07.821953 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/b58e4d8b-2bef-4363-a2ad-5467b7a5b11b-proxy-ca-bundles\") pod \"b58e4d8b-2bef-4363-a2ad-5467b7a5b11b\" (UID: \"b58e4d8b-2bef-4363-a2ad-5467b7a5b11b\") " Nov 28 13:35:07 crc kubenswrapper[4857]: I1128 13:35:07.822019 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n2m6n\" (UniqueName: \"kubernetes.io/projected/b58e4d8b-2bef-4363-a2ad-5467b7a5b11b-kube-api-access-n2m6n\") pod \"b58e4d8b-2bef-4363-a2ad-5467b7a5b11b\" (UID: \"b58e4d8b-2bef-4363-a2ad-5467b7a5b11b\") " Nov 28 13:35:07 crc kubenswrapper[4857]: I1128 13:35:07.822103 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b58e4d8b-2bef-4363-a2ad-5467b7a5b11b-serving-cert\") pod \"b58e4d8b-2bef-4363-a2ad-5467b7a5b11b\" (UID: \"b58e4d8b-2bef-4363-a2ad-5467b7a5b11b\") " Nov 28 13:35:07 crc kubenswrapper[4857]: I1128 13:35:07.822136 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b58e4d8b-2bef-4363-a2ad-5467b7a5b11b-client-ca\") pod \"b58e4d8b-2bef-4363-a2ad-5467b7a5b11b\" (UID: \"b58e4d8b-2bef-4363-a2ad-5467b7a5b11b\") " Nov 28 13:35:07 crc kubenswrapper[4857]: I1128 13:35:07.822208 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b58e4d8b-2bef-4363-a2ad-5467b7a5b11b-config\") pod \"b58e4d8b-2bef-4363-a2ad-5467b7a5b11b\" (UID: \"b58e4d8b-2bef-4363-a2ad-5467b7a5b11b\") " Nov 28 13:35:07 crc kubenswrapper[4857]: I1128 13:35:07.822843 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b58e4d8b-2bef-4363-a2ad-5467b7a5b11b-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "b58e4d8b-2bef-4363-a2ad-5467b7a5b11b" (UID: "b58e4d8b-2bef-4363-a2ad-5467b7a5b11b"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:35:07 crc kubenswrapper[4857]: I1128 13:35:07.822882 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b58e4d8b-2bef-4363-a2ad-5467b7a5b11b-client-ca" (OuterVolumeSpecName: "client-ca") pod "b58e4d8b-2bef-4363-a2ad-5467b7a5b11b" (UID: "b58e4d8b-2bef-4363-a2ad-5467b7a5b11b"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:35:07 crc kubenswrapper[4857]: I1128 13:35:07.823017 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b58e4d8b-2bef-4363-a2ad-5467b7a5b11b-config" (OuterVolumeSpecName: "config") pod "b58e4d8b-2bef-4363-a2ad-5467b7a5b11b" (UID: "b58e4d8b-2bef-4363-a2ad-5467b7a5b11b"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:35:07 crc kubenswrapper[4857]: I1128 13:35:07.830207 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b58e4d8b-2bef-4363-a2ad-5467b7a5b11b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "b58e4d8b-2bef-4363-a2ad-5467b7a5b11b" (UID: "b58e4d8b-2bef-4363-a2ad-5467b7a5b11b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:35:07 crc kubenswrapper[4857]: I1128 13:35:07.830228 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b58e4d8b-2bef-4363-a2ad-5467b7a5b11b-kube-api-access-n2m6n" (OuterVolumeSpecName: "kube-api-access-n2m6n") pod "b58e4d8b-2bef-4363-a2ad-5467b7a5b11b" (UID: "b58e4d8b-2bef-4363-a2ad-5467b7a5b11b"). InnerVolumeSpecName "kube-api-access-n2m6n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:35:07 crc kubenswrapper[4857]: I1128 13:35:07.923437 4857 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/b58e4d8b-2bef-4363-a2ad-5467b7a5b11b-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 28 13:35:07 crc kubenswrapper[4857]: I1128 13:35:07.923478 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n2m6n\" (UniqueName: \"kubernetes.io/projected/b58e4d8b-2bef-4363-a2ad-5467b7a5b11b-kube-api-access-n2m6n\") on node \"crc\" DevicePath \"\"" Nov 28 13:35:07 crc kubenswrapper[4857]: I1128 13:35:07.923490 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b58e4d8b-2bef-4363-a2ad-5467b7a5b11b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:35:07 crc kubenswrapper[4857]: I1128 13:35:07.923500 4857 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b58e4d8b-2bef-4363-a2ad-5467b7a5b11b-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:35:07 crc kubenswrapper[4857]: I1128 13:35:07.923509 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b58e4d8b-2bef-4363-a2ad-5467b7a5b11b-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:35:08 crc kubenswrapper[4857]: I1128 13:35:08.320564 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7c5cccfd57-rpr9c" event={"ID":"b58e4d8b-2bef-4363-a2ad-5467b7a5b11b","Type":"ContainerDied","Data":"4c1ea47a71e097f20b269ff703f450cb714058d61e35f0ac154629b5254fe702"} Nov 28 13:35:08 crc kubenswrapper[4857]: I1128 13:35:08.320632 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-7c5cccfd57-rpr9c" Nov 28 13:35:08 crc kubenswrapper[4857]: I1128 13:35:08.320913 4857 scope.go:117] "RemoveContainer" containerID="8262d522277fa60f591d3e224fe4ac0e0c8ed38dc7f3711331862cdd16e276fd" Nov 28 13:35:08 crc kubenswrapper[4857]: I1128 13:35:08.349130 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-7c5cccfd57-rpr9c"] Nov 28 13:35:08 crc kubenswrapper[4857]: I1128 13:35:08.353878 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-7c5cccfd57-rpr9c"] Nov 28 13:35:08 crc kubenswrapper[4857]: I1128 13:35:08.469982 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-6f9986644b-xwwz6"] Nov 28 13:35:08 crc kubenswrapper[4857]: E1128 13:35:08.470901 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b58e4d8b-2bef-4363-a2ad-5467b7a5b11b" containerName="controller-manager" Nov 28 13:35:08 crc kubenswrapper[4857]: I1128 13:35:08.470978 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="b58e4d8b-2bef-4363-a2ad-5467b7a5b11b" containerName="controller-manager" Nov 28 13:35:08 crc kubenswrapper[4857]: I1128 13:35:08.471551 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="b58e4d8b-2bef-4363-a2ad-5467b7a5b11b" containerName="controller-manager" Nov 28 13:35:08 crc kubenswrapper[4857]: I1128 13:35:08.472357 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6f9986644b-xwwz6" Nov 28 13:35:08 crc kubenswrapper[4857]: I1128 13:35:08.478111 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 28 13:35:08 crc kubenswrapper[4857]: I1128 13:35:08.478200 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 28 13:35:08 crc kubenswrapper[4857]: I1128 13:35:08.478572 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 28 13:35:08 crc kubenswrapper[4857]: I1128 13:35:08.478676 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 28 13:35:08 crc kubenswrapper[4857]: I1128 13:35:08.479187 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 28 13:35:08 crc kubenswrapper[4857]: I1128 13:35:08.480966 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 28 13:35:08 crc kubenswrapper[4857]: I1128 13:35:08.484882 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 28 13:35:08 crc kubenswrapper[4857]: I1128 13:35:08.497755 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6f9986644b-xwwz6"] Nov 28 13:35:08 crc kubenswrapper[4857]: I1128 13:35:08.633584 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee82c35a-204d-4561-a42a-e18579bcf3a7-config\") pod \"controller-manager-6f9986644b-xwwz6\" (UID: \"ee82c35a-204d-4561-a42a-e18579bcf3a7\") " 
pod="openshift-controller-manager/controller-manager-6f9986644b-xwwz6" Nov 28 13:35:08 crc kubenswrapper[4857]: I1128 13:35:08.633628 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ee82c35a-204d-4561-a42a-e18579bcf3a7-proxy-ca-bundles\") pod \"controller-manager-6f9986644b-xwwz6\" (UID: \"ee82c35a-204d-4561-a42a-e18579bcf3a7\") " pod="openshift-controller-manager/controller-manager-6f9986644b-xwwz6" Nov 28 13:35:08 crc kubenswrapper[4857]: I1128 13:35:08.633672 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-99sk6\" (UniqueName: \"kubernetes.io/projected/ee82c35a-204d-4561-a42a-e18579bcf3a7-kube-api-access-99sk6\") pod \"controller-manager-6f9986644b-xwwz6\" (UID: \"ee82c35a-204d-4561-a42a-e18579bcf3a7\") " pod="openshift-controller-manager/controller-manager-6f9986644b-xwwz6" Nov 28 13:35:08 crc kubenswrapper[4857]: I1128 13:35:08.633690 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ee82c35a-204d-4561-a42a-e18579bcf3a7-serving-cert\") pod \"controller-manager-6f9986644b-xwwz6\" (UID: \"ee82c35a-204d-4561-a42a-e18579bcf3a7\") " pod="openshift-controller-manager/controller-manager-6f9986644b-xwwz6" Nov 28 13:35:08 crc kubenswrapper[4857]: I1128 13:35:08.633710 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ee82c35a-204d-4561-a42a-e18579bcf3a7-client-ca\") pod \"controller-manager-6f9986644b-xwwz6\" (UID: \"ee82c35a-204d-4561-a42a-e18579bcf3a7\") " pod="openshift-controller-manager/controller-manager-6f9986644b-xwwz6" Nov 28 13:35:08 crc kubenswrapper[4857]: I1128 13:35:08.735314 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-99sk6\" (UniqueName: \"kubernetes.io/projected/ee82c35a-204d-4561-a42a-e18579bcf3a7-kube-api-access-99sk6\") pod \"controller-manager-6f9986644b-xwwz6\" (UID: \"ee82c35a-204d-4561-a42a-e18579bcf3a7\") " pod="openshift-controller-manager/controller-manager-6f9986644b-xwwz6" Nov 28 13:35:08 crc kubenswrapper[4857]: I1128 13:35:08.735374 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ee82c35a-204d-4561-a42a-e18579bcf3a7-serving-cert\") pod \"controller-manager-6f9986644b-xwwz6\" (UID: \"ee82c35a-204d-4561-a42a-e18579bcf3a7\") " pod="openshift-controller-manager/controller-manager-6f9986644b-xwwz6" Nov 28 13:35:08 crc kubenswrapper[4857]: I1128 13:35:08.735404 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ee82c35a-204d-4561-a42a-e18579bcf3a7-client-ca\") pod \"controller-manager-6f9986644b-xwwz6\" (UID: \"ee82c35a-204d-4561-a42a-e18579bcf3a7\") " pod="openshift-controller-manager/controller-manager-6f9986644b-xwwz6" Nov 28 13:35:08 crc kubenswrapper[4857]: I1128 13:35:08.735582 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee82c35a-204d-4561-a42a-e18579bcf3a7-config\") pod \"controller-manager-6f9986644b-xwwz6\" (UID: \"ee82c35a-204d-4561-a42a-e18579bcf3a7\") " pod="openshift-controller-manager/controller-manager-6f9986644b-xwwz6" Nov 28 13:35:08 crc kubenswrapper[4857]: I1128 
13:35:08.735631 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ee82c35a-204d-4561-a42a-e18579bcf3a7-proxy-ca-bundles\") pod \"controller-manager-6f9986644b-xwwz6\" (UID: \"ee82c35a-204d-4561-a42a-e18579bcf3a7\") " pod="openshift-controller-manager/controller-manager-6f9986644b-xwwz6" Nov 28 13:35:08 crc kubenswrapper[4857]: I1128 13:35:08.736585 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ee82c35a-204d-4561-a42a-e18579bcf3a7-client-ca\") pod \"controller-manager-6f9986644b-xwwz6\" (UID: \"ee82c35a-204d-4561-a42a-e18579bcf3a7\") " pod="openshift-controller-manager/controller-manager-6f9986644b-xwwz6" Nov 28 13:35:08 crc kubenswrapper[4857]: I1128 13:35:08.737030 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ee82c35a-204d-4561-a42a-e18579bcf3a7-proxy-ca-bundles\") pod \"controller-manager-6f9986644b-xwwz6\" (UID: \"ee82c35a-204d-4561-a42a-e18579bcf3a7\") " pod="openshift-controller-manager/controller-manager-6f9986644b-xwwz6" Nov 28 13:35:08 crc kubenswrapper[4857]: I1128 13:35:08.737199 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee82c35a-204d-4561-a42a-e18579bcf3a7-config\") pod \"controller-manager-6f9986644b-xwwz6\" (UID: \"ee82c35a-204d-4561-a42a-e18579bcf3a7\") " pod="openshift-controller-manager/controller-manager-6f9986644b-xwwz6" Nov 28 13:35:08 crc kubenswrapper[4857]: I1128 13:35:08.740977 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ee82c35a-204d-4561-a42a-e18579bcf3a7-serving-cert\") pod \"controller-manager-6f9986644b-xwwz6\" (UID: \"ee82c35a-204d-4561-a42a-e18579bcf3a7\") " pod="openshift-controller-manager/controller-manager-6f9986644b-xwwz6" Nov 28 13:35:08 crc kubenswrapper[4857]: I1128 13:35:08.759842 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-99sk6\" (UniqueName: \"kubernetes.io/projected/ee82c35a-204d-4561-a42a-e18579bcf3a7-kube-api-access-99sk6\") pod \"controller-manager-6f9986644b-xwwz6\" (UID: \"ee82c35a-204d-4561-a42a-e18579bcf3a7\") " pod="openshift-controller-manager/controller-manager-6f9986644b-xwwz6" Nov 28 13:35:08 crc kubenswrapper[4857]: I1128 13:35:08.790814 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-6f9986644b-xwwz6" Nov 28 13:35:09 crc kubenswrapper[4857]: I1128 13:35:09.049830 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6f9986644b-xwwz6"] Nov 28 13:35:09 crc kubenswrapper[4857]: I1128 13:35:09.329188 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6f9986644b-xwwz6" event={"ID":"ee82c35a-204d-4561-a42a-e18579bcf3a7","Type":"ContainerStarted","Data":"d1bdf0610e9c7ca6b96e375dd3087a456aeded4b5a2cad48b02defb070e62bad"} Nov 28 13:35:09 crc kubenswrapper[4857]: I1128 13:35:09.329240 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6f9986644b-xwwz6" event={"ID":"ee82c35a-204d-4561-a42a-e18579bcf3a7","Type":"ContainerStarted","Data":"ae7215aaeb5caa3b7f83f6e4d7cb6d578cd4da2648c0e5060d1670d48d0cf530"} Nov 28 13:35:09 crc kubenswrapper[4857]: I1128 13:35:09.329445 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-6f9986644b-xwwz6" Nov 28 13:35:09 crc kubenswrapper[4857]: I1128 13:35:09.330856 4857 patch_prober.go:28] interesting pod/controller-manager-6f9986644b-xwwz6 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.68:8443/healthz\": dial tcp 10.217.0.68:8443: connect: connection refused" start-of-body= Nov 28 13:35:09 crc kubenswrapper[4857]: I1128 13:35:09.330918 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-6f9986644b-xwwz6" podUID="ee82c35a-204d-4561-a42a-e18579bcf3a7" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.68:8443/healthz\": dial tcp 10.217.0.68:8443: connect: connection refused" Nov 28 13:35:09 crc kubenswrapper[4857]: I1128 13:35:09.345314 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-6f9986644b-xwwz6" podStartSLOduration=2.3452997079999998 podStartE2EDuration="2.345299708s" podCreationTimestamp="2025-11-28 13:35:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:35:09.343530898 +0000 UTC m=+359.467472345" watchObservedRunningTime="2025-11-28 13:35:09.345299708 +0000 UTC m=+359.469241145" Nov 28 13:35:10 crc kubenswrapper[4857]: I1128 13:35:10.236507 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b58e4d8b-2bef-4363-a2ad-5467b7a5b11b" path="/var/lib/kubelet/pods/b58e4d8b-2bef-4363-a2ad-5467b7a5b11b/volumes" Nov 28 13:35:10 crc kubenswrapper[4857]: I1128 13:35:10.339677 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-6f9986644b-xwwz6" Nov 28 13:35:11 crc kubenswrapper[4857]: I1128 13:35:11.308887 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:35:11 crc kubenswrapper[4857]: I1128 13:35:11.309321 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" 
podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:35:13 crc kubenswrapper[4857]: I1128 13:35:13.119876 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-4544f"] Nov 28 13:35:13 crc kubenswrapper[4857]: I1128 13:35:13.120655 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-4544f" Nov 28 13:35:13 crc kubenswrapper[4857]: I1128 13:35:13.141068 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-4544f"] Nov 28 13:35:13 crc kubenswrapper[4857]: I1128 13:35:13.290221 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-4544f\" (UID: \"97ca1940-68a1-4a0f-96e6-70da0a262faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-4544f" Nov 28 13:35:13 crc kubenswrapper[4857]: I1128 13:35:13.290307 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/97ca1940-68a1-4a0f-96e6-70da0a262faa-registry-certificates\") pod \"image-registry-66df7c8f76-4544f\" (UID: \"97ca1940-68a1-4a0f-96e6-70da0a262faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-4544f" Nov 28 13:35:13 crc kubenswrapper[4857]: I1128 13:35:13.290383 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/97ca1940-68a1-4a0f-96e6-70da0a262faa-registry-tls\") pod \"image-registry-66df7c8f76-4544f\" (UID: \"97ca1940-68a1-4a0f-96e6-70da0a262faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-4544f" Nov 28 13:35:13 crc kubenswrapper[4857]: I1128 13:35:13.290409 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/97ca1940-68a1-4a0f-96e6-70da0a262faa-bound-sa-token\") pod \"image-registry-66df7c8f76-4544f\" (UID: \"97ca1940-68a1-4a0f-96e6-70da0a262faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-4544f" Nov 28 13:35:13 crc kubenswrapper[4857]: I1128 13:35:13.290433 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/97ca1940-68a1-4a0f-96e6-70da0a262faa-installation-pull-secrets\") pod \"image-registry-66df7c8f76-4544f\" (UID: \"97ca1940-68a1-4a0f-96e6-70da0a262faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-4544f" Nov 28 13:35:13 crc kubenswrapper[4857]: I1128 13:35:13.290529 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/97ca1940-68a1-4a0f-96e6-70da0a262faa-trusted-ca\") pod \"image-registry-66df7c8f76-4544f\" (UID: \"97ca1940-68a1-4a0f-96e6-70da0a262faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-4544f" Nov 28 13:35:13 crc kubenswrapper[4857]: I1128 13:35:13.290583 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/97ca1940-68a1-4a0f-96e6-70da0a262faa-ca-trust-extracted\") pod \"image-registry-66df7c8f76-4544f\" (UID: \"97ca1940-68a1-4a0f-96e6-70da0a262faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-4544f" Nov 28 13:35:13 crc kubenswrapper[4857]: I1128 13:35:13.290610 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dzkd9\" (UniqueName: \"kubernetes.io/projected/97ca1940-68a1-4a0f-96e6-70da0a262faa-kube-api-access-dzkd9\") pod \"image-registry-66df7c8f76-4544f\" (UID: \"97ca1940-68a1-4a0f-96e6-70da0a262faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-4544f" Nov 28 13:35:13 crc kubenswrapper[4857]: I1128 13:35:13.313313 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-4544f\" (UID: \"97ca1940-68a1-4a0f-96e6-70da0a262faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-4544f" Nov 28 13:35:13 crc kubenswrapper[4857]: I1128 13:35:13.392635 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/97ca1940-68a1-4a0f-96e6-70da0a262faa-trusted-ca\") pod \"image-registry-66df7c8f76-4544f\" (UID: \"97ca1940-68a1-4a0f-96e6-70da0a262faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-4544f" Nov 28 13:35:13 crc kubenswrapper[4857]: I1128 13:35:13.392690 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/97ca1940-68a1-4a0f-96e6-70da0a262faa-ca-trust-extracted\") pod \"image-registry-66df7c8f76-4544f\" (UID: \"97ca1940-68a1-4a0f-96e6-70da0a262faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-4544f" Nov 28 13:35:13 crc kubenswrapper[4857]: I1128 13:35:13.392728 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dzkd9\" (UniqueName: \"kubernetes.io/projected/97ca1940-68a1-4a0f-96e6-70da0a262faa-kube-api-access-dzkd9\") pod \"image-registry-66df7c8f76-4544f\" (UID: \"97ca1940-68a1-4a0f-96e6-70da0a262faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-4544f" Nov 28 13:35:13 crc kubenswrapper[4857]: I1128 13:35:13.392757 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/97ca1940-68a1-4a0f-96e6-70da0a262faa-registry-certificates\") pod \"image-registry-66df7c8f76-4544f\" (UID: \"97ca1940-68a1-4a0f-96e6-70da0a262faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-4544f" Nov 28 13:35:13 crc kubenswrapper[4857]: I1128 13:35:13.392831 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/97ca1940-68a1-4a0f-96e6-70da0a262faa-registry-tls\") pod \"image-registry-66df7c8f76-4544f\" (UID: \"97ca1940-68a1-4a0f-96e6-70da0a262faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-4544f" Nov 28 13:35:13 crc kubenswrapper[4857]: I1128 13:35:13.392846 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/97ca1940-68a1-4a0f-96e6-70da0a262faa-bound-sa-token\") pod \"image-registry-66df7c8f76-4544f\" (UID: 
\"97ca1940-68a1-4a0f-96e6-70da0a262faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-4544f" Nov 28 13:35:13 crc kubenswrapper[4857]: I1128 13:35:13.392864 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/97ca1940-68a1-4a0f-96e6-70da0a262faa-installation-pull-secrets\") pod \"image-registry-66df7c8f76-4544f\" (UID: \"97ca1940-68a1-4a0f-96e6-70da0a262faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-4544f" Nov 28 13:35:13 crc kubenswrapper[4857]: I1128 13:35:13.393462 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/97ca1940-68a1-4a0f-96e6-70da0a262faa-ca-trust-extracted\") pod \"image-registry-66df7c8f76-4544f\" (UID: \"97ca1940-68a1-4a0f-96e6-70da0a262faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-4544f" Nov 28 13:35:13 crc kubenswrapper[4857]: I1128 13:35:13.393861 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/97ca1940-68a1-4a0f-96e6-70da0a262faa-trusted-ca\") pod \"image-registry-66df7c8f76-4544f\" (UID: \"97ca1940-68a1-4a0f-96e6-70da0a262faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-4544f" Nov 28 13:35:13 crc kubenswrapper[4857]: I1128 13:35:13.394428 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/97ca1940-68a1-4a0f-96e6-70da0a262faa-registry-certificates\") pod \"image-registry-66df7c8f76-4544f\" (UID: \"97ca1940-68a1-4a0f-96e6-70da0a262faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-4544f" Nov 28 13:35:13 crc kubenswrapper[4857]: I1128 13:35:13.400350 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/97ca1940-68a1-4a0f-96e6-70da0a262faa-registry-tls\") pod \"image-registry-66df7c8f76-4544f\" (UID: \"97ca1940-68a1-4a0f-96e6-70da0a262faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-4544f" Nov 28 13:35:13 crc kubenswrapper[4857]: I1128 13:35:13.406575 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/97ca1940-68a1-4a0f-96e6-70da0a262faa-installation-pull-secrets\") pod \"image-registry-66df7c8f76-4544f\" (UID: \"97ca1940-68a1-4a0f-96e6-70da0a262faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-4544f" Nov 28 13:35:13 crc kubenswrapper[4857]: I1128 13:35:13.408990 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dzkd9\" (UniqueName: \"kubernetes.io/projected/97ca1940-68a1-4a0f-96e6-70da0a262faa-kube-api-access-dzkd9\") pod \"image-registry-66df7c8f76-4544f\" (UID: \"97ca1940-68a1-4a0f-96e6-70da0a262faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-4544f" Nov 28 13:35:13 crc kubenswrapper[4857]: I1128 13:35:13.411370 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/97ca1940-68a1-4a0f-96e6-70da0a262faa-bound-sa-token\") pod \"image-registry-66df7c8f76-4544f\" (UID: \"97ca1940-68a1-4a0f-96e6-70da0a262faa\") " pod="openshift-image-registry/image-registry-66df7c8f76-4544f" Nov 28 13:35:13 crc kubenswrapper[4857]: I1128 13:35:13.437352 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-4544f" Nov 28 13:35:13 crc kubenswrapper[4857]: I1128 13:35:13.844704 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-4544f"] Nov 28 13:35:14 crc kubenswrapper[4857]: I1128 13:35:14.359928 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-4544f" event={"ID":"97ca1940-68a1-4a0f-96e6-70da0a262faa","Type":"ContainerStarted","Data":"20113bfe8c683b1d24eca87a6c110a995c7e5dc3f6bbcff721c1b13033c8f3a3"} Nov 28 13:35:14 crc kubenswrapper[4857]: I1128 13:35:14.360012 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-4544f" event={"ID":"97ca1940-68a1-4a0f-96e6-70da0a262faa","Type":"ContainerStarted","Data":"e09e61fd1d630e059e58c8aa6c27aa18390bc29d6f5c1f45e884362b7d1cfd16"} Nov 28 13:35:14 crc kubenswrapper[4857]: I1128 13:35:14.360061 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-4544f" Nov 28 13:35:14 crc kubenswrapper[4857]: I1128 13:35:14.384856 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-4544f" podStartSLOduration=1.3848360610000001 podStartE2EDuration="1.384836061s" podCreationTimestamp="2025-11-28 13:35:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:35:14.380043024 +0000 UTC m=+364.503984461" watchObservedRunningTime="2025-11-28 13:35:14.384836061 +0000 UTC m=+364.508777488" Nov 28 13:35:26 crc kubenswrapper[4857]: I1128 13:35:26.712088 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-864bf79549-gg8wl"] Nov 28 13:35:26 crc kubenswrapper[4857]: I1128 13:35:26.713224 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-864bf79549-gg8wl" podUID="bf0aa6ad-65ab-400f-9e98-2e2e82daeb51" containerName="route-controller-manager" containerID="cri-o://acb8ed74be1e47c4fd179f6166d95c6ca27d3d1af5f318bc232fb03cc466e574" gracePeriod=30 Nov 28 13:35:27 crc kubenswrapper[4857]: I1128 13:35:27.198529 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-864bf79549-gg8wl" Nov 28 13:35:27 crc kubenswrapper[4857]: I1128 13:35:27.379405 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bf0aa6ad-65ab-400f-9e98-2e2e82daeb51-client-ca\") pod \"bf0aa6ad-65ab-400f-9e98-2e2e82daeb51\" (UID: \"bf0aa6ad-65ab-400f-9e98-2e2e82daeb51\") " Nov 28 13:35:27 crc kubenswrapper[4857]: I1128 13:35:27.379535 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf0aa6ad-65ab-400f-9e98-2e2e82daeb51-config\") pod \"bf0aa6ad-65ab-400f-9e98-2e2e82daeb51\" (UID: \"bf0aa6ad-65ab-400f-9e98-2e2e82daeb51\") " Nov 28 13:35:27 crc kubenswrapper[4857]: I1128 13:35:27.379612 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-25lph\" (UniqueName: \"kubernetes.io/projected/bf0aa6ad-65ab-400f-9e98-2e2e82daeb51-kube-api-access-25lph\") pod \"bf0aa6ad-65ab-400f-9e98-2e2e82daeb51\" (UID: \"bf0aa6ad-65ab-400f-9e98-2e2e82daeb51\") " Nov 28 13:35:27 crc kubenswrapper[4857]: I1128 13:35:27.379644 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bf0aa6ad-65ab-400f-9e98-2e2e82daeb51-serving-cert\") pod \"bf0aa6ad-65ab-400f-9e98-2e2e82daeb51\" (UID: \"bf0aa6ad-65ab-400f-9e98-2e2e82daeb51\") " Nov 28 13:35:27 crc kubenswrapper[4857]: I1128 13:35:27.380280 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf0aa6ad-65ab-400f-9e98-2e2e82daeb51-client-ca" (OuterVolumeSpecName: "client-ca") pod "bf0aa6ad-65ab-400f-9e98-2e2e82daeb51" (UID: "bf0aa6ad-65ab-400f-9e98-2e2e82daeb51"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:35:27 crc kubenswrapper[4857]: I1128 13:35:27.380542 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf0aa6ad-65ab-400f-9e98-2e2e82daeb51-config" (OuterVolumeSpecName: "config") pod "bf0aa6ad-65ab-400f-9e98-2e2e82daeb51" (UID: "bf0aa6ad-65ab-400f-9e98-2e2e82daeb51"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:35:27 crc kubenswrapper[4857]: I1128 13:35:27.385253 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf0aa6ad-65ab-400f-9e98-2e2e82daeb51-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bf0aa6ad-65ab-400f-9e98-2e2e82daeb51" (UID: "bf0aa6ad-65ab-400f-9e98-2e2e82daeb51"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:35:27 crc kubenswrapper[4857]: I1128 13:35:27.385423 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf0aa6ad-65ab-400f-9e98-2e2e82daeb51-kube-api-access-25lph" (OuterVolumeSpecName: "kube-api-access-25lph") pod "bf0aa6ad-65ab-400f-9e98-2e2e82daeb51" (UID: "bf0aa6ad-65ab-400f-9e98-2e2e82daeb51"). InnerVolumeSpecName "kube-api-access-25lph". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:35:27 crc kubenswrapper[4857]: I1128 13:35:27.434145 4857 generic.go:334] "Generic (PLEG): container finished" podID="bf0aa6ad-65ab-400f-9e98-2e2e82daeb51" containerID="acb8ed74be1e47c4fd179f6166d95c6ca27d3d1af5f318bc232fb03cc466e574" exitCode=0 Nov 28 13:35:27 crc kubenswrapper[4857]: I1128 13:35:27.434194 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-864bf79549-gg8wl" event={"ID":"bf0aa6ad-65ab-400f-9e98-2e2e82daeb51","Type":"ContainerDied","Data":"acb8ed74be1e47c4fd179f6166d95c6ca27d3d1af5f318bc232fb03cc466e574"} Nov 28 13:35:27 crc kubenswrapper[4857]: I1128 13:35:27.434226 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-864bf79549-gg8wl" event={"ID":"bf0aa6ad-65ab-400f-9e98-2e2e82daeb51","Type":"ContainerDied","Data":"9000da396f21991d51e9b69fdb847e9c478557cc35e412456f31d1a1de13a143"} Nov 28 13:35:27 crc kubenswrapper[4857]: I1128 13:35:27.434248 4857 scope.go:117] "RemoveContainer" containerID="acb8ed74be1e47c4fd179f6166d95c6ca27d3d1af5f318bc232fb03cc466e574" Nov 28 13:35:27 crc kubenswrapper[4857]: I1128 13:35:27.434279 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-864bf79549-gg8wl" Nov 28 13:35:27 crc kubenswrapper[4857]: I1128 13:35:27.461279 4857 scope.go:117] "RemoveContainer" containerID="acb8ed74be1e47c4fd179f6166d95c6ca27d3d1af5f318bc232fb03cc466e574" Nov 28 13:35:27 crc kubenswrapper[4857]: E1128 13:35:27.462011 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"acb8ed74be1e47c4fd179f6166d95c6ca27d3d1af5f318bc232fb03cc466e574\": container with ID starting with acb8ed74be1e47c4fd179f6166d95c6ca27d3d1af5f318bc232fb03cc466e574 not found: ID does not exist" containerID="acb8ed74be1e47c4fd179f6166d95c6ca27d3d1af5f318bc232fb03cc466e574" Nov 28 13:35:27 crc kubenswrapper[4857]: I1128 13:35:27.462062 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"acb8ed74be1e47c4fd179f6166d95c6ca27d3d1af5f318bc232fb03cc466e574"} err="failed to get container status \"acb8ed74be1e47c4fd179f6166d95c6ca27d3d1af5f318bc232fb03cc466e574\": rpc error: code = NotFound desc = could not find container \"acb8ed74be1e47c4fd179f6166d95c6ca27d3d1af5f318bc232fb03cc466e574\": container with ID starting with acb8ed74be1e47c4fd179f6166d95c6ca27d3d1af5f318bc232fb03cc466e574 not found: ID does not exist" Nov 28 13:35:27 crc kubenswrapper[4857]: I1128 13:35:27.465842 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-864bf79549-gg8wl"] Nov 28 13:35:27 crc kubenswrapper[4857]: I1128 13:35:27.469683 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-864bf79549-gg8wl"] Nov 28 13:35:27 crc kubenswrapper[4857]: I1128 13:35:27.481007 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf0aa6ad-65ab-400f-9e98-2e2e82daeb51-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:35:27 crc kubenswrapper[4857]: I1128 13:35:27.481048 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-25lph\" (UniqueName: 
\"kubernetes.io/projected/bf0aa6ad-65ab-400f-9e98-2e2e82daeb51-kube-api-access-25lph\") on node \"crc\" DevicePath \"\"" Nov 28 13:35:27 crc kubenswrapper[4857]: I1128 13:35:27.481059 4857 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bf0aa6ad-65ab-400f-9e98-2e2e82daeb51-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:35:27 crc kubenswrapper[4857]: I1128 13:35:27.481081 4857 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bf0aa6ad-65ab-400f-9e98-2e2e82daeb51-client-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:35:28 crc kubenswrapper[4857]: I1128 13:35:28.241463 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf0aa6ad-65ab-400f-9e98-2e2e82daeb51" path="/var/lib/kubelet/pods/bf0aa6ad-65ab-400f-9e98-2e2e82daeb51/volumes" Nov 28 13:35:28 crc kubenswrapper[4857]: I1128 13:35:28.487168 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d8d569866-84w6h"] Nov 28 13:35:28 crc kubenswrapper[4857]: E1128 13:35:28.487557 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf0aa6ad-65ab-400f-9e98-2e2e82daeb51" containerName="route-controller-manager" Nov 28 13:35:28 crc kubenswrapper[4857]: I1128 13:35:28.487598 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf0aa6ad-65ab-400f-9e98-2e2e82daeb51" containerName="route-controller-manager" Nov 28 13:35:28 crc kubenswrapper[4857]: I1128 13:35:28.487825 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf0aa6ad-65ab-400f-9e98-2e2e82daeb51" containerName="route-controller-manager" Nov 28 13:35:28 crc kubenswrapper[4857]: I1128 13:35:28.488462 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-84w6h" Nov 28 13:35:28 crc kubenswrapper[4857]: I1128 13:35:28.491757 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 28 13:35:28 crc kubenswrapper[4857]: I1128 13:35:28.492108 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 28 13:35:28 crc kubenswrapper[4857]: I1128 13:35:28.492519 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 28 13:35:28 crc kubenswrapper[4857]: I1128 13:35:28.492735 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 28 13:35:28 crc kubenswrapper[4857]: I1128 13:35:28.492906 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 28 13:35:28 crc kubenswrapper[4857]: I1128 13:35:28.493334 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 28 13:35:28 crc kubenswrapper[4857]: I1128 13:35:28.496831 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/043106ac-fc54-4542-9b3b-af2600273146-serving-cert\") pod \"route-controller-manager-7d8d569866-84w6h\" (UID: \"043106ac-fc54-4542-9b3b-af2600273146\") " pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-84w6h" Nov 28 13:35:28 crc kubenswrapper[4857]: I1128 13:35:28.496900 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/043106ac-fc54-4542-9b3b-af2600273146-config\") pod \"route-controller-manager-7d8d569866-84w6h\" (UID: \"043106ac-fc54-4542-9b3b-af2600273146\") " pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-84w6h" Nov 28 13:35:28 crc kubenswrapper[4857]: I1128 13:35:28.496925 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/043106ac-fc54-4542-9b3b-af2600273146-client-ca\") pod \"route-controller-manager-7d8d569866-84w6h\" (UID: \"043106ac-fc54-4542-9b3b-af2600273146\") " pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-84w6h" Nov 28 13:35:28 crc kubenswrapper[4857]: I1128 13:35:28.497212 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bppzg\" (UniqueName: \"kubernetes.io/projected/043106ac-fc54-4542-9b3b-af2600273146-kube-api-access-bppzg\") pod \"route-controller-manager-7d8d569866-84w6h\" (UID: \"043106ac-fc54-4542-9b3b-af2600273146\") " pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-84w6h" Nov 28 13:35:28 crc kubenswrapper[4857]: I1128 13:35:28.503138 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d8d569866-84w6h"] Nov 28 13:35:28 crc kubenswrapper[4857]: I1128 13:35:28.598325 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/043106ac-fc54-4542-9b3b-af2600273146-serving-cert\") pod 
\"route-controller-manager-7d8d569866-84w6h\" (UID: \"043106ac-fc54-4542-9b3b-af2600273146\") " pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-84w6h" Nov 28 13:35:28 crc kubenswrapper[4857]: I1128 13:35:28.598402 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/043106ac-fc54-4542-9b3b-af2600273146-config\") pod \"route-controller-manager-7d8d569866-84w6h\" (UID: \"043106ac-fc54-4542-9b3b-af2600273146\") " pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-84w6h" Nov 28 13:35:28 crc kubenswrapper[4857]: I1128 13:35:28.598426 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/043106ac-fc54-4542-9b3b-af2600273146-client-ca\") pod \"route-controller-manager-7d8d569866-84w6h\" (UID: \"043106ac-fc54-4542-9b3b-af2600273146\") " pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-84w6h" Nov 28 13:35:28 crc kubenswrapper[4857]: I1128 13:35:28.598451 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bppzg\" (UniqueName: \"kubernetes.io/projected/043106ac-fc54-4542-9b3b-af2600273146-kube-api-access-bppzg\") pod \"route-controller-manager-7d8d569866-84w6h\" (UID: \"043106ac-fc54-4542-9b3b-af2600273146\") " pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-84w6h" Nov 28 13:35:28 crc kubenswrapper[4857]: I1128 13:35:28.600078 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/043106ac-fc54-4542-9b3b-af2600273146-client-ca\") pod \"route-controller-manager-7d8d569866-84w6h\" (UID: \"043106ac-fc54-4542-9b3b-af2600273146\") " pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-84w6h" Nov 28 13:35:28 crc kubenswrapper[4857]: I1128 13:35:28.600571 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/043106ac-fc54-4542-9b3b-af2600273146-config\") pod \"route-controller-manager-7d8d569866-84w6h\" (UID: \"043106ac-fc54-4542-9b3b-af2600273146\") " pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-84w6h" Nov 28 13:35:28 crc kubenswrapper[4857]: I1128 13:35:28.603897 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/043106ac-fc54-4542-9b3b-af2600273146-serving-cert\") pod \"route-controller-manager-7d8d569866-84w6h\" (UID: \"043106ac-fc54-4542-9b3b-af2600273146\") " pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-84w6h" Nov 28 13:35:28 crc kubenswrapper[4857]: I1128 13:35:28.616354 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bppzg\" (UniqueName: \"kubernetes.io/projected/043106ac-fc54-4542-9b3b-af2600273146-kube-api-access-bppzg\") pod \"route-controller-manager-7d8d569866-84w6h\" (UID: \"043106ac-fc54-4542-9b3b-af2600273146\") " pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-84w6h" Nov 28 13:35:28 crc kubenswrapper[4857]: I1128 13:35:28.811688 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-84w6h" Nov 28 13:35:29 crc kubenswrapper[4857]: I1128 13:35:29.040264 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d8d569866-84w6h"] Nov 28 13:35:29 crc kubenswrapper[4857]: I1128 13:35:29.446749 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-84w6h" event={"ID":"043106ac-fc54-4542-9b3b-af2600273146","Type":"ContainerStarted","Data":"d704038371b53f43ae67a28d7a85bf12eb691b3340b6de4f96457a70ec18d56d"} Nov 28 13:35:29 crc kubenswrapper[4857]: I1128 13:35:29.446798 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-84w6h" event={"ID":"043106ac-fc54-4542-9b3b-af2600273146","Type":"ContainerStarted","Data":"efe7c47d03754985fe84e6a6db08d490c42cee793ef8b7b3c313b24544a782ae"} Nov 28 13:35:29 crc kubenswrapper[4857]: I1128 13:35:29.447206 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-84w6h" Nov 28 13:35:29 crc kubenswrapper[4857]: I1128 13:35:29.469525 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-84w6h" podStartSLOduration=3.469504765 podStartE2EDuration="3.469504765s" podCreationTimestamp="2025-11-28 13:35:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:35:29.465609114 +0000 UTC m=+379.589550551" watchObservedRunningTime="2025-11-28 13:35:29.469504765 +0000 UTC m=+379.593446202" Nov 28 13:35:29 crc kubenswrapper[4857]: I1128 13:35:29.635653 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-7d8d569866-84w6h" Nov 28 13:35:33 crc kubenswrapper[4857]: I1128 13:35:33.443265 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-4544f" Nov 28 13:35:33 crc kubenswrapper[4857]: I1128 13:35:33.495390 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-98dmw"] Nov 28 13:35:41 crc kubenswrapper[4857]: I1128 13:35:41.309136 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:35:41 crc kubenswrapper[4857]: I1128 13:35:41.309920 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:35:58 crc kubenswrapper[4857]: I1128 13:35:58.561736 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" podUID="bf0e2cfc-2c45-4daf-9534-3d32df9f50e8" containerName="registry" containerID="cri-o://fb77c744e578a75560342b7129cb73ae7ede546df1dada9abeb9ead237469695" 
gracePeriod=30 Nov 28 13:35:58 crc kubenswrapper[4857]: I1128 13:35:58.844783 4857 patch_prober.go:28] interesting pod/image-registry-697d97f7c8-98dmw container/registry namespace/openshift-image-registry: Readiness probe status=failure output="Get \"https://10.217.0.37:5000/healthz\": dial tcp 10.217.0.37:5000: connect: connection refused" start-of-body= Nov 28 13:35:58 crc kubenswrapper[4857]: I1128 13:35:58.844879 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" podUID="bf0e2cfc-2c45-4daf-9534-3d32df9f50e8" containerName="registry" probeResult="failure" output="Get \"https://10.217.0.37:5000/healthz\": dial tcp 10.217.0.37:5000: connect: connection refused" Nov 28 13:35:59 crc kubenswrapper[4857]: I1128 13:35:59.612965 4857 generic.go:334] "Generic (PLEG): container finished" podID="bf0e2cfc-2c45-4daf-9534-3d32df9f50e8" containerID="fb77c744e578a75560342b7129cb73ae7ede546df1dada9abeb9ead237469695" exitCode=0 Nov 28 13:35:59 crc kubenswrapper[4857]: I1128 13:35:59.613004 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" event={"ID":"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8","Type":"ContainerDied","Data":"fb77c744e578a75560342b7129cb73ae7ede546df1dada9abeb9ead237469695"} Nov 28 13:36:00 crc kubenswrapper[4857]: I1128 13:36:00.380727 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:36:00 crc kubenswrapper[4857]: I1128 13:36:00.529094 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-bound-sa-token\") pod \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " Nov 28 13:36:00 crc kubenswrapper[4857]: I1128 13:36:00.529137 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-installation-pull-secrets\") pod \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " Nov 28 13:36:00 crc kubenswrapper[4857]: I1128 13:36:00.529169 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-ca-trust-extracted\") pod \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " Nov 28 13:36:00 crc kubenswrapper[4857]: I1128 13:36:00.529191 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-registry-certificates\") pod \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " Nov 28 13:36:00 crc kubenswrapper[4857]: I1128 13:36:00.529216 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8mk6s\" (UniqueName: \"kubernetes.io/projected/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-kube-api-access-8mk6s\") pod \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " Nov 28 13:36:00 crc kubenswrapper[4857]: I1128 13:36:00.529240 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-trusted-ca\") pod \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " Nov 28 13:36:00 crc kubenswrapper[4857]: I1128 13:36:00.529323 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-registry-tls\") pod \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " Nov 28 13:36:00 crc kubenswrapper[4857]: I1128 13:36:00.529428 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\" (UID: \"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8\") " Nov 28 13:36:00 crc kubenswrapper[4857]: I1128 13:36:00.530307 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:36:00 crc kubenswrapper[4857]: I1128 13:36:00.530321 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:36:00 crc kubenswrapper[4857]: I1128 13:36:00.534698 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:36:00 crc kubenswrapper[4857]: I1128 13:36:00.534901 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-kube-api-access-8mk6s" (OuterVolumeSpecName: "kube-api-access-8mk6s") pod "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8"). InnerVolumeSpecName "kube-api-access-8mk6s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:36:00 crc kubenswrapper[4857]: I1128 13:36:00.535938 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:36:00 crc kubenswrapper[4857]: I1128 13:36:00.536549 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:36:00 crc kubenswrapper[4857]: I1128 13:36:00.541112 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 28 13:36:00 crc kubenswrapper[4857]: I1128 13:36:00.550800 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8" (UID: "bf0e2cfc-2c45-4daf-9534-3d32df9f50e8"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:36:00 crc kubenswrapper[4857]: I1128 13:36:00.620700 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" event={"ID":"bf0e2cfc-2c45-4daf-9534-3d32df9f50e8","Type":"ContainerDied","Data":"092e942ccae4df99da3f9014b6677f2337d88122a89fd44fe32fb4abb00bfa03"} Nov 28 13:36:00 crc kubenswrapper[4857]: I1128 13:36:00.620739 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-98dmw" Nov 28 13:36:00 crc kubenswrapper[4857]: I1128 13:36:00.620764 4857 scope.go:117] "RemoveContainer" containerID="fb77c744e578a75560342b7129cb73ae7ede546df1dada9abeb9ead237469695" Nov 28 13:36:00 crc kubenswrapper[4857]: I1128 13:36:00.630813 4857 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 28 13:36:00 crc kubenswrapper[4857]: I1128 13:36:00.630864 4857 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 28 13:36:00 crc kubenswrapper[4857]: I1128 13:36:00.630889 4857 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 28 13:36:00 crc kubenswrapper[4857]: I1128 13:36:00.630911 4857 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 28 13:36:00 crc kubenswrapper[4857]: I1128 13:36:00.630927 4857 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 28 13:36:00 crc kubenswrapper[4857]: I1128 13:36:00.630945 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8mk6s\" (UniqueName: \"kubernetes.io/projected/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-kube-api-access-8mk6s\") on node \"crc\" DevicePath \"\"" Nov 28 13:36:00 crc kubenswrapper[4857]: I1128 13:36:00.630977 4857 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:36:00 crc kubenswrapper[4857]: I1128 13:36:00.659923 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-98dmw"] Nov 28 13:36:00 crc kubenswrapper[4857]: I1128 13:36:00.666107 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-98dmw"] Nov 28 13:36:02 crc kubenswrapper[4857]: I1128 13:36:02.235382 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf0e2cfc-2c45-4daf-9534-3d32df9f50e8" path="/var/lib/kubelet/pods/bf0e2cfc-2c45-4daf-9534-3d32df9f50e8/volumes" Nov 28 13:36:11 crc kubenswrapper[4857]: I1128 13:36:11.308705 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:36:11 crc kubenswrapper[4857]: I1128 13:36:11.309273 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:36:11 crc kubenswrapper[4857]: I1128 13:36:11.309315 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 13:36:11 crc kubenswrapper[4857]: I1128 13:36:11.309753 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f4ddd2633e38405419b400aeb1d375cfd7367412908feb90f94fc4920c65c449"} pod="openshift-machine-config-operator/machine-config-daemon-dshsf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 13:36:11 crc kubenswrapper[4857]: I1128 13:36:11.309813 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" containerID="cri-o://f4ddd2633e38405419b400aeb1d375cfd7367412908feb90f94fc4920c65c449" gracePeriod=600 Nov 28 13:36:12 crc kubenswrapper[4857]: I1128 13:36:12.690843 4857 generic.go:334] "Generic (PLEG): container finished" podID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerID="f4ddd2633e38405419b400aeb1d375cfd7367412908feb90f94fc4920c65c449" exitCode=0 Nov 28 13:36:12 crc kubenswrapper[4857]: I1128 13:36:12.692179 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerDied","Data":"f4ddd2633e38405419b400aeb1d375cfd7367412908feb90f94fc4920c65c449"} Nov 28 13:36:12 crc kubenswrapper[4857]: I1128 13:36:12.692265 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerStarted","Data":"89a3edf12385104d0bdb96e68e41d67bf249f17a57579150b21327f4b4e00094"} Nov 28 13:36:12 crc kubenswrapper[4857]: I1128 13:36:12.692305 4857 scope.go:117] "RemoveContainer" 
containerID="c00ece77e2126ae0eff5317d67bf43797f24749f3254b03d6f6cdd57d4428704" Nov 28 13:38:41 crc kubenswrapper[4857]: I1128 13:38:41.308036 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:38:41 crc kubenswrapper[4857]: I1128 13:38:41.308590 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:39:11 crc kubenswrapper[4857]: I1128 13:39:11.309467 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:39:11 crc kubenswrapper[4857]: I1128 13:39:11.310263 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:39:41 crc kubenswrapper[4857]: I1128 13:39:41.308588 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:39:41 crc kubenswrapper[4857]: I1128 13:39:41.309194 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:39:41 crc kubenswrapper[4857]: I1128 13:39:41.309245 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 13:39:41 crc kubenswrapper[4857]: I1128 13:39:41.309769 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"89a3edf12385104d0bdb96e68e41d67bf249f17a57579150b21327f4b4e00094"} pod="openshift-machine-config-operator/machine-config-daemon-dshsf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 13:39:41 crc kubenswrapper[4857]: I1128 13:39:41.309831 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" containerID="cri-o://89a3edf12385104d0bdb96e68e41d67bf249f17a57579150b21327f4b4e00094" gracePeriod=600 Nov 28 13:39:41 crc kubenswrapper[4857]: I1128 13:39:41.815336 4857 generic.go:334] "Generic (PLEG): container finished" podID="5d5445a4-417c-448a-a8a0-4a4f81828aff" 
containerID="89a3edf12385104d0bdb96e68e41d67bf249f17a57579150b21327f4b4e00094" exitCode=0 Nov 28 13:39:41 crc kubenswrapper[4857]: I1128 13:39:41.815410 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerDied","Data":"89a3edf12385104d0bdb96e68e41d67bf249f17a57579150b21327f4b4e00094"} Nov 28 13:39:41 crc kubenswrapper[4857]: I1128 13:39:41.815743 4857 scope.go:117] "RemoveContainer" containerID="f4ddd2633e38405419b400aeb1d375cfd7367412908feb90f94fc4920c65c449" Nov 28 13:39:42 crc kubenswrapper[4857]: I1128 13:39:42.824363 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerStarted","Data":"4c4a6d6663fe5a6930513c4b0cca32f9c63d61f7609d54dbd2cbc81ca6f31f57"} Nov 28 13:41:44 crc kubenswrapper[4857]: I1128 13:41:44.475435 4857 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 28 13:42:11 crc kubenswrapper[4857]: I1128 13:42:11.308306 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:42:11 crc kubenswrapper[4857]: I1128 13:42:11.309173 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:42:41 crc kubenswrapper[4857]: I1128 13:42:41.309142 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:42:41 crc kubenswrapper[4857]: I1128 13:42:41.310789 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:42:53 crc kubenswrapper[4857]: I1128 13:42:53.421675 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-h8td2"] Nov 28 13:42:53 crc kubenswrapper[4857]: I1128 13:42:53.422845 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="ovn-controller" containerID="cri-o://d1dc7ae9f1870db29ad30c6b4f3c8f8d28015c1e3d9bf27171e5e8a95d51b1b7" gracePeriod=30 Nov 28 13:42:53 crc kubenswrapper[4857]: I1128 13:42:53.422913 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://b553128fa5576f5ac2e327e5e679a8098348ee31f0632c34d2b725d92198f8b4" gracePeriod=30 Nov 28 
13:42:53 crc kubenswrapper[4857]: I1128 13:42:53.423000 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="nbdb" containerID="cri-o://60c0f808818d6f71bad9f7b52d7fe42823b6e8ad83d3395366cab0f90fb75ec2" gracePeriod=30 Nov 28 13:42:53 crc kubenswrapper[4857]: I1128 13:42:53.422966 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="kube-rbac-proxy-node" containerID="cri-o://d8587850a181bf875ab24005a4b044489046212ae89b15834ccb0f0b46d3518b" gracePeriod=30 Nov 28 13:42:53 crc kubenswrapper[4857]: I1128 13:42:53.423014 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="sbdb" containerID="cri-o://0f7d207494d4959cc23a5df6738539f073ec3c45cfe0339bd14b1a3e876f6031" gracePeriod=30 Nov 28 13:42:53 crc kubenswrapper[4857]: I1128 13:42:53.422870 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="northd" containerID="cri-o://933498cbc402ee7f14aab38ef86c401dc46df3c21d986c2a97ff1871bb7b7758" gracePeriod=30 Nov 28 13:42:53 crc kubenswrapper[4857]: I1128 13:42:53.422958 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="ovn-acl-logging" containerID="cri-o://a4bb280266a0eb3369080dfcb1e4d0522f2d365a300f82637288356703bde242" gracePeriod=30 Nov 28 13:42:53 crc kubenswrapper[4857]: I1128 13:42:53.462443 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="ovnkube-controller" containerID="cri-o://1690dc88e10e9aa6e4e4415adb0c9a94115389314a85576d033f0fccefa5943b" gracePeriod=30 Nov 28 13:42:53 crc kubenswrapper[4857]: I1128 13:42:53.897902 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rb7tq_787c64de-9ce4-41eb-a525-948c23e84595/kube-multus/2.log" Nov 28 13:42:53 crc kubenswrapper[4857]: I1128 13:42:53.898713 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rb7tq_787c64de-9ce4-41eb-a525-948c23e84595/kube-multus/1.log" Nov 28 13:42:53 crc kubenswrapper[4857]: I1128 13:42:53.898760 4857 generic.go:334] "Generic (PLEG): container finished" podID="787c64de-9ce4-41eb-a525-948c23e84595" containerID="586107b95ceda2408d672f603658f1252e6cbceddc10c0ad76403f9446812f05" exitCode=2 Nov 28 13:42:53 crc kubenswrapper[4857]: I1128 13:42:53.898818 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rb7tq" event={"ID":"787c64de-9ce4-41eb-a525-948c23e84595","Type":"ContainerDied","Data":"586107b95ceda2408d672f603658f1252e6cbceddc10c0ad76403f9446812f05"} Nov 28 13:42:53 crc kubenswrapper[4857]: I1128 13:42:53.898860 4857 scope.go:117] "RemoveContainer" containerID="d4e401482ae63a6e4ae751bb6a59688781c6b78961e3936facd2a2da80a538e5" Nov 28 13:42:53 crc kubenswrapper[4857]: I1128 13:42:53.899524 4857 scope.go:117] "RemoveContainer" containerID="586107b95ceda2408d672f603658f1252e6cbceddc10c0ad76403f9446812f05" Nov 28 13:42:53 crc 
kubenswrapper[4857]: I1128 13:42:53.902193 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-h8td2_46c5e02c-be1a-45b7-86ef-cc8c484c4f71/ovnkube-controller/3.log" Nov 28 13:42:53 crc kubenswrapper[4857]: I1128 13:42:53.905079 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-h8td2_46c5e02c-be1a-45b7-86ef-cc8c484c4f71/ovn-acl-logging/0.log" Nov 28 13:42:53 crc kubenswrapper[4857]: I1128 13:42:53.905685 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-h8td2_46c5e02c-be1a-45b7-86ef-cc8c484c4f71/ovn-controller/0.log" Nov 28 13:42:53 crc kubenswrapper[4857]: I1128 13:42:53.906318 4857 generic.go:334] "Generic (PLEG): container finished" podID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerID="1690dc88e10e9aa6e4e4415adb0c9a94115389314a85576d033f0fccefa5943b" exitCode=0 Nov 28 13:42:53 crc kubenswrapper[4857]: I1128 13:42:53.906338 4857 generic.go:334] "Generic (PLEG): container finished" podID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerID="0f7d207494d4959cc23a5df6738539f073ec3c45cfe0339bd14b1a3e876f6031" exitCode=0 Nov 28 13:42:53 crc kubenswrapper[4857]: I1128 13:42:53.906346 4857 generic.go:334] "Generic (PLEG): container finished" podID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerID="60c0f808818d6f71bad9f7b52d7fe42823b6e8ad83d3395366cab0f90fb75ec2" exitCode=0 Nov 28 13:42:53 crc kubenswrapper[4857]: I1128 13:42:53.906353 4857 generic.go:334] "Generic (PLEG): container finished" podID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerID="933498cbc402ee7f14aab38ef86c401dc46df3c21d986c2a97ff1871bb7b7758" exitCode=0 Nov 28 13:42:53 crc kubenswrapper[4857]: I1128 13:42:53.906359 4857 generic.go:334] "Generic (PLEG): container finished" podID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerID="b553128fa5576f5ac2e327e5e679a8098348ee31f0632c34d2b725d92198f8b4" exitCode=0 Nov 28 13:42:53 crc kubenswrapper[4857]: I1128 13:42:53.906365 4857 generic.go:334] "Generic (PLEG): container finished" podID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerID="d8587850a181bf875ab24005a4b044489046212ae89b15834ccb0f0b46d3518b" exitCode=0 Nov 28 13:42:53 crc kubenswrapper[4857]: I1128 13:42:53.906371 4857 generic.go:334] "Generic (PLEG): container finished" podID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerID="a4bb280266a0eb3369080dfcb1e4d0522f2d365a300f82637288356703bde242" exitCode=143 Nov 28 13:42:53 crc kubenswrapper[4857]: I1128 13:42:53.906377 4857 generic.go:334] "Generic (PLEG): container finished" podID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerID="d1dc7ae9f1870db29ad30c6b4f3c8f8d28015c1e3d9bf27171e5e8a95d51b1b7" exitCode=143 Nov 28 13:42:53 crc kubenswrapper[4857]: I1128 13:42:53.906394 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" event={"ID":"46c5e02c-be1a-45b7-86ef-cc8c484c4f71","Type":"ContainerDied","Data":"1690dc88e10e9aa6e4e4415adb0c9a94115389314a85576d033f0fccefa5943b"} Nov 28 13:42:53 crc kubenswrapper[4857]: I1128 13:42:53.906416 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" event={"ID":"46c5e02c-be1a-45b7-86ef-cc8c484c4f71","Type":"ContainerDied","Data":"0f7d207494d4959cc23a5df6738539f073ec3c45cfe0339bd14b1a3e876f6031"} Nov 28 13:42:53 crc kubenswrapper[4857]: I1128 13:42:53.906427 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" 
event={"ID":"46c5e02c-be1a-45b7-86ef-cc8c484c4f71","Type":"ContainerDied","Data":"60c0f808818d6f71bad9f7b52d7fe42823b6e8ad83d3395366cab0f90fb75ec2"} Nov 28 13:42:53 crc kubenswrapper[4857]: I1128 13:42:53.906436 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" event={"ID":"46c5e02c-be1a-45b7-86ef-cc8c484c4f71","Type":"ContainerDied","Data":"933498cbc402ee7f14aab38ef86c401dc46df3c21d986c2a97ff1871bb7b7758"} Nov 28 13:42:53 crc kubenswrapper[4857]: I1128 13:42:53.906444 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" event={"ID":"46c5e02c-be1a-45b7-86ef-cc8c484c4f71","Type":"ContainerDied","Data":"b553128fa5576f5ac2e327e5e679a8098348ee31f0632c34d2b725d92198f8b4"} Nov 28 13:42:53 crc kubenswrapper[4857]: I1128 13:42:53.906456 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" event={"ID":"46c5e02c-be1a-45b7-86ef-cc8c484c4f71","Type":"ContainerDied","Data":"d8587850a181bf875ab24005a4b044489046212ae89b15834ccb0f0b46d3518b"} Nov 28 13:42:53 crc kubenswrapper[4857]: I1128 13:42:53.906467 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" event={"ID":"46c5e02c-be1a-45b7-86ef-cc8c484c4f71","Type":"ContainerDied","Data":"a4bb280266a0eb3369080dfcb1e4d0522f2d365a300f82637288356703bde242"} Nov 28 13:42:53 crc kubenswrapper[4857]: I1128 13:42:53.906475 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" event={"ID":"46c5e02c-be1a-45b7-86ef-cc8c484c4f71","Type":"ContainerDied","Data":"d1dc7ae9f1870db29ad30c6b4f3c8f8d28015c1e3d9bf27171e5e8a95d51b1b7"} Nov 28 13:42:53 crc kubenswrapper[4857]: I1128 13:42:53.926085 4857 scope.go:117] "RemoveContainer" containerID="63d0afa8ede2911fc623c7b2bd7143f0bac957f388a64f35db245dd14a67c558" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.207308 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-h8td2_46c5e02c-be1a-45b7-86ef-cc8c484c4f71/ovn-acl-logging/0.log" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.208352 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-h8td2_46c5e02c-be1a-45b7-86ef-cc8c484c4f71/ovn-controller/0.log" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.208804 4857 util.go:48] "No ready sandbox for pod can be found. 
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.208804 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.264716 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-fhw5k"]
Nov 28 13:42:54 crc kubenswrapper[4857]: E1128 13:42:54.264977 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="ovn-controller"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.264994 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="ovn-controller"
Nov 28 13:42:54 crc kubenswrapper[4857]: E1128 13:42:54.265009 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="ovn-acl-logging"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.265018 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="ovn-acl-logging"
Nov 28 13:42:54 crc kubenswrapper[4857]: E1128 13:42:54.265026 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="northd"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.265034 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="northd"
Nov 28 13:42:54 crc kubenswrapper[4857]: E1128 13:42:54.265042 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="nbdb"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.265051 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="nbdb"
Nov 28 13:42:54 crc kubenswrapper[4857]: E1128 13:42:54.265066 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="kube-rbac-proxy-node"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.265074 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="kube-rbac-proxy-node"
Nov 28 13:42:54 crc kubenswrapper[4857]: E1128 13:42:54.265084 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="ovnkube-controller"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.265091 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="ovnkube-controller"
Nov 28 13:42:54 crc kubenswrapper[4857]: E1128 13:42:54.265106 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf0e2cfc-2c45-4daf-9534-3d32df9f50e8" containerName="registry"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.265115 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf0e2cfc-2c45-4daf-9534-3d32df9f50e8" containerName="registry"
Nov 28 13:42:54 crc kubenswrapper[4857]: E1128 13:42:54.265128 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="ovnkube-controller"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.265137 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="ovnkube-controller"
Nov 28 13:42:54 crc kubenswrapper[4857]: E1128 13:42:54.265146 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="kubecfg-setup"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.265153 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="kubecfg-setup"
Nov 28 13:42:54 crc kubenswrapper[4857]: E1128 13:42:54.265165 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="sbdb"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.265172 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="sbdb"
Nov 28 13:42:54 crc kubenswrapper[4857]: E1128 13:42:54.265180 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="ovnkube-controller"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.265188 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="ovnkube-controller"
Nov 28 13:42:54 crc kubenswrapper[4857]: E1128 13:42:54.265197 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="kube-rbac-proxy-ovn-metrics"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.265204 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="kube-rbac-proxy-ovn-metrics"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.265329 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="kube-rbac-proxy-ovn-metrics"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.265344 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf0e2cfc-2c45-4daf-9534-3d32df9f50e8" containerName="registry"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.265355 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="northd"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.265364 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="ovnkube-controller"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.265372 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="ovnkube-controller"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.265379 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="ovnkube-controller"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.265388 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="ovn-acl-logging"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.265428 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="nbdb"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.265437 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="kube-rbac-proxy-node"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.265450 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="ovnkube-controller"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.265462 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="sbdb"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.265470 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="ovn-controller"
Nov 28 13:42:54 crc kubenswrapper[4857]: E1128 13:42:54.265577 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="ovnkube-controller"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.265588 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="ovnkube-controller"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.265701 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="ovnkube-controller"
Nov 28 13:42:54 crc kubenswrapper[4857]: E1128 13:42:54.265811 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="ovnkube-controller"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.265821 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" containerName="ovnkube-controller"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.267571 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.390606 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-ovn-node-metrics-cert\") pod \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") "
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.390694 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "46c5e02c-be1a-45b7-86ef-cc8c484c4f71" (UID: "46c5e02c-be1a-45b7-86ef-cc8c484c4f71"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.390723 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-var-lib-cni-networks-ovn-kubernetes\") pod \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") "
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.390743 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-log-socket\") pod \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") "
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.390763 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-run-systemd\") pod \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") "
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.390780 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-systemd-units\") pod \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") "
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.390803 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-kubelet\") pod \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") "
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.390817 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-node-log\") pod \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") "
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.390835 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-run-ovn-kubernetes\") pod \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") "
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.390849 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-slash\") pod \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") "
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.390866 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-run-ovn\") pod \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") "
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.390889 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-ovnkube-script-lib\") pod \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") "
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.390908 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-etc-openvswitch\") pod \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") "
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.390923 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-cni-bin\") pod \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") "
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.390966 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-brfsw\" (UniqueName: \"kubernetes.io/projected/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-kube-api-access-brfsw\") pod \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") "
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.391000 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-run-netns\") pod \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") "
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.391018 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-ovnkube-config\") pod \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") "
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.391032 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-env-overrides\") pod \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") "
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.391063 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-cni-netd\") pod \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") "
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.391412 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "46c5e02c-be1a-45b7-86ef-cc8c484c4f71" (UID: "46c5e02c-be1a-45b7-86ef-cc8c484c4f71"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.391530 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-log-socket" (OuterVolumeSpecName: "log-socket") pod "46c5e02c-be1a-45b7-86ef-cc8c484c4f71" (UID: "46c5e02c-be1a-45b7-86ef-cc8c484c4f71"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.391580 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-node-log" (OuterVolumeSpecName: "node-log") pod "46c5e02c-be1a-45b7-86ef-cc8c484c4f71" (UID: "46c5e02c-be1a-45b7-86ef-cc8c484c4f71"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.391548 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "46c5e02c-be1a-45b7-86ef-cc8c484c4f71" (UID: "46c5e02c-be1a-45b7-86ef-cc8c484c4f71"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.391825 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "46c5e02c-be1a-45b7-86ef-cc8c484c4f71" (UID: "46c5e02c-be1a-45b7-86ef-cc8c484c4f71"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.391866 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "46c5e02c-be1a-45b7-86ef-cc8c484c4f71" (UID: "46c5e02c-be1a-45b7-86ef-cc8c484c4f71"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.391881 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "46c5e02c-be1a-45b7-86ef-cc8c484c4f71" (UID: "46c5e02c-be1a-45b7-86ef-cc8c484c4f71"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.391892 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "46c5e02c-be1a-45b7-86ef-cc8c484c4f71" (UID: "46c5e02c-be1a-45b7-86ef-cc8c484c4f71"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.391903 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-slash" (OuterVolumeSpecName: "host-slash") pod "46c5e02c-be1a-45b7-86ef-cc8c484c4f71" (UID: "46c5e02c-be1a-45b7-86ef-cc8c484c4f71"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.391911 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "46c5e02c-be1a-45b7-86ef-cc8c484c4f71" (UID: "46c5e02c-be1a-45b7-86ef-cc8c484c4f71"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.391924 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "46c5e02c-be1a-45b7-86ef-cc8c484c4f71" (UID: "46c5e02c-be1a-45b7-86ef-cc8c484c4f71"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.391929 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "46c5e02c-be1a-45b7-86ef-cc8c484c4f71" (UID: "46c5e02c-be1a-45b7-86ef-cc8c484c4f71"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.391981 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "46c5e02c-be1a-45b7-86ef-cc8c484c4f71" (UID: "46c5e02c-be1a-45b7-86ef-cc8c484c4f71"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.392193 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "46c5e02c-be1a-45b7-86ef-cc8c484c4f71" (UID: "46c5e02c-be1a-45b7-86ef-cc8c484c4f71"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.399127 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "46c5e02c-be1a-45b7-86ef-cc8c484c4f71" (UID: "46c5e02c-be1a-45b7-86ef-cc8c484c4f71"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.399205 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-var-lib-openvswitch\") pod \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") "
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.399263 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-run-openvswitch\") pod \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\" (UID: \"46c5e02c-be1a-45b7-86ef-cc8c484c4f71\") "
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.399487 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-host-slash\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.399520 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6wvnh\" (UniqueName: \"kubernetes.io/projected/6948def6-615e-41d6-890b-f578b7adde60-kube-api-access-6wvnh\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.399544 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-host-cni-netd\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.399565 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6948def6-615e-41d6-890b-f578b7adde60-ovnkube-config\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.399583 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6948def6-615e-41d6-890b-f578b7adde60-env-overrides\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.399617 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-run-ovn\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.399648 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-systemd-units\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.399670 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.399722 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-etc-openvswitch\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.399753 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-run-openvswitch\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.399770 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-host-cni-bin\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.399814 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-host-run-netns\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.399835 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6948def6-615e-41d6-890b-f578b7adde60-ovn-node-metrics-cert\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.399868 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-node-log\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.399904 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-log-socket\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.399959 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6948def6-615e-41d6-890b-f578b7adde60-ovnkube-script-lib\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.399987 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-host-kubelet\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.400010 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-host-run-ovn-kubernetes\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.400059 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-run-systemd\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.400317 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "46c5e02c-be1a-45b7-86ef-cc8c484c4f71" (UID: "46c5e02c-be1a-45b7-86ef-cc8c484c4f71"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.400968 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-kube-api-access-brfsw" (OuterVolumeSpecName: "kube-api-access-brfsw") pod "46c5e02c-be1a-45b7-86ef-cc8c484c4f71" (UID: "46c5e02c-be1a-45b7-86ef-cc8c484c4f71"). InnerVolumeSpecName "kube-api-access-brfsw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.401111 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-var-lib-openvswitch\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k"
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.401175 4857 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-ovnkube-script-lib\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.401191 4857 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-etc-openvswitch\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.401200 4857 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-cni-bin\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.401211 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-brfsw\" (UniqueName: \"kubernetes.io/projected/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-kube-api-access-brfsw\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.401221 4857 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-run-netns\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.401229 4857 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-env-overrides\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.401237 4857 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-ovnkube-config\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.401247 4857 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-cni-netd\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.401256 4857 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-var-lib-openvswitch\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.401265 4857 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-run-openvswitch\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.401276 4857 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.401285 4857 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-log-socket\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.401322 4857 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-systemd-units\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.401333 4857 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-kubelet\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.401341 4857 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-node-log\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.401349 4857 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.401357 4857 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-host-slash\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.401364 4857 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-run-ovn\") on node \"crc\" DevicePath \"\""
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.402620 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "46c5e02c-be1a-45b7-86ef-cc8c484c4f71" (UID: "46c5e02c-be1a-45b7-86ef-cc8c484c4f71"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.414448 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "46c5e02c-be1a-45b7-86ef-cc8c484c4f71" (UID: "46c5e02c-be1a-45b7-86ef-cc8c484c4f71"). InnerVolumeSpecName "run-systemd".
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.503050 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-var-lib-openvswitch\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.502885 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-var-lib-openvswitch\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.504698 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-host-slash\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.504818 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6wvnh\" (UniqueName: \"kubernetes.io/projected/6948def6-615e-41d6-890b-f578b7adde60-kube-api-access-6wvnh\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.504767 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-host-slash\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.505278 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-host-cni-netd\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.505451 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6948def6-615e-41d6-890b-f578b7adde60-ovnkube-config\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.505597 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6948def6-615e-41d6-890b-f578b7adde60-env-overrides\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.505704 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-run-ovn\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 
13:42:54.505804 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-systemd-units\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.505892 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.506050 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-etc-openvswitch\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.506151 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-run-openvswitch\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.506223 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6948def6-615e-41d6-890b-f578b7adde60-ovnkube-config\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.506238 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-run-ovn\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.506218 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-systemd-units\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.505356 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-host-cni-netd\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.506271 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-etc-openvswitch\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.506298 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-run-openvswitch\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.506299 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.506190 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6948def6-615e-41d6-890b-f578b7adde60-env-overrides\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.506784 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-host-cni-bin\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.506918 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-host-run-netns\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.507039 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6948def6-615e-41d6-890b-f578b7adde60-ovn-node-metrics-cert\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.507180 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-node-log\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.507295 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-log-socket\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.507395 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6948def6-615e-41d6-890b-f578b7adde60-ovnkube-script-lib\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.507490 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: 
\"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-host-kubelet\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.507574 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-host-kubelet\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.507437 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-node-log\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.507451 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-log-socket\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.506979 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-host-run-netns\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.507584 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-host-run-ovn-kubernetes\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.506801 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-host-cni-bin\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.507701 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-run-systemd\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.507762 4857 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.507776 4857 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/46c5e02c-be1a-45b7-86ef-cc8c484c4f71-run-systemd\") on node \"crc\" DevicePath \"\"" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.507781 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: 
\"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-run-systemd\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.508063 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/6948def6-615e-41d6-890b-f578b7adde60-host-run-ovn-kubernetes\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.508169 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6948def6-615e-41d6-890b-f578b7adde60-ovnkube-script-lib\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.509733 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6948def6-615e-41d6-890b-f578b7adde60-ovn-node-metrics-cert\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.520861 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6wvnh\" (UniqueName: \"kubernetes.io/projected/6948def6-615e-41d6-890b-f578b7adde60-kube-api-access-6wvnh\") pod \"ovnkube-node-fhw5k\" (UID: \"6948def6-615e-41d6-890b-f578b7adde60\") " pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.583201 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.913488 4857 generic.go:334] "Generic (PLEG): container finished" podID="6948def6-615e-41d6-890b-f578b7adde60" containerID="6807271272b921cd1be4eda7aaa57625f13b4b129cdf94d758732e394479f29f" exitCode=0 Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.913535 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" event={"ID":"6948def6-615e-41d6-890b-f578b7adde60","Type":"ContainerDied","Data":"6807271272b921cd1be4eda7aaa57625f13b4b129cdf94d758732e394479f29f"} Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.913588 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" event={"ID":"6948def6-615e-41d6-890b-f578b7adde60","Type":"ContainerStarted","Data":"94177db2170cb4f47c3f1d469f47196cbadd9a2f2cd4e6d727a29b376d660fed"} Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.915843 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rb7tq_787c64de-9ce4-41eb-a525-948c23e84595/kube-multus/2.log" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.915914 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rb7tq" event={"ID":"787c64de-9ce4-41eb-a525-948c23e84595","Type":"ContainerStarted","Data":"a0cd7ded018bf9978b307498095e706ec2c30d2bdc233c5ce89a5939bc1cea93"} Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.937746 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-h8td2_46c5e02c-be1a-45b7-86ef-cc8c484c4f71/ovn-acl-logging/0.log" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.939796 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-h8td2_46c5e02c-be1a-45b7-86ef-cc8c484c4f71/ovn-controller/0.log" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.940524 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" event={"ID":"46c5e02c-be1a-45b7-86ef-cc8c484c4f71","Type":"ContainerDied","Data":"4ba54e9dd19b060f17879becff8f7a30408c8239ce144a6b05f961e92de8a9c1"} Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.940592 4857 scope.go:117] "RemoveContainer" containerID="1690dc88e10e9aa6e4e4415adb0c9a94115389314a85576d033f0fccefa5943b" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.940843 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-h8td2" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.969758 4857 scope.go:117] "RemoveContainer" containerID="0f7d207494d4959cc23a5df6738539f073ec3c45cfe0339bd14b1a3e876f6031" Nov 28 13:42:54 crc kubenswrapper[4857]: I1128 13:42:54.989113 4857 scope.go:117] "RemoveContainer" containerID="60c0f808818d6f71bad9f7b52d7fe42823b6e8ad83d3395366cab0f90fb75ec2" Nov 28 13:42:55 crc kubenswrapper[4857]: I1128 13:42:55.009025 4857 scope.go:117] "RemoveContainer" containerID="933498cbc402ee7f14aab38ef86c401dc46df3c21d986c2a97ff1871bb7b7758" Nov 28 13:42:55 crc kubenswrapper[4857]: I1128 13:42:55.025718 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-h8td2"] Nov 28 13:42:55 crc kubenswrapper[4857]: I1128 13:42:55.030259 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-h8td2"] Nov 28 13:42:55 crc kubenswrapper[4857]: I1128 13:42:55.032602 4857 scope.go:117] "RemoveContainer" containerID="b553128fa5576f5ac2e327e5e679a8098348ee31f0632c34d2b725d92198f8b4" Nov 28 13:42:55 crc kubenswrapper[4857]: I1128 13:42:55.050974 4857 scope.go:117] "RemoveContainer" containerID="d8587850a181bf875ab24005a4b044489046212ae89b15834ccb0f0b46d3518b" Nov 28 13:42:55 crc kubenswrapper[4857]: I1128 13:42:55.065526 4857 scope.go:117] "RemoveContainer" containerID="a4bb280266a0eb3369080dfcb1e4d0522f2d365a300f82637288356703bde242" Nov 28 13:42:55 crc kubenswrapper[4857]: I1128 13:42:55.078775 4857 scope.go:117] "RemoveContainer" containerID="d1dc7ae9f1870db29ad30c6b4f3c8f8d28015c1e3d9bf27171e5e8a95d51b1b7" Nov 28 13:42:55 crc kubenswrapper[4857]: I1128 13:42:55.091964 4857 scope.go:117] "RemoveContainer" containerID="7c965c6c01b15ed72d9ce909297b7945f360bc85228b29b28b48cda253aa89bb" Nov 28 13:42:55 crc kubenswrapper[4857]: I1128 13:42:55.950791 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" event={"ID":"6948def6-615e-41d6-890b-f578b7adde60","Type":"ContainerStarted","Data":"b8aca7fc9d599b61378541eb6e9a7f0b729a2d7503bbb63e97829519bae12ca6"} Nov 28 13:42:55 crc kubenswrapper[4857]: I1128 13:42:55.951311 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" event={"ID":"6948def6-615e-41d6-890b-f578b7adde60","Type":"ContainerStarted","Data":"beb8b3e679771bd09e08199437b40802877b122925a56a621163885e3839bc57"} Nov 28 13:42:55 crc kubenswrapper[4857]: I1128 13:42:55.951332 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" event={"ID":"6948def6-615e-41d6-890b-f578b7adde60","Type":"ContainerStarted","Data":"b4b161da39f46634fbb07629c15a194e4de776d615243139921642d119dc8560"} Nov 28 13:42:55 crc kubenswrapper[4857]: I1128 13:42:55.951346 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" event={"ID":"6948def6-615e-41d6-890b-f578b7adde60","Type":"ContainerStarted","Data":"553e7cb1071aa8b1d1b16070c67b5661b8664fc67a11a7de54ba8201895d6bdd"} Nov 28 13:42:55 crc kubenswrapper[4857]: I1128 13:42:55.951358 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" event={"ID":"6948def6-615e-41d6-890b-f578b7adde60","Type":"ContainerStarted","Data":"60ce57ac70467d384b2ef8c26a4999c7318f30890aab6c6704c92ed0f54af253"} Nov 28 13:42:55 crc kubenswrapper[4857]: I1128 13:42:55.951369 4857 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" event={"ID":"6948def6-615e-41d6-890b-f578b7adde60","Type":"ContainerStarted","Data":"2066341fe1bff72fe6d781cb6ac228ca33cf269c72bfa2d1e16fcf8a20ffaadc"} Nov 28 13:42:56 crc kubenswrapper[4857]: I1128 13:42:56.237581 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="46c5e02c-be1a-45b7-86ef-cc8c484c4f71" path="/var/lib/kubelet/pods/46c5e02c-be1a-45b7-86ef-cc8c484c4f71/volumes" Nov 28 13:42:58 crc kubenswrapper[4857]: I1128 13:42:58.969265 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" event={"ID":"6948def6-615e-41d6-890b-f578b7adde60","Type":"ContainerStarted","Data":"e9dbdf00400f9de1b5eaa45aba57af729d505551186f0770da3c8cfcc91f3289"} Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.566090 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-5kpc2"] Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.567631 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-5kpc2" Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.569103 4857 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-g8nhf" Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.569626 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.570460 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.570842 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.600769 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/ebafba86-f75f-4f22-b85b-1a65df2eaadd-crc-storage\") pod \"crc-storage-crc-5kpc2\" (UID: \"ebafba86-f75f-4f22-b85b-1a65df2eaadd\") " pod="crc-storage/crc-storage-crc-5kpc2" Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.600867 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/ebafba86-f75f-4f22-b85b-1a65df2eaadd-node-mnt\") pod \"crc-storage-crc-5kpc2\" (UID: \"ebafba86-f75f-4f22-b85b-1a65df2eaadd\") " pod="crc-storage/crc-storage-crc-5kpc2" Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.600892 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ck2sp\" (UniqueName: \"kubernetes.io/projected/ebafba86-f75f-4f22-b85b-1a65df2eaadd-kube-api-access-ck2sp\") pod \"crc-storage-crc-5kpc2\" (UID: \"ebafba86-f75f-4f22-b85b-1a65df2eaadd\") " pod="crc-storage/crc-storage-crc-5kpc2" Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.702542 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/ebafba86-f75f-4f22-b85b-1a65df2eaadd-crc-storage\") pod \"crc-storage-crc-5kpc2\" (UID: \"ebafba86-f75f-4f22-b85b-1a65df2eaadd\") " pod="crc-storage/crc-storage-crc-5kpc2" Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.702645 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: 
\"kubernetes.io/host-path/ebafba86-f75f-4f22-b85b-1a65df2eaadd-node-mnt\") pod \"crc-storage-crc-5kpc2\" (UID: \"ebafba86-f75f-4f22-b85b-1a65df2eaadd\") " pod="crc-storage/crc-storage-crc-5kpc2" Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.702714 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ck2sp\" (UniqueName: \"kubernetes.io/projected/ebafba86-f75f-4f22-b85b-1a65df2eaadd-kube-api-access-ck2sp\") pod \"crc-storage-crc-5kpc2\" (UID: \"ebafba86-f75f-4f22-b85b-1a65df2eaadd\") " pod="crc-storage/crc-storage-crc-5kpc2" Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.703144 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/ebafba86-f75f-4f22-b85b-1a65df2eaadd-node-mnt\") pod \"crc-storage-crc-5kpc2\" (UID: \"ebafba86-f75f-4f22-b85b-1a65df2eaadd\") " pod="crc-storage/crc-storage-crc-5kpc2" Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.703513 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/ebafba86-f75f-4f22-b85b-1a65df2eaadd-crc-storage\") pod \"crc-storage-crc-5kpc2\" (UID: \"ebafba86-f75f-4f22-b85b-1a65df2eaadd\") " pod="crc-storage/crc-storage-crc-5kpc2" Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.728028 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ck2sp\" (UniqueName: \"kubernetes.io/projected/ebafba86-f75f-4f22-b85b-1a65df2eaadd-kube-api-access-ck2sp\") pod \"crc-storage-crc-5kpc2\" (UID: \"ebafba86-f75f-4f22-b85b-1a65df2eaadd\") " pod="crc-storage/crc-storage-crc-5kpc2" Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.894400 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-5kpc2" Nov 28 13:43:01 crc kubenswrapper[4857]: E1128 13:43:01.915523 4857 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-5kpc2_crc-storage_ebafba86-f75f-4f22-b85b-1a65df2eaadd_0(8fed38c15b33a6f4322fe5c4e1cc27776d5a022936e36695fae40f1ba882bc4d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 28 13:43:01 crc kubenswrapper[4857]: E1128 13:43:01.915589 4857 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-5kpc2_crc-storage_ebafba86-f75f-4f22-b85b-1a65df2eaadd_0(8fed38c15b33a6f4322fe5c4e1cc27776d5a022936e36695fae40f1ba882bc4d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-5kpc2" Nov 28 13:43:01 crc kubenswrapper[4857]: E1128 13:43:01.915611 4857 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-5kpc2_crc-storage_ebafba86-f75f-4f22-b85b-1a65df2eaadd_0(8fed38c15b33a6f4322fe5c4e1cc27776d5a022936e36695fae40f1ba882bc4d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="crc-storage/crc-storage-crc-5kpc2" Nov 28 13:43:01 crc kubenswrapper[4857]: E1128 13:43:01.915653 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-5kpc2_crc-storage(ebafba86-f75f-4f22-b85b-1a65df2eaadd)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-5kpc2_crc-storage(ebafba86-f75f-4f22-b85b-1a65df2eaadd)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-5kpc2_crc-storage_ebafba86-f75f-4f22-b85b-1a65df2eaadd_0(8fed38c15b33a6f4322fe5c4e1cc27776d5a022936e36695fae40f1ba882bc4d): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="crc-storage/crc-storage-crc-5kpc2" podUID="ebafba86-f75f-4f22-b85b-1a65df2eaadd" Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.990820 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" event={"ID":"6948def6-615e-41d6-890b-f578b7adde60","Type":"ContainerStarted","Data":"b47c8a257e82718ab7837f39753d93e2549a09b3f2c129e54bee9a9c127c1557"} Nov 28 13:43:01 crc kubenswrapper[4857]: I1128 13:43:01.991349 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:43:02 crc kubenswrapper[4857]: I1128 13:43:02.020061 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" podStartSLOduration=8.020035182 podStartE2EDuration="8.020035182s" podCreationTimestamp="2025-11-28 13:42:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:43:02.019566059 +0000 UTC m=+832.143507516" watchObservedRunningTime="2025-11-28 13:43:02.020035182 +0000 UTC m=+832.143976619" Nov 28 13:43:02 crc kubenswrapper[4857]: I1128 13:43:02.026360 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:43:02 crc kubenswrapper[4857]: I1128 13:43:02.236055 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-5kpc2"] Nov 28 13:43:02 crc kubenswrapper[4857]: I1128 13:43:02.236154 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-5kpc2" Nov 28 13:43:02 crc kubenswrapper[4857]: I1128 13:43:02.236531 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-5kpc2" Nov 28 13:43:02 crc kubenswrapper[4857]: E1128 13:43:02.253136 4857 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-5kpc2_crc-storage_ebafba86-f75f-4f22-b85b-1a65df2eaadd_0(583df31abb58877f2f47c266b39b683a045716d48bdf022f96c7f082ae3a34f9): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 28 13:43:02 crc kubenswrapper[4857]: E1128 13:43:02.253214 4857 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-5kpc2_crc-storage_ebafba86-f75f-4f22-b85b-1a65df2eaadd_0(583df31abb58877f2f47c266b39b683a045716d48bdf022f96c7f082ae3a34f9): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="crc-storage/crc-storage-crc-5kpc2" Nov 28 13:43:02 crc kubenswrapper[4857]: E1128 13:43:02.253235 4857 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-5kpc2_crc-storage_ebafba86-f75f-4f22-b85b-1a65df2eaadd_0(583df31abb58877f2f47c266b39b683a045716d48bdf022f96c7f082ae3a34f9): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-5kpc2" Nov 28 13:43:02 crc kubenswrapper[4857]: E1128 13:43:02.253271 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-5kpc2_crc-storage(ebafba86-f75f-4f22-b85b-1a65df2eaadd)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-5kpc2_crc-storage(ebafba86-f75f-4f22-b85b-1a65df2eaadd)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-5kpc2_crc-storage_ebafba86-f75f-4f22-b85b-1a65df2eaadd_0(583df31abb58877f2f47c266b39b683a045716d48bdf022f96c7f082ae3a34f9): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="crc-storage/crc-storage-crc-5kpc2" podUID="ebafba86-f75f-4f22-b85b-1a65df2eaadd" Nov 28 13:43:02 crc kubenswrapper[4857]: I1128 13:43:02.997169 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:43:02 crc kubenswrapper[4857]: I1128 13:43:02.997698 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:43:03 crc kubenswrapper[4857]: I1128 13:43:03.026424 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:43:11 crc kubenswrapper[4857]: I1128 13:43:11.308580 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:43:11 crc kubenswrapper[4857]: I1128 13:43:11.309249 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:43:11 crc kubenswrapper[4857]: I1128 13:43:11.309289 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 13:43:11 crc kubenswrapper[4857]: I1128 13:43:11.309882 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4c4a6d6663fe5a6930513c4b0cca32f9c63d61f7609d54dbd2cbc81ca6f31f57"} pod="openshift-machine-config-operator/machine-config-daemon-dshsf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 13:43:11 crc kubenswrapper[4857]: I1128 13:43:11.309936 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" 
containerID="cri-o://4c4a6d6663fe5a6930513c4b0cca32f9c63d61f7609d54dbd2cbc81ca6f31f57" gracePeriod=600 Nov 28 13:43:12 crc kubenswrapper[4857]: I1128 13:43:12.061928 4857 generic.go:334] "Generic (PLEG): container finished" podID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerID="4c4a6d6663fe5a6930513c4b0cca32f9c63d61f7609d54dbd2cbc81ca6f31f57" exitCode=0 Nov 28 13:43:12 crc kubenswrapper[4857]: I1128 13:43:12.061984 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerDied","Data":"4c4a6d6663fe5a6930513c4b0cca32f9c63d61f7609d54dbd2cbc81ca6f31f57"} Nov 28 13:43:12 crc kubenswrapper[4857]: I1128 13:43:12.062370 4857 scope.go:117] "RemoveContainer" containerID="89a3edf12385104d0bdb96e68e41d67bf249f17a57579150b21327f4b4e00094" Nov 28 13:43:13 crc kubenswrapper[4857]: I1128 13:43:13.068611 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerStarted","Data":"cb45ea7c38d2a9151e38696a74767baa9022920c5afdfffe7b22b74cc2cdddc2"} Nov 28 13:43:17 crc kubenswrapper[4857]: I1128 13:43:17.228539 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-5kpc2" Nov 28 13:43:17 crc kubenswrapper[4857]: I1128 13:43:17.229912 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-5kpc2" Nov 28 13:43:17 crc kubenswrapper[4857]: I1128 13:43:17.692884 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-5kpc2"] Nov 28 13:43:17 crc kubenswrapper[4857]: W1128 13:43:17.702001 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podebafba86_f75f_4f22_b85b_1a65df2eaadd.slice/crio-4aa503863ac8e20ab5741f76e506e43c032213ccb133590484912588ff518193 WatchSource:0}: Error finding container 4aa503863ac8e20ab5741f76e506e43c032213ccb133590484912588ff518193: Status 404 returned error can't find the container with id 4aa503863ac8e20ab5741f76e506e43c032213ccb133590484912588ff518193 Nov 28 13:43:17 crc kubenswrapper[4857]: I1128 13:43:17.705402 4857 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 13:43:18 crc kubenswrapper[4857]: I1128 13:43:18.098838 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-5kpc2" event={"ID":"ebafba86-f75f-4f22-b85b-1a65df2eaadd","Type":"ContainerStarted","Data":"4aa503863ac8e20ab5741f76e506e43c032213ccb133590484912588ff518193"} Nov 28 13:43:19 crc kubenswrapper[4857]: I1128 13:43:19.105056 4857 generic.go:334] "Generic (PLEG): container finished" podID="ebafba86-f75f-4f22-b85b-1a65df2eaadd" containerID="d546ee6712067e8cd8df2b617ef7312089b4cf7166fd686cf70c9b6258528935" exitCode=0 Nov 28 13:43:19 crc kubenswrapper[4857]: I1128 13:43:19.105125 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-5kpc2" event={"ID":"ebafba86-f75f-4f22-b85b-1a65df2eaadd","Type":"ContainerDied","Data":"d546ee6712067e8cd8df2b617ef7312089b4cf7166fd686cf70c9b6258528935"} Nov 28 13:43:20 crc kubenswrapper[4857]: I1128 13:43:20.441646 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-5kpc2" Nov 28 13:43:20 crc kubenswrapper[4857]: I1128 13:43:20.540096 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ck2sp\" (UniqueName: \"kubernetes.io/projected/ebafba86-f75f-4f22-b85b-1a65df2eaadd-kube-api-access-ck2sp\") pod \"ebafba86-f75f-4f22-b85b-1a65df2eaadd\" (UID: \"ebafba86-f75f-4f22-b85b-1a65df2eaadd\") " Nov 28 13:43:20 crc kubenswrapper[4857]: I1128 13:43:20.540233 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/ebafba86-f75f-4f22-b85b-1a65df2eaadd-crc-storage\") pod \"ebafba86-f75f-4f22-b85b-1a65df2eaadd\" (UID: \"ebafba86-f75f-4f22-b85b-1a65df2eaadd\") " Nov 28 13:43:20 crc kubenswrapper[4857]: I1128 13:43:20.540295 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/ebafba86-f75f-4f22-b85b-1a65df2eaadd-node-mnt\") pod \"ebafba86-f75f-4f22-b85b-1a65df2eaadd\" (UID: \"ebafba86-f75f-4f22-b85b-1a65df2eaadd\") " Nov 28 13:43:20 crc kubenswrapper[4857]: I1128 13:43:20.540461 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ebafba86-f75f-4f22-b85b-1a65df2eaadd-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "ebafba86-f75f-4f22-b85b-1a65df2eaadd" (UID: "ebafba86-f75f-4f22-b85b-1a65df2eaadd"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:43:20 crc kubenswrapper[4857]: I1128 13:43:20.540773 4857 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/ebafba86-f75f-4f22-b85b-1a65df2eaadd-node-mnt\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:20 crc kubenswrapper[4857]: I1128 13:43:20.545383 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ebafba86-f75f-4f22-b85b-1a65df2eaadd-kube-api-access-ck2sp" (OuterVolumeSpecName: "kube-api-access-ck2sp") pod "ebafba86-f75f-4f22-b85b-1a65df2eaadd" (UID: "ebafba86-f75f-4f22-b85b-1a65df2eaadd"). InnerVolumeSpecName "kube-api-access-ck2sp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:43:20 crc kubenswrapper[4857]: I1128 13:43:20.560516 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ebafba86-f75f-4f22-b85b-1a65df2eaadd-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "ebafba86-f75f-4f22-b85b-1a65df2eaadd" (UID: "ebafba86-f75f-4f22-b85b-1a65df2eaadd"). InnerVolumeSpecName "crc-storage". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:43:20 crc kubenswrapper[4857]: I1128 13:43:20.642298 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ck2sp\" (UniqueName: \"kubernetes.io/projected/ebafba86-f75f-4f22-b85b-1a65df2eaadd-kube-api-access-ck2sp\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:20 crc kubenswrapper[4857]: I1128 13:43:20.642340 4857 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/ebafba86-f75f-4f22-b85b-1a65df2eaadd-crc-storage\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.532939 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-5kpc2" Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.533005 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-5kpc2" event={"ID":"ebafba86-f75f-4f22-b85b-1a65df2eaadd","Type":"ContainerDied","Data":"4aa503863ac8e20ab5741f76e506e43c032213ccb133590484912588ff518193"} Nov 28 13:43:21 crc kubenswrapper[4857]: I1128 13:43:21.533232 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4aa503863ac8e20ab5741f76e506e43c032213ccb133590484912588ff518193" Nov 28 13:43:22 crc kubenswrapper[4857]: I1128 13:43:22.076928 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-f2mrp"] Nov 28 13:43:22 crc kubenswrapper[4857]: E1128 13:43:22.077631 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ebafba86-f75f-4f22-b85b-1a65df2eaadd" containerName="storage" Nov 28 13:43:22 crc kubenswrapper[4857]: I1128 13:43:22.077650 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="ebafba86-f75f-4f22-b85b-1a65df2eaadd" containerName="storage" Nov 28 13:43:22 crc kubenswrapper[4857]: I1128 13:43:22.077783 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="ebafba86-f75f-4f22-b85b-1a65df2eaadd" containerName="storage" Nov 28 13:43:22 crc kubenswrapper[4857]: I1128 13:43:22.078633 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-f2mrp" Nov 28 13:43:22 crc kubenswrapper[4857]: I1128 13:43:22.091572 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-f2mrp"] Nov 28 13:43:22 crc kubenswrapper[4857]: I1128 13:43:22.163369 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rbc9l\" (UniqueName: \"kubernetes.io/projected/02fdbca2-1145-4c91-90c8-f98fd6f242d1-kube-api-access-rbc9l\") pod \"certified-operators-f2mrp\" (UID: \"02fdbca2-1145-4c91-90c8-f98fd6f242d1\") " pod="openshift-marketplace/certified-operators-f2mrp" Nov 28 13:43:22 crc kubenswrapper[4857]: I1128 13:43:22.164211 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/02fdbca2-1145-4c91-90c8-f98fd6f242d1-catalog-content\") pod \"certified-operators-f2mrp\" (UID: \"02fdbca2-1145-4c91-90c8-f98fd6f242d1\") " pod="openshift-marketplace/certified-operators-f2mrp" Nov 28 13:43:22 crc kubenswrapper[4857]: I1128 13:43:22.164272 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/02fdbca2-1145-4c91-90c8-f98fd6f242d1-utilities\") pod \"certified-operators-f2mrp\" (UID: \"02fdbca2-1145-4c91-90c8-f98fd6f242d1\") " pod="openshift-marketplace/certified-operators-f2mrp" Nov 28 13:43:22 crc kubenswrapper[4857]: I1128 13:43:22.266089 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/02fdbca2-1145-4c91-90c8-f98fd6f242d1-catalog-content\") pod \"certified-operators-f2mrp\" (UID: \"02fdbca2-1145-4c91-90c8-f98fd6f242d1\") " pod="openshift-marketplace/certified-operators-f2mrp" Nov 28 13:43:22 crc kubenswrapper[4857]: I1128 13:43:22.266170 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/02fdbca2-1145-4c91-90c8-f98fd6f242d1-utilities\") pod \"certified-operators-f2mrp\" (UID: \"02fdbca2-1145-4c91-90c8-f98fd6f242d1\") " pod="openshift-marketplace/certified-operators-f2mrp" Nov 28 13:43:22 crc kubenswrapper[4857]: I1128 13:43:22.266218 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rbc9l\" (UniqueName: \"kubernetes.io/projected/02fdbca2-1145-4c91-90c8-f98fd6f242d1-kube-api-access-rbc9l\") pod \"certified-operators-f2mrp\" (UID: \"02fdbca2-1145-4c91-90c8-f98fd6f242d1\") " pod="openshift-marketplace/certified-operators-f2mrp" Nov 28 13:43:22 crc kubenswrapper[4857]: I1128 13:43:22.266657 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/02fdbca2-1145-4c91-90c8-f98fd6f242d1-catalog-content\") pod \"certified-operators-f2mrp\" (UID: \"02fdbca2-1145-4c91-90c8-f98fd6f242d1\") " pod="openshift-marketplace/certified-operators-f2mrp" Nov 28 13:43:22 crc kubenswrapper[4857]: I1128 13:43:22.266742 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/02fdbca2-1145-4c91-90c8-f98fd6f242d1-utilities\") pod \"certified-operators-f2mrp\" (UID: \"02fdbca2-1145-4c91-90c8-f98fd6f242d1\") " pod="openshift-marketplace/certified-operators-f2mrp" Nov 28 13:43:22 crc kubenswrapper[4857]: I1128 13:43:22.291266 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rbc9l\" (UniqueName: \"kubernetes.io/projected/02fdbca2-1145-4c91-90c8-f98fd6f242d1-kube-api-access-rbc9l\") pod \"certified-operators-f2mrp\" (UID: \"02fdbca2-1145-4c91-90c8-f98fd6f242d1\") " pod="openshift-marketplace/certified-operators-f2mrp" Nov 28 13:43:22 crc kubenswrapper[4857]: I1128 13:43:22.397714 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-f2mrp" Nov 28 13:43:22 crc kubenswrapper[4857]: I1128 13:43:22.642173 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-f2mrp"] Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.545156 4857 generic.go:334] "Generic (PLEG): container finished" podID="02fdbca2-1145-4c91-90c8-f98fd6f242d1" containerID="91cc68e56ba64da14373ecce4517beb7090b4c504cc55218a2ebaf514ed01c30" exitCode=0 Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.545313 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f2mrp" event={"ID":"02fdbca2-1145-4c91-90c8-f98fd6f242d1","Type":"ContainerDied","Data":"91cc68e56ba64da14373ecce4517beb7090b4c504cc55218a2ebaf514ed01c30"} Nov 28 13:43:23 crc kubenswrapper[4857]: I1128 13:43:23.545507 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f2mrp" event={"ID":"02fdbca2-1145-4c91-90c8-f98fd6f242d1","Type":"ContainerStarted","Data":"bd214a712bcc92cbc53453954ba93004a541f7f375ae14e947c77e9e2a1b166a"} Nov 28 13:43:24 crc kubenswrapper[4857]: I1128 13:43:24.552441 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f2mrp" event={"ID":"02fdbca2-1145-4c91-90c8-f98fd6f242d1","Type":"ContainerStarted","Data":"3ada2e8de0810eb346613bf97b6935d1bcbb8e358c1b33d0a56c71b966186260"} Nov 28 13:43:24 crc kubenswrapper[4857]: I1128 13:43:24.607364 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-fhw5k" Nov 28 13:43:25 crc kubenswrapper[4857]: I1128 13:43:25.561883 4857 generic.go:334] "Generic (PLEG): container finished" podID="02fdbca2-1145-4c91-90c8-f98fd6f242d1" containerID="3ada2e8de0810eb346613bf97b6935d1bcbb8e358c1b33d0a56c71b966186260" exitCode=0 Nov 28 13:43:25 crc kubenswrapper[4857]: I1128 13:43:25.561932 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f2mrp" event={"ID":"02fdbca2-1145-4c91-90c8-f98fd6f242d1","Type":"ContainerDied","Data":"3ada2e8de0810eb346613bf97b6935d1bcbb8e358c1b33d0a56c71b966186260"} Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.186344 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-f2mrp"] Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.201971 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-t6lt4"] Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.202253 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-t6lt4" podUID="88424fcd-c8d4-4df4-8176-30471a90470d" containerName="registry-server" containerID="cri-o://8d61c48bd240863a4d3a089bd3d2dfa1fc73910d57ca5aff84737944d59816d1" gracePeriod=30 Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.211234 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-t6t2s"] Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.211523 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-t6t2s" podUID="a45744b5-0329-43e7-834a-535eacdf9717" containerName="registry-server" containerID="cri-o://2f2143955fc9176d415a6e765e33f6a5fdf28352081d64c27f5ed62bda7bbb15" gracePeriod=30 Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 
13:43:26.224179 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-nx7ct"] Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.224401 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-nx7ct" podUID="de8315dd-951d-4fe6-a8a9-bc4dd3094743" containerName="marketplace-operator" containerID="cri-o://44347af9a835a3f5f3e86e0ea9846fc04acef0c3f502950bf5960044eaa00c80" gracePeriod=30 Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.273844 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-z7m4v"] Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.274243 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-j9xpx"] Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.274515 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-z7m4v" podUID="1c9b9dd6-86eb-4361-a872-9027be0c909f" containerName="registry-server" containerID="cri-o://2d96ae691c43e0e87c7e85bd80c8094434fb91d1a245147ca07ba1ab746bb90a" gracePeriod=30 Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.275738 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zw8sc"] Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.275767 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-j9xpx"] Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.275863 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-j9xpx" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.276084 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-zw8sc" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" containerName="registry-server" containerID="cri-o://8c60cc6d2fb177f18ec9983d6fa0a303463e52132c4f3faf516569def94b6df0" gracePeriod=30 Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.321146 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z4kxm\" (UniqueName: \"kubernetes.io/projected/63b3515b-2b80-4d2e-b3a7-9e8ff96223f6-kube-api-access-z4kxm\") pod \"marketplace-operator-79b997595-j9xpx\" (UID: \"63b3515b-2b80-4d2e-b3a7-9e8ff96223f6\") " pod="openshift-marketplace/marketplace-operator-79b997595-j9xpx" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.321206 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/63b3515b-2b80-4d2e-b3a7-9e8ff96223f6-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-j9xpx\" (UID: \"63b3515b-2b80-4d2e-b3a7-9e8ff96223f6\") " pod="openshift-marketplace/marketplace-operator-79b997595-j9xpx" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.321375 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/63b3515b-2b80-4d2e-b3a7-9e8ff96223f6-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-j9xpx\" (UID: \"63b3515b-2b80-4d2e-b3a7-9e8ff96223f6\") " pod="openshift-marketplace/marketplace-operator-79b997595-j9xpx" Nov 28 13:43:26 
crc kubenswrapper[4857]: I1128 13:43:26.422681 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z4kxm\" (UniqueName: \"kubernetes.io/projected/63b3515b-2b80-4d2e-b3a7-9e8ff96223f6-kube-api-access-z4kxm\") pod \"marketplace-operator-79b997595-j9xpx\" (UID: \"63b3515b-2b80-4d2e-b3a7-9e8ff96223f6\") " pod="openshift-marketplace/marketplace-operator-79b997595-j9xpx" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.422771 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/63b3515b-2b80-4d2e-b3a7-9e8ff96223f6-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-j9xpx\" (UID: \"63b3515b-2b80-4d2e-b3a7-9e8ff96223f6\") " pod="openshift-marketplace/marketplace-operator-79b997595-j9xpx" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.422813 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/63b3515b-2b80-4d2e-b3a7-9e8ff96223f6-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-j9xpx\" (UID: \"63b3515b-2b80-4d2e-b3a7-9e8ff96223f6\") " pod="openshift-marketplace/marketplace-operator-79b997595-j9xpx" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.430181 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/63b3515b-2b80-4d2e-b3a7-9e8ff96223f6-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-j9xpx\" (UID: \"63b3515b-2b80-4d2e-b3a7-9e8ff96223f6\") " pod="openshift-marketplace/marketplace-operator-79b997595-j9xpx" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.431666 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/63b3515b-2b80-4d2e-b3a7-9e8ff96223f6-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-j9xpx\" (UID: \"63b3515b-2b80-4d2e-b3a7-9e8ff96223f6\") " pod="openshift-marketplace/marketplace-operator-79b997595-j9xpx" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.439283 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z4kxm\" (UniqueName: \"kubernetes.io/projected/63b3515b-2b80-4d2e-b3a7-9e8ff96223f6-kube-api-access-z4kxm\") pod \"marketplace-operator-79b997595-j9xpx\" (UID: \"63b3515b-2b80-4d2e-b3a7-9e8ff96223f6\") " pod="openshift-marketplace/marketplace-operator-79b997595-j9xpx" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.569671 4857 generic.go:334] "Generic (PLEG): container finished" podID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" containerID="8c60cc6d2fb177f18ec9983d6fa0a303463e52132c4f3faf516569def94b6df0" exitCode=0 Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.569738 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zw8sc" event={"ID":"c5ae31f7-1a7f-414c-9fd3-dc7818c66483","Type":"ContainerDied","Data":"8c60cc6d2fb177f18ec9983d6fa0a303463e52132c4f3faf516569def94b6df0"} Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.571550 4857 generic.go:334] "Generic (PLEG): container finished" podID="a45744b5-0329-43e7-834a-535eacdf9717" containerID="2f2143955fc9176d415a6e765e33f6a5fdf28352081d64c27f5ed62bda7bbb15" exitCode=0 Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.571584 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-t6t2s" event={"ID":"a45744b5-0329-43e7-834a-535eacdf9717","Type":"ContainerDied","Data":"2f2143955fc9176d415a6e765e33f6a5fdf28352081d64c27f5ed62bda7bbb15"} Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.571598 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t6t2s" event={"ID":"a45744b5-0329-43e7-834a-535eacdf9717","Type":"ContainerDied","Data":"49b301bc394142d6e80316e648914d818ca8c738ac365e1bd1108349a1c7fff4"} Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.571608 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="49b301bc394142d6e80316e648914d818ca8c738ac365e1bd1108349a1c7fff4" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.573085 4857 generic.go:334] "Generic (PLEG): container finished" podID="88424fcd-c8d4-4df4-8176-30471a90470d" containerID="8d61c48bd240863a4d3a089bd3d2dfa1fc73910d57ca5aff84737944d59816d1" exitCode=0 Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.573119 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t6lt4" event={"ID":"88424fcd-c8d4-4df4-8176-30471a90470d","Type":"ContainerDied","Data":"8d61c48bd240863a4d3a089bd3d2dfa1fc73910d57ca5aff84737944d59816d1"} Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.573134 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-t6lt4" event={"ID":"88424fcd-c8d4-4df4-8176-30471a90470d","Type":"ContainerDied","Data":"d10622a59b30be4cb613118c76b8a01dca1eec778400ddcea7e592ec62d7884c"} Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.573143 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d10622a59b30be4cb613118c76b8a01dca1eec778400ddcea7e592ec62d7884c" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.575364 4857 generic.go:334] "Generic (PLEG): container finished" podID="1c9b9dd6-86eb-4361-a872-9027be0c909f" containerID="2d96ae691c43e0e87c7e85bd80c8094434fb91d1a245147ca07ba1ab746bb90a" exitCode=0 Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.575403 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z7m4v" event={"ID":"1c9b9dd6-86eb-4361-a872-9027be0c909f","Type":"ContainerDied","Data":"2d96ae691c43e0e87c7e85bd80c8094434fb91d1a245147ca07ba1ab746bb90a"} Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.576712 4857 generic.go:334] "Generic (PLEG): container finished" podID="de8315dd-951d-4fe6-a8a9-bc4dd3094743" containerID="44347af9a835a3f5f3e86e0ea9846fc04acef0c3f502950bf5960044eaa00c80" exitCode=0 Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.576746 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-nx7ct" event={"ID":"de8315dd-951d-4fe6-a8a9-bc4dd3094743","Type":"ContainerDied","Data":"44347af9a835a3f5f3e86e0ea9846fc04acef0c3f502950bf5960044eaa00c80"} Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.576760 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-nx7ct" event={"ID":"de8315dd-951d-4fe6-a8a9-bc4dd3094743","Type":"ContainerDied","Data":"18bdafd44f937d256a1aeb1c468ab5a42d71efa287ebdb491ac742902ebd1482"} Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.576769 4857 pod_container_deletor.go:80] "Container not found in pod's containers" 
containerID="18bdafd44f937d256a1aeb1c468ab5a42d71efa287ebdb491ac742902ebd1482" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.576784 4857 scope.go:117] "RemoveContainer" containerID="2e95b2d97bc8cd52a9710f0be7eebcda78f01e35a0358077b4001bb6a52e2ac6" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.590116 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f2mrp" event={"ID":"02fdbca2-1145-4c91-90c8-f98fd6f242d1","Type":"ContainerStarted","Data":"cd9e165e64487410d1d8c0ff095fac51e08f5540f1003b3c0d0152467072d6a2"} Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.590271 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-f2mrp" podUID="02fdbca2-1145-4c91-90c8-f98fd6f242d1" containerName="registry-server" containerID="cri-o://cd9e165e64487410d1d8c0ff095fac51e08f5540f1003b3c0d0152467072d6a2" gracePeriod=30 Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.608793 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-f2mrp" podStartSLOduration=2.028642836 podStartE2EDuration="4.6087716s" podCreationTimestamp="2025-11-28 13:43:22 +0000 UTC" firstStartedPulling="2025-11-28 13:43:23.546465838 +0000 UTC m=+853.670407275" lastFinishedPulling="2025-11-28 13:43:26.126594582 +0000 UTC m=+856.250536039" observedRunningTime="2025-11-28 13:43:26.608701899 +0000 UTC m=+856.732643336" watchObservedRunningTime="2025-11-28 13:43:26.6087716 +0000 UTC m=+856.732713037" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.617291 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-j9xpx" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.620069 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-t6lt4" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.625073 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-nx7ct" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.639880 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-t6t2s" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.704056 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z7m4v" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.709912 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-zw8sc" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.726230 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nvfhn\" (UniqueName: \"kubernetes.io/projected/de8315dd-951d-4fe6-a8a9-bc4dd3094743-kube-api-access-nvfhn\") pod \"de8315dd-951d-4fe6-a8a9-bc4dd3094743\" (UID: \"de8315dd-951d-4fe6-a8a9-bc4dd3094743\") " Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.726298 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a45744b5-0329-43e7-834a-535eacdf9717-catalog-content\") pod \"a45744b5-0329-43e7-834a-535eacdf9717\" (UID: \"a45744b5-0329-43e7-834a-535eacdf9717\") " Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.726341 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/de8315dd-951d-4fe6-a8a9-bc4dd3094743-marketplace-operator-metrics\") pod \"de8315dd-951d-4fe6-a8a9-bc4dd3094743\" (UID: \"de8315dd-951d-4fe6-a8a9-bc4dd3094743\") " Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.726373 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-75snf\" (UniqueName: \"kubernetes.io/projected/1c9b9dd6-86eb-4361-a872-9027be0c909f-kube-api-access-75snf\") pod \"1c9b9dd6-86eb-4361-a872-9027be0c909f\" (UID: \"1c9b9dd6-86eb-4361-a872-9027be0c909f\") " Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.726393 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/de8315dd-951d-4fe6-a8a9-bc4dd3094743-marketplace-trusted-ca\") pod \"de8315dd-951d-4fe6-a8a9-bc4dd3094743\" (UID: \"de8315dd-951d-4fe6-a8a9-bc4dd3094743\") " Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.726416 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c9b9dd6-86eb-4361-a872-9027be0c909f-utilities\") pod \"1c9b9dd6-86eb-4361-a872-9027be0c909f\" (UID: \"1c9b9dd6-86eb-4361-a872-9027be0c909f\") " Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.726440 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q6blx\" (UniqueName: \"kubernetes.io/projected/88424fcd-c8d4-4df4-8176-30471a90470d-kube-api-access-q6blx\") pod \"88424fcd-c8d4-4df4-8176-30471a90470d\" (UID: \"88424fcd-c8d4-4df4-8176-30471a90470d\") " Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.726475 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88424fcd-c8d4-4df4-8176-30471a90470d-utilities\") pod \"88424fcd-c8d4-4df4-8176-30471a90470d\" (UID: \"88424fcd-c8d4-4df4-8176-30471a90470d\") " Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.726492 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88424fcd-c8d4-4df4-8176-30471a90470d-catalog-content\") pod \"88424fcd-c8d4-4df4-8176-30471a90470d\" (UID: \"88424fcd-c8d4-4df4-8176-30471a90470d\") " Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.726518 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tfvdz\" (UniqueName: 
\"kubernetes.io/projected/a45744b5-0329-43e7-834a-535eacdf9717-kube-api-access-tfvdz\") pod \"a45744b5-0329-43e7-834a-535eacdf9717\" (UID: \"a45744b5-0329-43e7-834a-535eacdf9717\") " Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.726612 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a45744b5-0329-43e7-834a-535eacdf9717-utilities\") pod \"a45744b5-0329-43e7-834a-535eacdf9717\" (UID: \"a45744b5-0329-43e7-834a-535eacdf9717\") " Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.726635 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c9b9dd6-86eb-4361-a872-9027be0c909f-catalog-content\") pod \"1c9b9dd6-86eb-4361-a872-9027be0c909f\" (UID: \"1c9b9dd6-86eb-4361-a872-9027be0c909f\") " Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.729424 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/88424fcd-c8d4-4df4-8176-30471a90470d-utilities" (OuterVolumeSpecName: "utilities") pod "88424fcd-c8d4-4df4-8176-30471a90470d" (UID: "88424fcd-c8d4-4df4-8176-30471a90470d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.733251 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de8315dd-951d-4fe6-a8a9-bc4dd3094743-kube-api-access-nvfhn" (OuterVolumeSpecName: "kube-api-access-nvfhn") pod "de8315dd-951d-4fe6-a8a9-bc4dd3094743" (UID: "de8315dd-951d-4fe6-a8a9-bc4dd3094743"). InnerVolumeSpecName "kube-api-access-nvfhn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.734680 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/de8315dd-951d-4fe6-a8a9-bc4dd3094743-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "de8315dd-951d-4fe6-a8a9-bc4dd3094743" (UID: "de8315dd-951d-4fe6-a8a9-bc4dd3094743"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.737159 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1c9b9dd6-86eb-4361-a872-9027be0c909f-utilities" (OuterVolumeSpecName: "utilities") pod "1c9b9dd6-86eb-4361-a872-9027be0c909f" (UID: "1c9b9dd6-86eb-4361-a872-9027be0c909f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.739635 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c9b9dd6-86eb-4361-a872-9027be0c909f-kube-api-access-75snf" (OuterVolumeSpecName: "kube-api-access-75snf") pod "1c9b9dd6-86eb-4361-a872-9027be0c909f" (UID: "1c9b9dd6-86eb-4361-a872-9027be0c909f"). InnerVolumeSpecName "kube-api-access-75snf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.740086 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a45744b5-0329-43e7-834a-535eacdf9717-kube-api-access-tfvdz" (OuterVolumeSpecName: "kube-api-access-tfvdz") pod "a45744b5-0329-43e7-834a-535eacdf9717" (UID: "a45744b5-0329-43e7-834a-535eacdf9717"). InnerVolumeSpecName "kube-api-access-tfvdz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.740524 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de8315dd-951d-4fe6-a8a9-bc4dd3094743-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "de8315dd-951d-4fe6-a8a9-bc4dd3094743" (UID: "de8315dd-951d-4fe6-a8a9-bc4dd3094743"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.740650 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a45744b5-0329-43e7-834a-535eacdf9717-utilities" (OuterVolumeSpecName: "utilities") pod "a45744b5-0329-43e7-834a-535eacdf9717" (UID: "a45744b5-0329-43e7-834a-535eacdf9717"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.746783 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/88424fcd-c8d4-4df4-8176-30471a90470d-kube-api-access-q6blx" (OuterVolumeSpecName: "kube-api-access-q6blx") pod "88424fcd-c8d4-4df4-8176-30471a90470d" (UID: "88424fcd-c8d4-4df4-8176-30471a90470d"). InnerVolumeSpecName "kube-api-access-q6blx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.774540 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1c9b9dd6-86eb-4361-a872-9027be0c909f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1c9b9dd6-86eb-4361-a872-9027be0c909f" (UID: "1c9b9dd6-86eb-4361-a872-9027be0c909f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.789007 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/88424fcd-c8d4-4df4-8176-30471a90470d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "88424fcd-c8d4-4df4-8176-30471a90470d" (UID: "88424fcd-c8d4-4df4-8176-30471a90470d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.790520 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a45744b5-0329-43e7-834a-535eacdf9717-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a45744b5-0329-43e7-834a-535eacdf9717" (UID: "a45744b5-0329-43e7-834a-535eacdf9717"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.815906 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-j9xpx"] Nov 28 13:43:26 crc kubenswrapper[4857]: W1128 13:43:26.821075 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod63b3515b_2b80_4d2e_b3a7_9e8ff96223f6.slice/crio-d61eaeaae7094982ee122b5b4d5b917f0bdb97461e6f75d4d8c7a9bced99d5bf WatchSource:0}: Error finding container d61eaeaae7094982ee122b5b4d5b917f0bdb97461e6f75d4d8c7a9bced99d5bf: Status 404 returned error can't find the container with id d61eaeaae7094982ee122b5b4d5b917f0bdb97461e6f75d4d8c7a9bced99d5bf Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.827405 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7d7km\" (UniqueName: \"kubernetes.io/projected/c5ae31f7-1a7f-414c-9fd3-dc7818c66483-kube-api-access-7d7km\") pod \"c5ae31f7-1a7f-414c-9fd3-dc7818c66483\" (UID: \"c5ae31f7-1a7f-414c-9fd3-dc7818c66483\") " Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.827443 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c5ae31f7-1a7f-414c-9fd3-dc7818c66483-catalog-content\") pod \"c5ae31f7-1a7f-414c-9fd3-dc7818c66483\" (UID: \"c5ae31f7-1a7f-414c-9fd3-dc7818c66483\") " Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.827489 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c5ae31f7-1a7f-414c-9fd3-dc7818c66483-utilities\") pod \"c5ae31f7-1a7f-414c-9fd3-dc7818c66483\" (UID: \"c5ae31f7-1a7f-414c-9fd3-dc7818c66483\") " Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.827733 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nvfhn\" (UniqueName: \"kubernetes.io/projected/de8315dd-951d-4fe6-a8a9-bc4dd3094743-kube-api-access-nvfhn\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.827751 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a45744b5-0329-43e7-834a-535eacdf9717-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.827761 4857 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/de8315dd-951d-4fe6-a8a9-bc4dd3094743-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.827771 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-75snf\" (UniqueName: \"kubernetes.io/projected/1c9b9dd6-86eb-4361-a872-9027be0c909f-kube-api-access-75snf\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.827779 4857 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/de8315dd-951d-4fe6-a8a9-bc4dd3094743-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.827788 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c9b9dd6-86eb-4361-a872-9027be0c909f-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:26 crc 
kubenswrapper[4857]: I1128 13:43:26.827796 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q6blx\" (UniqueName: \"kubernetes.io/projected/88424fcd-c8d4-4df4-8176-30471a90470d-kube-api-access-q6blx\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.827804 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88424fcd-c8d4-4df4-8176-30471a90470d-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.827812 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88424fcd-c8d4-4df4-8176-30471a90470d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.827820 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tfvdz\" (UniqueName: \"kubernetes.io/projected/a45744b5-0329-43e7-834a-535eacdf9717-kube-api-access-tfvdz\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.827829 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a45744b5-0329-43e7-834a-535eacdf9717-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.827837 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c9b9dd6-86eb-4361-a872-9027be0c909f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.828753 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5ae31f7-1a7f-414c-9fd3-dc7818c66483-utilities" (OuterVolumeSpecName: "utilities") pod "c5ae31f7-1a7f-414c-9fd3-dc7818c66483" (UID: "c5ae31f7-1a7f-414c-9fd3-dc7818c66483"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.829849 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5ae31f7-1a7f-414c-9fd3-dc7818c66483-kube-api-access-7d7km" (OuterVolumeSpecName: "kube-api-access-7d7km") pod "c5ae31f7-1a7f-414c-9fd3-dc7818c66483" (UID: "c5ae31f7-1a7f-414c-9fd3-dc7818c66483"). InnerVolumeSpecName "kube-api-access-7d7km". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.925073 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-f2mrp_02fdbca2-1145-4c91-90c8-f98fd6f242d1/registry-server/0.log" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.925984 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-f2mrp" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.928987 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7d7km\" (UniqueName: \"kubernetes.io/projected/c5ae31f7-1a7f-414c-9fd3-dc7818c66483-kube-api-access-7d7km\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.929036 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c5ae31f7-1a7f-414c-9fd3-dc7818c66483-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:26 crc kubenswrapper[4857]: I1128 13:43:26.947533 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5ae31f7-1a7f-414c-9fd3-dc7818c66483-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c5ae31f7-1a7f-414c-9fd3-dc7818c66483" (UID: "c5ae31f7-1a7f-414c-9fd3-dc7818c66483"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.029907 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/02fdbca2-1145-4c91-90c8-f98fd6f242d1-catalog-content\") pod \"02fdbca2-1145-4c91-90c8-f98fd6f242d1\" (UID: \"02fdbca2-1145-4c91-90c8-f98fd6f242d1\") " Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.029975 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/02fdbca2-1145-4c91-90c8-f98fd6f242d1-utilities\") pod \"02fdbca2-1145-4c91-90c8-f98fd6f242d1\" (UID: \"02fdbca2-1145-4c91-90c8-f98fd6f242d1\") " Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.030006 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rbc9l\" (UniqueName: \"kubernetes.io/projected/02fdbca2-1145-4c91-90c8-f98fd6f242d1-kube-api-access-rbc9l\") pod \"02fdbca2-1145-4c91-90c8-f98fd6f242d1\" (UID: \"02fdbca2-1145-4c91-90c8-f98fd6f242d1\") " Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.030164 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c5ae31f7-1a7f-414c-9fd3-dc7818c66483-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.030862 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/02fdbca2-1145-4c91-90c8-f98fd6f242d1-utilities" (OuterVolumeSpecName: "utilities") pod "02fdbca2-1145-4c91-90c8-f98fd6f242d1" (UID: "02fdbca2-1145-4c91-90c8-f98fd6f242d1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.035075 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/02fdbca2-1145-4c91-90c8-f98fd6f242d1-kube-api-access-rbc9l" (OuterVolumeSpecName: "kube-api-access-rbc9l") pod "02fdbca2-1145-4c91-90c8-f98fd6f242d1" (UID: "02fdbca2-1145-4c91-90c8-f98fd6f242d1"). InnerVolumeSpecName "kube-api-access-rbc9l". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.084023 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/02fdbca2-1145-4c91-90c8-f98fd6f242d1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "02fdbca2-1145-4c91-90c8-f98fd6f242d1" (UID: "02fdbca2-1145-4c91-90c8-f98fd6f242d1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.131604 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/02fdbca2-1145-4c91-90c8-f98fd6f242d1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.131641 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/02fdbca2-1145-4c91-90c8-f98fd6f242d1-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.131651 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rbc9l\" (UniqueName: \"kubernetes.io/projected/02fdbca2-1145-4c91-90c8-f98fd6f242d1-kube-api-access-rbc9l\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.599323 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-nx7ct" Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.602644 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-f2mrp_02fdbca2-1145-4c91-90c8-f98fd6f242d1/registry-server/0.log" Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.603813 4857 generic.go:334] "Generic (PLEG): container finished" podID="02fdbca2-1145-4c91-90c8-f98fd6f242d1" containerID="cd9e165e64487410d1d8c0ff095fac51e08f5540f1003b3c0d0152467072d6a2" exitCode=1 Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.603903 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f2mrp" event={"ID":"02fdbca2-1145-4c91-90c8-f98fd6f242d1","Type":"ContainerDied","Data":"cd9e165e64487410d1d8c0ff095fac51e08f5540f1003b3c0d0152467072d6a2"} Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.603939 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f2mrp" event={"ID":"02fdbca2-1145-4c91-90c8-f98fd6f242d1","Type":"ContainerDied","Data":"bd214a712bcc92cbc53453954ba93004a541f7f375ae14e947c77e9e2a1b166a"} Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.604001 4857 scope.go:117] "RemoveContainer" containerID="cd9e165e64487410d1d8c0ff095fac51e08f5540f1003b3c0d0152467072d6a2" Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.604179 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-f2mrp" Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.609677 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zw8sc" event={"ID":"c5ae31f7-1a7f-414c-9fd3-dc7818c66483","Type":"ContainerDied","Data":"abd27e66f0284251c51c78f39085ec66a50acb92ef9a3758861844d7affa9d6f"} Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.609741 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-zw8sc" Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.612488 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-j9xpx" event={"ID":"63b3515b-2b80-4d2e-b3a7-9e8ff96223f6","Type":"ContainerStarted","Data":"71c69d748cfcd2f721188c888215f6d5d638b9799034a1804a8b98db734eff68"} Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.612533 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-j9xpx" event={"ID":"63b3515b-2b80-4d2e-b3a7-9e8ff96223f6","Type":"ContainerStarted","Data":"d61eaeaae7094982ee122b5b4d5b917f0bdb97461e6f75d4d8c7a9bced99d5bf"} Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.613373 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-j9xpx" Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.616421 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z7m4v" Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.616424 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z7m4v" event={"ID":"1c9b9dd6-86eb-4361-a872-9027be0c909f","Type":"ContainerDied","Data":"852b5af1ce6de42ac38edbbbedb12077ec3fb4a61f6f9844f62c3f6ec7136848"} Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.616736 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-t6t2s" Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.617723 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-t6lt4" Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.622692 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-j9xpx" Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.641654 4857 scope.go:117] "RemoveContainer" containerID="3ada2e8de0810eb346613bf97b6935d1bcbb8e358c1b33d0a56c71b966186260" Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.646682 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-j9xpx" podStartSLOduration=1.646653269 podStartE2EDuration="1.646653269s" podCreationTimestamp="2025-11-28 13:43:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:43:27.635125864 +0000 UTC m=+857.759067381" watchObservedRunningTime="2025-11-28 13:43:27.646653269 +0000 UTC m=+857.770594746" Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.682881 4857 scope.go:117] "RemoveContainer" containerID="91cc68e56ba64da14373ecce4517beb7090b4c504cc55218a2ebaf514ed01c30" Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.706922 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-nx7ct"] Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.715027 4857 scope.go:117] "RemoveContainer" containerID="cd9e165e64487410d1d8c0ff095fac51e08f5540f1003b3c0d0152467072d6a2" Nov 28 13:43:27 crc kubenswrapper[4857]: E1128 13:43:27.716491 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cd9e165e64487410d1d8c0ff095fac51e08f5540f1003b3c0d0152467072d6a2\": container with ID starting with cd9e165e64487410d1d8c0ff095fac51e08f5540f1003b3c0d0152467072d6a2 not found: ID does not exist" containerID="cd9e165e64487410d1d8c0ff095fac51e08f5540f1003b3c0d0152467072d6a2" Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.716539 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd9e165e64487410d1d8c0ff095fac51e08f5540f1003b3c0d0152467072d6a2"} err="failed to get container status \"cd9e165e64487410d1d8c0ff095fac51e08f5540f1003b3c0d0152467072d6a2\": rpc error: code = NotFound desc = could not find container \"cd9e165e64487410d1d8c0ff095fac51e08f5540f1003b3c0d0152467072d6a2\": container with ID starting with cd9e165e64487410d1d8c0ff095fac51e08f5540f1003b3c0d0152467072d6a2 not found: ID does not exist" Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.716570 4857 scope.go:117] "RemoveContainer" containerID="3ada2e8de0810eb346613bf97b6935d1bcbb8e358c1b33d0a56c71b966186260" Nov 28 13:43:27 crc kubenswrapper[4857]: E1128 13:43:27.718046 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3ada2e8de0810eb346613bf97b6935d1bcbb8e358c1b33d0a56c71b966186260\": container with ID starting with 3ada2e8de0810eb346613bf97b6935d1bcbb8e358c1b33d0a56c71b966186260 not found: ID does not exist" containerID="3ada2e8de0810eb346613bf97b6935d1bcbb8e358c1b33d0a56c71b966186260" Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.718124 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3ada2e8de0810eb346613bf97b6935d1bcbb8e358c1b33d0a56c71b966186260"} err="failed to get container 
status \"3ada2e8de0810eb346613bf97b6935d1bcbb8e358c1b33d0a56c71b966186260\": rpc error: code = NotFound desc = could not find container \"3ada2e8de0810eb346613bf97b6935d1bcbb8e358c1b33d0a56c71b966186260\": container with ID starting with 3ada2e8de0810eb346613bf97b6935d1bcbb8e358c1b33d0a56c71b966186260 not found: ID does not exist" Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.718186 4857 scope.go:117] "RemoveContainer" containerID="91cc68e56ba64da14373ecce4517beb7090b4c504cc55218a2ebaf514ed01c30" Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.718568 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-nx7ct"] Nov 28 13:43:27 crc kubenswrapper[4857]: E1128 13:43:27.718875 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"91cc68e56ba64da14373ecce4517beb7090b4c504cc55218a2ebaf514ed01c30\": container with ID starting with 91cc68e56ba64da14373ecce4517beb7090b4c504cc55218a2ebaf514ed01c30 not found: ID does not exist" containerID="91cc68e56ba64da14373ecce4517beb7090b4c504cc55218a2ebaf514ed01c30" Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.719038 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91cc68e56ba64da14373ecce4517beb7090b4c504cc55218a2ebaf514ed01c30"} err="failed to get container status \"91cc68e56ba64da14373ecce4517beb7090b4c504cc55218a2ebaf514ed01c30\": rpc error: code = NotFound desc = could not find container \"91cc68e56ba64da14373ecce4517beb7090b4c504cc55218a2ebaf514ed01c30\": container with ID starting with 91cc68e56ba64da14373ecce4517beb7090b4c504cc55218a2ebaf514ed01c30 not found: ID does not exist" Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.719075 4857 scope.go:117] "RemoveContainer" containerID="8c60cc6d2fb177f18ec9983d6fa0a303463e52132c4f3faf516569def94b6df0" Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.724908 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zw8sc"] Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.730474 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-zw8sc"] Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.735456 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-z7m4v"] Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.744900 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-z7m4v"] Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.747514 4857 scope.go:117] "RemoveContainer" containerID="ec546d5ae7185b08abfbb757dd7c10739d043fca287768831d31f8ddb03dcba7" Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.753214 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-t6lt4"] Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.757035 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-t6lt4"] Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.768443 4857 scope.go:117] "RemoveContainer" containerID="d4e3dbc9639e2175c33e952a51809f762d59d17a5c7ba964c4e7db506bc0e4f8" Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.776684 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-f2mrp"] Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 
13:43:27.781406 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-f2mrp"] Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.785881 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-t6t2s"] Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.788852 4857 scope.go:117] "RemoveContainer" containerID="2d96ae691c43e0e87c7e85bd80c8094434fb91d1a245147ca07ba1ab746bb90a" Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.789739 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-t6t2s"] Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.828007 4857 scope.go:117] "RemoveContainer" containerID="626db5dc2c1b52c1db63ce240fb462035feaf5c6a91fbe742cd23b9dda342c2b" Nov 28 13:43:27 crc kubenswrapper[4857]: I1128 13:43:27.839017 4857 scope.go:117] "RemoveContainer" containerID="018f942b13cbe461ed784d4b79edde34482a5eb8e81d5badc1c83cc668ba6d95" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.236565 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="02fdbca2-1145-4c91-90c8-f98fd6f242d1" path="/var/lib/kubelet/pods/02fdbca2-1145-4c91-90c8-f98fd6f242d1/volumes" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.238364 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c9b9dd6-86eb-4361-a872-9027be0c909f" path="/var/lib/kubelet/pods/1c9b9dd6-86eb-4361-a872-9027be0c909f/volumes" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.240203 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="88424fcd-c8d4-4df4-8176-30471a90470d" path="/var/lib/kubelet/pods/88424fcd-c8d4-4df4-8176-30471a90470d/volumes" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.243019 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a45744b5-0329-43e7-834a-535eacdf9717" path="/var/lib/kubelet/pods/a45744b5-0329-43e7-834a-535eacdf9717/volumes" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.245082 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" path="/var/lib/kubelet/pods/c5ae31f7-1a7f-414c-9fd3-dc7818c66483/volumes" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.246097 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de8315dd-951d-4fe6-a8a9-bc4dd3094743" path="/var/lib/kubelet/pods/de8315dd-951d-4fe6-a8a9-bc4dd3094743/volumes" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.467387 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-mdjp7"] Nov 28 13:43:28 crc kubenswrapper[4857]: E1128 13:43:28.467658 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02fdbca2-1145-4c91-90c8-f98fd6f242d1" containerName="extract-content" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.467676 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="02fdbca2-1145-4c91-90c8-f98fd6f242d1" containerName="extract-content" Nov 28 13:43:28 crc kubenswrapper[4857]: E1128 13:43:28.467698 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88424fcd-c8d4-4df4-8176-30471a90470d" containerName="extract-content" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.467706 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="88424fcd-c8d4-4df4-8176-30471a90470d" containerName="extract-content" Nov 28 13:43:28 crc kubenswrapper[4857]: E1128 13:43:28.467715 4857 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02fdbca2-1145-4c91-90c8-f98fd6f242d1" containerName="extract-utilities" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.467725 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="02fdbca2-1145-4c91-90c8-f98fd6f242d1" containerName="extract-utilities" Nov 28 13:43:28 crc kubenswrapper[4857]: E1128 13:43:28.467734 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02fdbca2-1145-4c91-90c8-f98fd6f242d1" containerName="registry-server" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.467742 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="02fdbca2-1145-4c91-90c8-f98fd6f242d1" containerName="registry-server" Nov 28 13:43:28 crc kubenswrapper[4857]: E1128 13:43:28.467753 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de8315dd-951d-4fe6-a8a9-bc4dd3094743" containerName="marketplace-operator" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.467761 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="de8315dd-951d-4fe6-a8a9-bc4dd3094743" containerName="marketplace-operator" Nov 28 13:43:28 crc kubenswrapper[4857]: E1128 13:43:28.467773 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" containerName="extract-utilities" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.467780 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" containerName="extract-utilities" Nov 28 13:43:28 crc kubenswrapper[4857]: E1128 13:43:28.467792 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" containerName="registry-server" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.467799 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" containerName="registry-server" Nov 28 13:43:28 crc kubenswrapper[4857]: E1128 13:43:28.467813 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a45744b5-0329-43e7-834a-535eacdf9717" containerName="registry-server" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.467820 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a45744b5-0329-43e7-834a-535eacdf9717" containerName="registry-server" Nov 28 13:43:28 crc kubenswrapper[4857]: E1128 13:43:28.467832 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a45744b5-0329-43e7-834a-535eacdf9717" containerName="extract-utilities" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.467842 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a45744b5-0329-43e7-834a-535eacdf9717" containerName="extract-utilities" Nov 28 13:43:28 crc kubenswrapper[4857]: E1128 13:43:28.467850 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88424fcd-c8d4-4df4-8176-30471a90470d" containerName="registry-server" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.467857 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="88424fcd-c8d4-4df4-8176-30471a90470d" containerName="registry-server" Nov 28 13:43:28 crc kubenswrapper[4857]: E1128 13:43:28.467867 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de8315dd-951d-4fe6-a8a9-bc4dd3094743" containerName="marketplace-operator" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.467875 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="de8315dd-951d-4fe6-a8a9-bc4dd3094743" containerName="marketplace-operator" Nov 28 13:43:28 
crc kubenswrapper[4857]: E1128 13:43:28.467889 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c9b9dd6-86eb-4361-a872-9027be0c909f" containerName="extract-utilities" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.467899 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c9b9dd6-86eb-4361-a872-9027be0c909f" containerName="extract-utilities" Nov 28 13:43:28 crc kubenswrapper[4857]: E1128 13:43:28.467911 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" containerName="extract-content" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.467918 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" containerName="extract-content" Nov 28 13:43:28 crc kubenswrapper[4857]: E1128 13:43:28.467928 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c9b9dd6-86eb-4361-a872-9027be0c909f" containerName="registry-server" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.467935 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c9b9dd6-86eb-4361-a872-9027be0c909f" containerName="registry-server" Nov 28 13:43:28 crc kubenswrapper[4857]: E1128 13:43:28.467979 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88424fcd-c8d4-4df4-8176-30471a90470d" containerName="extract-utilities" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.467990 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="88424fcd-c8d4-4df4-8176-30471a90470d" containerName="extract-utilities" Nov 28 13:43:28 crc kubenswrapper[4857]: E1128 13:43:28.467999 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c9b9dd6-86eb-4361-a872-9027be0c909f" containerName="extract-content" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.468008 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c9b9dd6-86eb-4361-a872-9027be0c909f" containerName="extract-content" Nov 28 13:43:28 crc kubenswrapper[4857]: E1128 13:43:28.468018 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a45744b5-0329-43e7-834a-535eacdf9717" containerName="extract-content" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.468026 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a45744b5-0329-43e7-834a-535eacdf9717" containerName="extract-content" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.468236 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5ae31f7-1a7f-414c-9fd3-dc7818c66483" containerName="registry-server" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.468251 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c9b9dd6-86eb-4361-a872-9027be0c909f" containerName="registry-server" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.468260 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="de8315dd-951d-4fe6-a8a9-bc4dd3094743" containerName="marketplace-operator" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.468293 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="de8315dd-951d-4fe6-a8a9-bc4dd3094743" containerName="marketplace-operator" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.468308 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="02fdbca2-1145-4c91-90c8-f98fd6f242d1" containerName="registry-server" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.468320 4857 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="a45744b5-0329-43e7-834a-535eacdf9717" containerName="registry-server" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.468333 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="88424fcd-c8d4-4df4-8176-30471a90470d" containerName="registry-server" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.469319 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mdjp7" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.470965 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mdjp7"] Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.473517 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.550490 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jq4d5\" (UniqueName: \"kubernetes.io/projected/9fa5c6f1-d612-4ae1-993f-b2ebc20a5d91-kube-api-access-jq4d5\") pod \"community-operators-mdjp7\" (UID: \"9fa5c6f1-d612-4ae1-993f-b2ebc20a5d91\") " pod="openshift-marketplace/community-operators-mdjp7" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.550712 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9fa5c6f1-d612-4ae1-993f-b2ebc20a5d91-catalog-content\") pod \"community-operators-mdjp7\" (UID: \"9fa5c6f1-d612-4ae1-993f-b2ebc20a5d91\") " pod="openshift-marketplace/community-operators-mdjp7" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.550766 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9fa5c6f1-d612-4ae1-993f-b2ebc20a5d91-utilities\") pod \"community-operators-mdjp7\" (UID: \"9fa5c6f1-d612-4ae1-993f-b2ebc20a5d91\") " pod="openshift-marketplace/community-operators-mdjp7" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.660118 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jq4d5\" (UniqueName: \"kubernetes.io/projected/9fa5c6f1-d612-4ae1-993f-b2ebc20a5d91-kube-api-access-jq4d5\") pod \"community-operators-mdjp7\" (UID: \"9fa5c6f1-d612-4ae1-993f-b2ebc20a5d91\") " pod="openshift-marketplace/community-operators-mdjp7" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.660297 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9fa5c6f1-d612-4ae1-993f-b2ebc20a5d91-catalog-content\") pod \"community-operators-mdjp7\" (UID: \"9fa5c6f1-d612-4ae1-993f-b2ebc20a5d91\") " pod="openshift-marketplace/community-operators-mdjp7" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.660354 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9fa5c6f1-d612-4ae1-993f-b2ebc20a5d91-utilities\") pod \"community-operators-mdjp7\" (UID: \"9fa5c6f1-d612-4ae1-993f-b2ebc20a5d91\") " pod="openshift-marketplace/community-operators-mdjp7" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.661342 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9fa5c6f1-d612-4ae1-993f-b2ebc20a5d91-utilities\") pod \"community-operators-mdjp7\" 
(UID: \"9fa5c6f1-d612-4ae1-993f-b2ebc20a5d91\") " pod="openshift-marketplace/community-operators-mdjp7" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.662387 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9fa5c6f1-d612-4ae1-993f-b2ebc20a5d91-catalog-content\") pod \"community-operators-mdjp7\" (UID: \"9fa5c6f1-d612-4ae1-993f-b2ebc20a5d91\") " pod="openshift-marketplace/community-operators-mdjp7" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.669335 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-86gwb"] Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.670524 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-86gwb" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.672672 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.677708 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-86gwb"] Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.702826 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jq4d5\" (UniqueName: \"kubernetes.io/projected/9fa5c6f1-d612-4ae1-993f-b2ebc20a5d91-kube-api-access-jq4d5\") pod \"community-operators-mdjp7\" (UID: \"9fa5c6f1-d612-4ae1-993f-b2ebc20a5d91\") " pod="openshift-marketplace/community-operators-mdjp7" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.760979 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-spr9h\" (UniqueName: \"kubernetes.io/projected/2142b9c8-bd24-407d-a45f-b2a9d2019b71-kube-api-access-spr9h\") pod \"redhat-marketplace-86gwb\" (UID: \"2142b9c8-bd24-407d-a45f-b2a9d2019b71\") " pod="openshift-marketplace/redhat-marketplace-86gwb" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.761026 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2142b9c8-bd24-407d-a45f-b2a9d2019b71-catalog-content\") pod \"redhat-marketplace-86gwb\" (UID: \"2142b9c8-bd24-407d-a45f-b2a9d2019b71\") " pod="openshift-marketplace/redhat-marketplace-86gwb" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.761062 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2142b9c8-bd24-407d-a45f-b2a9d2019b71-utilities\") pod \"redhat-marketplace-86gwb\" (UID: \"2142b9c8-bd24-407d-a45f-b2a9d2019b71\") " pod="openshift-marketplace/redhat-marketplace-86gwb" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.793936 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mdjp7" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.862744 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-spr9h\" (UniqueName: \"kubernetes.io/projected/2142b9c8-bd24-407d-a45f-b2a9d2019b71-kube-api-access-spr9h\") pod \"redhat-marketplace-86gwb\" (UID: \"2142b9c8-bd24-407d-a45f-b2a9d2019b71\") " pod="openshift-marketplace/redhat-marketplace-86gwb" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.862800 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2142b9c8-bd24-407d-a45f-b2a9d2019b71-catalog-content\") pod \"redhat-marketplace-86gwb\" (UID: \"2142b9c8-bd24-407d-a45f-b2a9d2019b71\") " pod="openshift-marketplace/redhat-marketplace-86gwb" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.862838 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2142b9c8-bd24-407d-a45f-b2a9d2019b71-utilities\") pod \"redhat-marketplace-86gwb\" (UID: \"2142b9c8-bd24-407d-a45f-b2a9d2019b71\") " pod="openshift-marketplace/redhat-marketplace-86gwb" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.863357 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2142b9c8-bd24-407d-a45f-b2a9d2019b71-utilities\") pod \"redhat-marketplace-86gwb\" (UID: \"2142b9c8-bd24-407d-a45f-b2a9d2019b71\") " pod="openshift-marketplace/redhat-marketplace-86gwb" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.863781 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-rbm42"] Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.867787 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rbm42" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.871620 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2142b9c8-bd24-407d-a45f-b2a9d2019b71-catalog-content\") pod \"redhat-marketplace-86gwb\" (UID: \"2142b9c8-bd24-407d-a45f-b2a9d2019b71\") " pod="openshift-marketplace/redhat-marketplace-86gwb" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.874847 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rbm42"] Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.886911 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-spr9h\" (UniqueName: \"kubernetes.io/projected/2142b9c8-bd24-407d-a45f-b2a9d2019b71-kube-api-access-spr9h\") pod \"redhat-marketplace-86gwb\" (UID: \"2142b9c8-bd24-407d-a45f-b2a9d2019b71\") " pod="openshift-marketplace/redhat-marketplace-86gwb" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.963759 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jls8c\" (UniqueName: \"kubernetes.io/projected/fba802c6-069f-4872-9ed0-24ee26941f0f-kube-api-access-jls8c\") pod \"community-operators-rbm42\" (UID: \"fba802c6-069f-4872-9ed0-24ee26941f0f\") " pod="openshift-marketplace/community-operators-rbm42" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.963994 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fba802c6-069f-4872-9ed0-24ee26941f0f-catalog-content\") pod \"community-operators-rbm42\" (UID: \"fba802c6-069f-4872-9ed0-24ee26941f0f\") " pod="openshift-marketplace/community-operators-rbm42" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.964047 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fba802c6-069f-4872-9ed0-24ee26941f0f-utilities\") pod \"community-operators-rbm42\" (UID: \"fba802c6-069f-4872-9ed0-24ee26941f0f\") " pod="openshift-marketplace/community-operators-rbm42" Nov 28 13:43:28 crc kubenswrapper[4857]: I1128 13:43:28.996672 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-86gwb" Nov 28 13:43:29 crc kubenswrapper[4857]: I1128 13:43:29.012309 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mdjp7"] Nov 28 13:43:29 crc kubenswrapper[4857]: I1128 13:43:29.057661 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-bfv4b"] Nov 28 13:43:29 crc kubenswrapper[4857]: I1128 13:43:29.058819 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bfv4b" Nov 28 13:43:29 crc kubenswrapper[4857]: I1128 13:43:29.062690 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bfv4b"] Nov 28 13:43:29 crc kubenswrapper[4857]: I1128 13:43:29.065787 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jls8c\" (UniqueName: \"kubernetes.io/projected/fba802c6-069f-4872-9ed0-24ee26941f0f-kube-api-access-jls8c\") pod \"community-operators-rbm42\" (UID: \"fba802c6-069f-4872-9ed0-24ee26941f0f\") " pod="openshift-marketplace/community-operators-rbm42" Nov 28 13:43:29 crc kubenswrapper[4857]: I1128 13:43:29.066027 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fba802c6-069f-4872-9ed0-24ee26941f0f-catalog-content\") pod \"community-operators-rbm42\" (UID: \"fba802c6-069f-4872-9ed0-24ee26941f0f\") " pod="openshift-marketplace/community-operators-rbm42" Nov 28 13:43:29 crc kubenswrapper[4857]: I1128 13:43:29.066063 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fba802c6-069f-4872-9ed0-24ee26941f0f-utilities\") pod \"community-operators-rbm42\" (UID: \"fba802c6-069f-4872-9ed0-24ee26941f0f\") " pod="openshift-marketplace/community-operators-rbm42" Nov 28 13:43:29 crc kubenswrapper[4857]: I1128 13:43:29.066654 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fba802c6-069f-4872-9ed0-24ee26941f0f-catalog-content\") pod \"community-operators-rbm42\" (UID: \"fba802c6-069f-4872-9ed0-24ee26941f0f\") " pod="openshift-marketplace/community-operators-rbm42" Nov 28 13:43:29 crc kubenswrapper[4857]: I1128 13:43:29.067475 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fba802c6-069f-4872-9ed0-24ee26941f0f-utilities\") pod \"community-operators-rbm42\" (UID: \"fba802c6-069f-4872-9ed0-24ee26941f0f\") " pod="openshift-marketplace/community-operators-rbm42" Nov 28 13:43:29 crc kubenswrapper[4857]: I1128 13:43:29.085189 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jls8c\" (UniqueName: \"kubernetes.io/projected/fba802c6-069f-4872-9ed0-24ee26941f0f-kube-api-access-jls8c\") pod \"community-operators-rbm42\" (UID: \"fba802c6-069f-4872-9ed0-24ee26941f0f\") " pod="openshift-marketplace/community-operators-rbm42" Nov 28 13:43:29 crc kubenswrapper[4857]: I1128 13:43:29.167481 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0767ad94-3350-4068-a3af-7ba756b44b2f-catalog-content\") pod \"redhat-marketplace-bfv4b\" (UID: \"0767ad94-3350-4068-a3af-7ba756b44b2f\") " pod="openshift-marketplace/redhat-marketplace-bfv4b" Nov 28 13:43:29 crc kubenswrapper[4857]: I1128 13:43:29.167561 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v8g5x\" (UniqueName: \"kubernetes.io/projected/0767ad94-3350-4068-a3af-7ba756b44b2f-kube-api-access-v8g5x\") pod \"redhat-marketplace-bfv4b\" (UID: \"0767ad94-3350-4068-a3af-7ba756b44b2f\") " pod="openshift-marketplace/redhat-marketplace-bfv4b" Nov 28 13:43:29 crc kubenswrapper[4857]: I1128 13:43:29.167642 4857 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0767ad94-3350-4068-a3af-7ba756b44b2f-utilities\") pod \"redhat-marketplace-bfv4b\" (UID: \"0767ad94-3350-4068-a3af-7ba756b44b2f\") " pod="openshift-marketplace/redhat-marketplace-bfv4b" Nov 28 13:43:29 crc kubenswrapper[4857]: I1128 13:43:29.177546 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-86gwb"] Nov 28 13:43:29 crc kubenswrapper[4857]: I1128 13:43:29.183888 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rbm42" Nov 28 13:43:29 crc kubenswrapper[4857]: W1128 13:43:29.195984 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2142b9c8_bd24_407d_a45f_b2a9d2019b71.slice/crio-bef7746ca4e46857d6a9348aeb9e33719a034721546eda3f678f0ac6088a1d1f WatchSource:0}: Error finding container bef7746ca4e46857d6a9348aeb9e33719a034721546eda3f678f0ac6088a1d1f: Status 404 returned error can't find the container with id bef7746ca4e46857d6a9348aeb9e33719a034721546eda3f678f0ac6088a1d1f Nov 28 13:43:29 crc kubenswrapper[4857]: I1128 13:43:29.268696 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0767ad94-3350-4068-a3af-7ba756b44b2f-utilities\") pod \"redhat-marketplace-bfv4b\" (UID: \"0767ad94-3350-4068-a3af-7ba756b44b2f\") " pod="openshift-marketplace/redhat-marketplace-bfv4b" Nov 28 13:43:29 crc kubenswrapper[4857]: I1128 13:43:29.268761 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0767ad94-3350-4068-a3af-7ba756b44b2f-catalog-content\") pod \"redhat-marketplace-bfv4b\" (UID: \"0767ad94-3350-4068-a3af-7ba756b44b2f\") " pod="openshift-marketplace/redhat-marketplace-bfv4b" Nov 28 13:43:29 crc kubenswrapper[4857]: I1128 13:43:29.268802 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v8g5x\" (UniqueName: \"kubernetes.io/projected/0767ad94-3350-4068-a3af-7ba756b44b2f-kube-api-access-v8g5x\") pod \"redhat-marketplace-bfv4b\" (UID: \"0767ad94-3350-4068-a3af-7ba756b44b2f\") " pod="openshift-marketplace/redhat-marketplace-bfv4b" Nov 28 13:43:29 crc kubenswrapper[4857]: I1128 13:43:29.269259 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0767ad94-3350-4068-a3af-7ba756b44b2f-utilities\") pod \"redhat-marketplace-bfv4b\" (UID: \"0767ad94-3350-4068-a3af-7ba756b44b2f\") " pod="openshift-marketplace/redhat-marketplace-bfv4b" Nov 28 13:43:29 crc kubenswrapper[4857]: I1128 13:43:29.269515 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0767ad94-3350-4068-a3af-7ba756b44b2f-catalog-content\") pod \"redhat-marketplace-bfv4b\" (UID: \"0767ad94-3350-4068-a3af-7ba756b44b2f\") " pod="openshift-marketplace/redhat-marketplace-bfv4b" Nov 28 13:43:29 crc kubenswrapper[4857]: I1128 13:43:29.292069 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v8g5x\" (UniqueName: \"kubernetes.io/projected/0767ad94-3350-4068-a3af-7ba756b44b2f-kube-api-access-v8g5x\") pod \"redhat-marketplace-bfv4b\" (UID: \"0767ad94-3350-4068-a3af-7ba756b44b2f\") " 
pod="openshift-marketplace/redhat-marketplace-bfv4b" Nov 28 13:43:29 crc kubenswrapper[4857]: I1128 13:43:29.339073 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rbm42"] Nov 28 13:43:29 crc kubenswrapper[4857]: I1128 13:43:29.394164 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bfv4b" Nov 28 13:43:29 crc kubenswrapper[4857]: I1128 13:43:29.635677 4857 generic.go:334] "Generic (PLEG): container finished" podID="fba802c6-069f-4872-9ed0-24ee26941f0f" containerID="87949ecd42457c4774918f89169beb8f48c1225b3837136cf375329e4717fef8" exitCode=0 Nov 28 13:43:29 crc kubenswrapper[4857]: I1128 13:43:29.635787 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rbm42" event={"ID":"fba802c6-069f-4872-9ed0-24ee26941f0f","Type":"ContainerDied","Data":"87949ecd42457c4774918f89169beb8f48c1225b3837136cf375329e4717fef8"} Nov 28 13:43:29 crc kubenswrapper[4857]: I1128 13:43:29.635828 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rbm42" event={"ID":"fba802c6-069f-4872-9ed0-24ee26941f0f","Type":"ContainerStarted","Data":"2660eb06b7abca1cd0ef61cf48b163293b2249f966ba0ac1bfd31a6e9336b79b"} Nov 28 13:43:29 crc kubenswrapper[4857]: I1128 13:43:29.637379 4857 generic.go:334] "Generic (PLEG): container finished" podID="9fa5c6f1-d612-4ae1-993f-b2ebc20a5d91" containerID="07874118ad2672e3dacabbe835f2519ad2c6431f80eeacec682ff16757d862cb" exitCode=0 Nov 28 13:43:29 crc kubenswrapper[4857]: I1128 13:43:29.637419 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mdjp7" event={"ID":"9fa5c6f1-d612-4ae1-993f-b2ebc20a5d91","Type":"ContainerDied","Data":"07874118ad2672e3dacabbe835f2519ad2c6431f80eeacec682ff16757d862cb"} Nov 28 13:43:29 crc kubenswrapper[4857]: I1128 13:43:29.637444 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mdjp7" event={"ID":"9fa5c6f1-d612-4ae1-993f-b2ebc20a5d91","Type":"ContainerStarted","Data":"1d37b1558741572e6110b616a7ba26f57eb14079f7dad7e44284de21da50e90d"} Nov 28 13:43:29 crc kubenswrapper[4857]: I1128 13:43:29.639658 4857 generic.go:334] "Generic (PLEG): container finished" podID="2142b9c8-bd24-407d-a45f-b2a9d2019b71" containerID="26a8bf913affa49dd1af377bbd8ccca263342ee4824413e3df602e10e42792f1" exitCode=0 Nov 28 13:43:29 crc kubenswrapper[4857]: I1128 13:43:29.639684 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-86gwb" event={"ID":"2142b9c8-bd24-407d-a45f-b2a9d2019b71","Type":"ContainerDied","Data":"26a8bf913affa49dd1af377bbd8ccca263342ee4824413e3df602e10e42792f1"} Nov 28 13:43:29 crc kubenswrapper[4857]: I1128 13:43:29.639713 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-86gwb" event={"ID":"2142b9c8-bd24-407d-a45f-b2a9d2019b71","Type":"ContainerStarted","Data":"bef7746ca4e46857d6a9348aeb9e33719a034721546eda3f678f0ac6088a1d1f"} Nov 28 13:43:29 crc kubenswrapper[4857]: I1128 13:43:29.803709 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bfv4b"] Nov 28 13:43:30 crc kubenswrapper[4857]: I1128 13:43:30.646086 4857 generic.go:334] "Generic (PLEG): container finished" podID="2142b9c8-bd24-407d-a45f-b2a9d2019b71" containerID="a02e9d3bdcb3ab0fcf33f6bcd9120cc7a3a299d86b55c395d9443f4ba242dc46" exitCode=0 Nov 28 
Nov 28 13:43:30 crc kubenswrapper[4857]: I1128 13:43:30.646155 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-86gwb" event={"ID":"2142b9c8-bd24-407d-a45f-b2a9d2019b71","Type":"ContainerDied","Data":"a02e9d3bdcb3ab0fcf33f6bcd9120cc7a3a299d86b55c395d9443f4ba242dc46"}
Nov 28 13:43:30 crc kubenswrapper[4857]: I1128 13:43:30.650293 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rbm42" event={"ID":"fba802c6-069f-4872-9ed0-24ee26941f0f","Type":"ContainerStarted","Data":"ac81a6a01cda02e7186514cf8906abedb1c21cfb8c1027e306c6ccb7c99fd343"}
Nov 28 13:43:30 crc kubenswrapper[4857]: I1128 13:43:30.652068 4857 generic.go:334] "Generic (PLEG): container finished" podID="0767ad94-3350-4068-a3af-7ba756b44b2f" containerID="03e0d80c3d1f32bf27a213e42448ff88f2916278e334de1cbf6e2793a7731812" exitCode=0
Nov 28 13:43:30 crc kubenswrapper[4857]: I1128 13:43:30.652100 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bfv4b" event={"ID":"0767ad94-3350-4068-a3af-7ba756b44b2f","Type":"ContainerDied","Data":"03e0d80c3d1f32bf27a213e42448ff88f2916278e334de1cbf6e2793a7731812"}
Nov 28 13:43:30 crc kubenswrapper[4857]: I1128 13:43:30.652442 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bfv4b" event={"ID":"0767ad94-3350-4068-a3af-7ba756b44b2f","Type":"ContainerStarted","Data":"513149adebc891cc660a87629456ddb2c206936410bff722a75d98fd7254e7c0"}
Nov 28 13:43:30 crc kubenswrapper[4857]: I1128 13:43:30.655970 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mdjp7" event={"ID":"9fa5c6f1-d612-4ae1-993f-b2ebc20a5d91","Type":"ContainerStarted","Data":"305763b68da628483ccf5fc07b41a65e2418355bb6dd5471b4dea127952d2e33"}
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.262999 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-t9qsp"]
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.265188 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-t9qsp"
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.268452 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.279269 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-t9qsp"]
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.305637 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fpw2w\" (UniqueName: \"kubernetes.io/projected/7b43afe0-d9e1-4864-9a0e-b7e900755cfd-kube-api-access-fpw2w\") pod \"redhat-operators-t9qsp\" (UID: \"7b43afe0-d9e1-4864-9a0e-b7e900755cfd\") " pod="openshift-marketplace/redhat-operators-t9qsp"
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.305928 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b43afe0-d9e1-4864-9a0e-b7e900755cfd-catalog-content\") pod \"redhat-operators-t9qsp\" (UID: \"7b43afe0-d9e1-4864-9a0e-b7e900755cfd\") " pod="openshift-marketplace/redhat-operators-t9qsp"
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.306145 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b43afe0-d9e1-4864-9a0e-b7e900755cfd-utilities\") pod \"redhat-operators-t9qsp\" (UID: \"7b43afe0-d9e1-4864-9a0e-b7e900755cfd\") " pod="openshift-marketplace/redhat-operators-t9qsp"
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.407218 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b43afe0-d9e1-4864-9a0e-b7e900755cfd-catalog-content\") pod \"redhat-operators-t9qsp\" (UID: \"7b43afe0-d9e1-4864-9a0e-b7e900755cfd\") " pod="openshift-marketplace/redhat-operators-t9qsp"
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.407282 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b43afe0-d9e1-4864-9a0e-b7e900755cfd-utilities\") pod \"redhat-operators-t9qsp\" (UID: \"7b43afe0-d9e1-4864-9a0e-b7e900755cfd\") " pod="openshift-marketplace/redhat-operators-t9qsp"
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.407322 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fpw2w\" (UniqueName: \"kubernetes.io/projected/7b43afe0-d9e1-4864-9a0e-b7e900755cfd-kube-api-access-fpw2w\") pod \"redhat-operators-t9qsp\" (UID: \"7b43afe0-d9e1-4864-9a0e-b7e900755cfd\") " pod="openshift-marketplace/redhat-operators-t9qsp"
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.408564 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b43afe0-d9e1-4864-9a0e-b7e900755cfd-utilities\") pod \"redhat-operators-t9qsp\" (UID: \"7b43afe0-d9e1-4864-9a0e-b7e900755cfd\") " pod="openshift-marketplace/redhat-operators-t9qsp"
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.420664 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b43afe0-d9e1-4864-9a0e-b7e900755cfd-catalog-content\") pod \"redhat-operators-t9qsp\" (UID: \"7b43afe0-d9e1-4864-9a0e-b7e900755cfd\") " pod="openshift-marketplace/redhat-operators-t9qsp"
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.436206 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fpw2w\" (UniqueName: \"kubernetes.io/projected/7b43afe0-d9e1-4864-9a0e-b7e900755cfd-kube-api-access-fpw2w\") pod \"redhat-operators-t9qsp\" (UID: \"7b43afe0-d9e1-4864-9a0e-b7e900755cfd\") " pod="openshift-marketplace/redhat-operators-t9qsp"
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.461667 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-pbgdj"]
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.462593 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pbgdj"
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.472507 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.476625 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pbgdj"]
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.507835 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9efdbce5-c29e-4601-bc0e-0b65a8dd7899-utilities\") pod \"certified-operators-pbgdj\" (UID: \"9efdbce5-c29e-4601-bc0e-0b65a8dd7899\") " pod="openshift-marketplace/certified-operators-pbgdj"
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.507926 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-78jcg\" (UniqueName: \"kubernetes.io/projected/9efdbce5-c29e-4601-bc0e-0b65a8dd7899-kube-api-access-78jcg\") pod \"certified-operators-pbgdj\" (UID: \"9efdbce5-c29e-4601-bc0e-0b65a8dd7899\") " pod="openshift-marketplace/certified-operators-pbgdj"
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.508016 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9efdbce5-c29e-4601-bc0e-0b65a8dd7899-catalog-content\") pod \"certified-operators-pbgdj\" (UID: \"9efdbce5-c29e-4601-bc0e-0b65a8dd7899\") " pod="openshift-marketplace/certified-operators-pbgdj"
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.609670 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9efdbce5-c29e-4601-bc0e-0b65a8dd7899-utilities\") pod \"certified-operators-pbgdj\" (UID: \"9efdbce5-c29e-4601-bc0e-0b65a8dd7899\") " pod="openshift-marketplace/certified-operators-pbgdj"
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.609740 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-78jcg\" (UniqueName: \"kubernetes.io/projected/9efdbce5-c29e-4601-bc0e-0b65a8dd7899-kube-api-access-78jcg\") pod \"certified-operators-pbgdj\" (UID: \"9efdbce5-c29e-4601-bc0e-0b65a8dd7899\") " pod="openshift-marketplace/certified-operators-pbgdj"
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.609764 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9efdbce5-c29e-4601-bc0e-0b65a8dd7899-catalog-content\") pod \"certified-operators-pbgdj\" (UID: \"9efdbce5-c29e-4601-bc0e-0b65a8dd7899\") " pod="openshift-marketplace/certified-operators-pbgdj"
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.610204 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9efdbce5-c29e-4601-bc0e-0b65a8dd7899-catalog-content\") pod \"certified-operators-pbgdj\" (UID: \"9efdbce5-c29e-4601-bc0e-0b65a8dd7899\") " pod="openshift-marketplace/certified-operators-pbgdj"
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.610451 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9efdbce5-c29e-4601-bc0e-0b65a8dd7899-utilities\") pod \"certified-operators-pbgdj\" (UID: \"9efdbce5-c29e-4601-bc0e-0b65a8dd7899\") " pod="openshift-marketplace/certified-operators-pbgdj"
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.628009 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-t9qsp"
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.639808 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-78jcg\" (UniqueName: \"kubernetes.io/projected/9efdbce5-c29e-4601-bc0e-0b65a8dd7899-kube-api-access-78jcg\") pod \"certified-operators-pbgdj\" (UID: \"9efdbce5-c29e-4601-bc0e-0b65a8dd7899\") " pod="openshift-marketplace/certified-operators-pbgdj"
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.667820 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-v4nk2"]
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.671782 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-v4nk2"
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.678834 4857 generic.go:334] "Generic (PLEG): container finished" podID="fba802c6-069f-4872-9ed0-24ee26941f0f" containerID="ac81a6a01cda02e7186514cf8906abedb1c21cfb8c1027e306c6ccb7c99fd343" exitCode=0
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.678916 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rbm42" event={"ID":"fba802c6-069f-4872-9ed0-24ee26941f0f","Type":"ContainerDied","Data":"ac81a6a01cda02e7186514cf8906abedb1c21cfb8c1027e306c6ccb7c99fd343"}
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.682641 4857 generic.go:334] "Generic (PLEG): container finished" podID="0767ad94-3350-4068-a3af-7ba756b44b2f" containerID="cf742b1b45d819eec18ddffc246cebec0c5344f4dae1aa2356c87c269566aa0e" exitCode=0
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.687649 4857 generic.go:334] "Generic (PLEG): container finished" podID="9fa5c6f1-d612-4ae1-993f-b2ebc20a5d91" containerID="305763b68da628483ccf5fc07b41a65e2418355bb6dd5471b4dea127952d2e33" exitCode=0
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.688533 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-v4nk2"]
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.690899 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bfv4b" event={"ID":"0767ad94-3350-4068-a3af-7ba756b44b2f","Type":"ContainerDied","Data":"cf742b1b45d819eec18ddffc246cebec0c5344f4dae1aa2356c87c269566aa0e"}
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.690934 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mdjp7" event={"ID":"9fa5c6f1-d612-4ae1-993f-b2ebc20a5d91","Type":"ContainerDied","Data":"305763b68da628483ccf5fc07b41a65e2418355bb6dd5471b4dea127952d2e33"}
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.699541 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-86gwb" event={"ID":"2142b9c8-bd24-407d-a45f-b2a9d2019b71","Type":"ContainerStarted","Data":"66fac78ace86402c578cb69d32d99e52bd705c67d02ab05a86a354d179dfc09c"}
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.711975 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-72ffc\" (UniqueName: \"kubernetes.io/projected/243ce4f9-61d4-4a8a-a112-35f3ecdba48a-kube-api-access-72ffc\") pod \"redhat-operators-v4nk2\" (UID: \"243ce4f9-61d4-4a8a-a112-35f3ecdba48a\") " pod="openshift-marketplace/redhat-operators-v4nk2"
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.712039 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/243ce4f9-61d4-4a8a-a112-35f3ecdba48a-utilities\") pod \"redhat-operators-v4nk2\" (UID: \"243ce4f9-61d4-4a8a-a112-35f3ecdba48a\") " pod="openshift-marketplace/redhat-operators-v4nk2"
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.712099 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/243ce4f9-61d4-4a8a-a112-35f3ecdba48a-catalog-content\") pod \"redhat-operators-v4nk2\" (UID: \"243ce4f9-61d4-4a8a-a112-35f3ecdba48a\") " pod="openshift-marketplace/redhat-operators-v4nk2"
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.785460 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-86gwb" podStartSLOduration=2.177550167 podStartE2EDuration="3.785443572s" podCreationTimestamp="2025-11-28 13:43:28 +0000 UTC" firstStartedPulling="2025-11-28 13:43:29.640974631 +0000 UTC m=+859.764916078" lastFinishedPulling="2025-11-28 13:43:31.248868016 +0000 UTC m=+861.372809483" observedRunningTime="2025-11-28 13:43:31.78171716 +0000 UTC m=+861.905658597" watchObservedRunningTime="2025-11-28 13:43:31.785443572 +0000 UTC m=+861.909384999"
Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.805832 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pbgdj"
Need to start a new one" pod="openshift-marketplace/certified-operators-pbgdj" Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.812828 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-72ffc\" (UniqueName: \"kubernetes.io/projected/243ce4f9-61d4-4a8a-a112-35f3ecdba48a-kube-api-access-72ffc\") pod \"redhat-operators-v4nk2\" (UID: \"243ce4f9-61d4-4a8a-a112-35f3ecdba48a\") " pod="openshift-marketplace/redhat-operators-v4nk2" Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.812891 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/243ce4f9-61d4-4a8a-a112-35f3ecdba48a-utilities\") pod \"redhat-operators-v4nk2\" (UID: \"243ce4f9-61d4-4a8a-a112-35f3ecdba48a\") " pod="openshift-marketplace/redhat-operators-v4nk2" Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.812966 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/243ce4f9-61d4-4a8a-a112-35f3ecdba48a-catalog-content\") pod \"redhat-operators-v4nk2\" (UID: \"243ce4f9-61d4-4a8a-a112-35f3ecdba48a\") " pod="openshift-marketplace/redhat-operators-v4nk2" Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.813645 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/243ce4f9-61d4-4a8a-a112-35f3ecdba48a-utilities\") pod \"redhat-operators-v4nk2\" (UID: \"243ce4f9-61d4-4a8a-a112-35f3ecdba48a\") " pod="openshift-marketplace/redhat-operators-v4nk2" Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.813919 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/243ce4f9-61d4-4a8a-a112-35f3ecdba48a-catalog-content\") pod \"redhat-operators-v4nk2\" (UID: \"243ce4f9-61d4-4a8a-a112-35f3ecdba48a\") " pod="openshift-marketplace/redhat-operators-v4nk2" Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.834470 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-72ffc\" (UniqueName: \"kubernetes.io/projected/243ce4f9-61d4-4a8a-a112-35f3ecdba48a-kube-api-access-72ffc\") pod \"redhat-operators-v4nk2\" (UID: \"243ce4f9-61d4-4a8a-a112-35f3ecdba48a\") " pod="openshift-marketplace/redhat-operators-v4nk2" Nov 28 13:43:31 crc kubenswrapper[4857]: I1128 13:43:31.870975 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-t9qsp"] Nov 28 13:43:31 crc kubenswrapper[4857]: W1128 13:43:31.881100 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7b43afe0_d9e1_4864_9a0e_b7e900755cfd.slice/crio-51f4ed338558ebf2b365107d2413cfa1f15cf2f7c884e0a3367696fd8f51a248 WatchSource:0}: Error finding container 51f4ed338558ebf2b365107d2413cfa1f15cf2f7c884e0a3367696fd8f51a248: Status 404 returned error can't find the container with id 51f4ed338558ebf2b365107d2413cfa1f15cf2f7c884e0a3367696fd8f51a248 Nov 28 13:43:32 crc kubenswrapper[4857]: I1128 13:43:32.011489 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pbgdj"] Nov 28 13:43:32 crc kubenswrapper[4857]: I1128 13:43:32.012298 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-v4nk2" Nov 28 13:43:32 crc kubenswrapper[4857]: I1128 13:43:32.216244 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-v4nk2"] Nov 28 13:43:32 crc kubenswrapper[4857]: W1128 13:43:32.224996 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod243ce4f9_61d4_4a8a_a112_35f3ecdba48a.slice/crio-6570ca611cbbc16bb35fa69cb0cbc2ce34d95dca2c05e01cea86ac8202a10874 WatchSource:0}: Error finding container 6570ca611cbbc16bb35fa69cb0cbc2ce34d95dca2c05e01cea86ac8202a10874: Status 404 returned error can't find the container with id 6570ca611cbbc16bb35fa69cb0cbc2ce34d95dca2c05e01cea86ac8202a10874 Nov 28 13:43:32 crc kubenswrapper[4857]: I1128 13:43:32.706610 4857 generic.go:334] "Generic (PLEG): container finished" podID="7b43afe0-d9e1-4864-9a0e-b7e900755cfd" containerID="81b14154535b4fc42a5598b2f5f4e832dfd2faf34881467d76371de0beba130c" exitCode=0 Nov 28 13:43:32 crc kubenswrapper[4857]: I1128 13:43:32.706695 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t9qsp" event={"ID":"7b43afe0-d9e1-4864-9a0e-b7e900755cfd","Type":"ContainerDied","Data":"81b14154535b4fc42a5598b2f5f4e832dfd2faf34881467d76371de0beba130c"} Nov 28 13:43:32 crc kubenswrapper[4857]: I1128 13:43:32.706739 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t9qsp" event={"ID":"7b43afe0-d9e1-4864-9a0e-b7e900755cfd","Type":"ContainerStarted","Data":"51f4ed338558ebf2b365107d2413cfa1f15cf2f7c884e0a3367696fd8f51a248"} Nov 28 13:43:32 crc kubenswrapper[4857]: I1128 13:43:32.709884 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mdjp7" event={"ID":"9fa5c6f1-d612-4ae1-993f-b2ebc20a5d91","Type":"ContainerStarted","Data":"bc756c46622e872ec49ef1e33973eaf32517e263032bbb7443c7a31c8631c759"} Nov 28 13:43:32 crc kubenswrapper[4857]: I1128 13:43:32.713181 4857 generic.go:334] "Generic (PLEG): container finished" podID="9efdbce5-c29e-4601-bc0e-0b65a8dd7899" containerID="846a4d63c6dd4ec22c650ff501b64bd62fa270b278543476618ec41a7158afe2" exitCode=0 Nov 28 13:43:32 crc kubenswrapper[4857]: I1128 13:43:32.713261 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pbgdj" event={"ID":"9efdbce5-c29e-4601-bc0e-0b65a8dd7899","Type":"ContainerDied","Data":"846a4d63c6dd4ec22c650ff501b64bd62fa270b278543476618ec41a7158afe2"} Nov 28 13:43:32 crc kubenswrapper[4857]: I1128 13:43:32.713294 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pbgdj" event={"ID":"9efdbce5-c29e-4601-bc0e-0b65a8dd7899","Type":"ContainerStarted","Data":"c3a984c02b345fb4e352a6adb6e9d5c7c73a15b8e5e73fbd487b57b984b41735"} Nov 28 13:43:32 crc kubenswrapper[4857]: I1128 13:43:32.716074 4857 generic.go:334] "Generic (PLEG): container finished" podID="243ce4f9-61d4-4a8a-a112-35f3ecdba48a" containerID="efa4514dfe054b70403830d7cf3307868106978fb31835823a04d22b6dd5043e" exitCode=0 Nov 28 13:43:32 crc kubenswrapper[4857]: I1128 13:43:32.716338 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v4nk2" event={"ID":"243ce4f9-61d4-4a8a-a112-35f3ecdba48a","Type":"ContainerDied","Data":"efa4514dfe054b70403830d7cf3307868106978fb31835823a04d22b6dd5043e"} Nov 28 13:43:32 crc kubenswrapper[4857]: I1128 13:43:32.716405 4857 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v4nk2" event={"ID":"243ce4f9-61d4-4a8a-a112-35f3ecdba48a","Type":"ContainerStarted","Data":"6570ca611cbbc16bb35fa69cb0cbc2ce34d95dca2c05e01cea86ac8202a10874"} Nov 28 13:43:32 crc kubenswrapper[4857]: I1128 13:43:32.719862 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rbm42" event={"ID":"fba802c6-069f-4872-9ed0-24ee26941f0f","Type":"ContainerStarted","Data":"0b7d8d2c1618f60d8d3158035ccd9b5ba4cb2584d0499071431099ac9c92f357"} Nov 28 13:43:32 crc kubenswrapper[4857]: I1128 13:43:32.724606 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bfv4b" event={"ID":"0767ad94-3350-4068-a3af-7ba756b44b2f","Type":"ContainerStarted","Data":"4775c493df87851970d8e42016c5e2d42648011cfdc1133ab7e1cef6e84e854b"} Nov 28 13:43:32 crc kubenswrapper[4857]: I1128 13:43:32.770417 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-rbm42" podStartSLOduration=2.273966259 podStartE2EDuration="4.770390554s" podCreationTimestamp="2025-11-28 13:43:28 +0000 UTC" firstStartedPulling="2025-11-28 13:43:29.637549927 +0000 UTC m=+859.761491364" lastFinishedPulling="2025-11-28 13:43:32.133974222 +0000 UTC m=+862.257915659" observedRunningTime="2025-11-28 13:43:32.766028395 +0000 UTC m=+862.889969832" watchObservedRunningTime="2025-11-28 13:43:32.770390554 +0000 UTC m=+862.894331991" Nov 28 13:43:32 crc kubenswrapper[4857]: I1128 13:43:32.808866 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-mdjp7" podStartSLOduration=2.3169867650000002 podStartE2EDuration="4.808851625s" podCreationTimestamp="2025-11-28 13:43:28 +0000 UTC" firstStartedPulling="2025-11-28 13:43:29.63948738 +0000 UTC m=+859.763428807" lastFinishedPulling="2025-11-28 13:43:32.13135223 +0000 UTC m=+862.255293667" observedRunningTime="2025-11-28 13:43:32.80573998 +0000 UTC m=+862.929681417" watchObservedRunningTime="2025-11-28 13:43:32.808851625 +0000 UTC m=+862.932793062" Nov 28 13:43:32 crc kubenswrapper[4857]: I1128 13:43:32.828625 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-bfv4b" podStartSLOduration=2.237825355 podStartE2EDuration="3.828602214s" podCreationTimestamp="2025-11-28 13:43:29 +0000 UTC" firstStartedPulling="2025-11-28 13:43:30.653204017 +0000 UTC m=+860.777145454" lastFinishedPulling="2025-11-28 13:43:32.243980876 +0000 UTC m=+862.367922313" observedRunningTime="2025-11-28 13:43:32.824527633 +0000 UTC m=+862.948469070" watchObservedRunningTime="2025-11-28 13:43:32.828602214 +0000 UTC m=+862.952543651" Nov 28 13:43:34 crc kubenswrapper[4857]: I1128 13:43:34.740247 4857 generic.go:334] "Generic (PLEG): container finished" podID="7b43afe0-d9e1-4864-9a0e-b7e900755cfd" containerID="0c761261ee26948cc87c024ccf91569fa78b195e96035be8f6956fc7e00388fa" exitCode=0 Nov 28 13:43:34 crc kubenswrapper[4857]: I1128 13:43:34.740373 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t9qsp" event={"ID":"7b43afe0-d9e1-4864-9a0e-b7e900755cfd","Type":"ContainerDied","Data":"0c761261ee26948cc87c024ccf91569fa78b195e96035be8f6956fc7e00388fa"} Nov 28 13:43:34 crc kubenswrapper[4857]: I1128 13:43:34.743660 4857 generic.go:334] "Generic (PLEG): container finished" podID="9efdbce5-c29e-4601-bc0e-0b65a8dd7899" 
containerID="5aad8171f8d7fe2c692ccb90553a51eb6751f9cf7b229c2c61f4e302d8906f6a" exitCode=0 Nov 28 13:43:34 crc kubenswrapper[4857]: I1128 13:43:34.743722 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pbgdj" event={"ID":"9efdbce5-c29e-4601-bc0e-0b65a8dd7899","Type":"ContainerDied","Data":"5aad8171f8d7fe2c692ccb90553a51eb6751f9cf7b229c2c61f4e302d8906f6a"} Nov 28 13:43:34 crc kubenswrapper[4857]: I1128 13:43:34.745694 4857 generic.go:334] "Generic (PLEG): container finished" podID="243ce4f9-61d4-4a8a-a112-35f3ecdba48a" containerID="2faf9a8fef874f6818c9b1cb6921e09da4204479730df91094de31ea20797b41" exitCode=0 Nov 28 13:43:34 crc kubenswrapper[4857]: I1128 13:43:34.745721 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v4nk2" event={"ID":"243ce4f9-61d4-4a8a-a112-35f3ecdba48a","Type":"ContainerDied","Data":"2faf9a8fef874f6818c9b1cb6921e09da4204479730df91094de31ea20797b41"} Nov 28 13:43:35 crc kubenswrapper[4857]: I1128 13:43:35.755453 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v4nk2" event={"ID":"243ce4f9-61d4-4a8a-a112-35f3ecdba48a","Type":"ContainerStarted","Data":"369718bab05d14ae16c302bb14350dd9eae2395f2f98ce4a2c06260365935fa4"} Nov 28 13:43:35 crc kubenswrapper[4857]: I1128 13:43:35.759123 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t9qsp" event={"ID":"7b43afe0-d9e1-4864-9a0e-b7e900755cfd","Type":"ContainerStarted","Data":"d002517a34c6e707b0f8af637a0e4beebe14a2a9353519c10656a724939d95e9"} Nov 28 13:43:35 crc kubenswrapper[4857]: I1128 13:43:35.761801 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pbgdj" event={"ID":"9efdbce5-c29e-4601-bc0e-0b65a8dd7899","Type":"ContainerStarted","Data":"6487608a645ff66f82c8a01b776ec303ecb5feaff63daa9ef1f9da55971ff37e"} Nov 28 13:43:35 crc kubenswrapper[4857]: I1128 13:43:35.784884 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-v4nk2" podStartSLOduration=2.216051177 podStartE2EDuration="4.784866868s" podCreationTimestamp="2025-11-28 13:43:31 +0000 UTC" firstStartedPulling="2025-11-28 13:43:32.717390847 +0000 UTC m=+862.841332284" lastFinishedPulling="2025-11-28 13:43:35.286206508 +0000 UTC m=+865.410147975" observedRunningTime="2025-11-28 13:43:35.780962832 +0000 UTC m=+865.904904269" watchObservedRunningTime="2025-11-28 13:43:35.784866868 +0000 UTC m=+865.908808295" Nov 28 13:43:35 crc kubenswrapper[4857]: I1128 13:43:35.804329 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-t9qsp" podStartSLOduration=2.250417215 podStartE2EDuration="4.804309139s" podCreationTimestamp="2025-11-28 13:43:31 +0000 UTC" firstStartedPulling="2025-11-28 13:43:32.710273622 +0000 UTC m=+862.834215079" lastFinishedPulling="2025-11-28 13:43:35.264165556 +0000 UTC m=+865.388107003" observedRunningTime="2025-11-28 13:43:35.802697845 +0000 UTC m=+865.926639282" watchObservedRunningTime="2025-11-28 13:43:35.804309139 +0000 UTC m=+865.928250576" Nov 28 13:43:35 crc kubenswrapper[4857]: I1128 13:43:35.824793 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-pbgdj" podStartSLOduration=2.37396231 podStartE2EDuration="4.824767998s" podCreationTimestamp="2025-11-28 13:43:31 +0000 UTC" firstStartedPulling="2025-11-28 
13:43:32.714438056 +0000 UTC m=+862.838379493" lastFinishedPulling="2025-11-28 13:43:35.165243744 +0000 UTC m=+865.289185181" observedRunningTime="2025-11-28 13:43:35.816513153 +0000 UTC m=+865.940454590" watchObservedRunningTime="2025-11-28 13:43:35.824767998 +0000 UTC m=+865.948709445" Nov 28 13:43:38 crc kubenswrapper[4857]: I1128 13:43:38.795018 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-mdjp7" Nov 28 13:43:38 crc kubenswrapper[4857]: I1128 13:43:38.795061 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-mdjp7" Nov 28 13:43:38 crc kubenswrapper[4857]: I1128 13:43:38.838095 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-mdjp7" Nov 28 13:43:38 crc kubenswrapper[4857]: I1128 13:43:38.996846 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-86gwb" Nov 28 13:43:38 crc kubenswrapper[4857]: I1128 13:43:38.996930 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-86gwb" Nov 28 13:43:39 crc kubenswrapper[4857]: I1128 13:43:39.039182 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-86gwb" Nov 28 13:43:39 crc kubenswrapper[4857]: I1128 13:43:39.184497 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-rbm42" Nov 28 13:43:39 crc kubenswrapper[4857]: I1128 13:43:39.184555 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-rbm42" Nov 28 13:43:39 crc kubenswrapper[4857]: I1128 13:43:39.237595 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-rbm42" Nov 28 13:43:39 crc kubenswrapper[4857]: I1128 13:43:39.395114 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-bfv4b" Nov 28 13:43:39 crc kubenswrapper[4857]: I1128 13:43:39.395176 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-bfv4b" Nov 28 13:43:39 crc kubenswrapper[4857]: I1128 13:43:39.460131 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-bfv4b" Nov 28 13:43:39 crc kubenswrapper[4857]: I1128 13:43:39.832839 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-bfv4b" Nov 28 13:43:39 crc kubenswrapper[4857]: I1128 13:43:39.837110 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-86gwb" Nov 28 13:43:39 crc kubenswrapper[4857]: I1128 13:43:39.837429 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-rbm42" Nov 28 13:43:39 crc kubenswrapper[4857]: I1128 13:43:39.838773 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-mdjp7" Nov 28 13:43:41 crc kubenswrapper[4857]: I1128 13:43:41.629248 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-t9qsp" Nov 28 13:43:41 crc kubenswrapper[4857]: I1128 
Nov 28 13:43:41 crc kubenswrapper[4857]: I1128 13:43:41.629352 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-t9qsp"
Nov 28 13:43:41 crc kubenswrapper[4857]: I1128 13:43:41.711979 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-t9qsp"
Nov 28 13:43:41 crc kubenswrapper[4857]: I1128 13:43:41.806689 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-pbgdj"
Nov 28 13:43:41 crc kubenswrapper[4857]: I1128 13:43:41.806728 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-pbgdj"
Nov 28 13:43:41 crc kubenswrapper[4857]: I1128 13:43:41.838611 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-t9qsp"
Nov 28 13:43:41 crc kubenswrapper[4857]: I1128 13:43:41.847731 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rbm42"]
Nov 28 13:43:41 crc kubenswrapper[4857]: I1128 13:43:41.847998 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-rbm42" podUID="fba802c6-069f-4872-9ed0-24ee26941f0f" containerName="registry-server" containerID="cri-o://0b7d8d2c1618f60d8d3158035ccd9b5ba4cb2584d0499071431099ac9c92f357" gracePeriod=2
Nov 28 13:43:41 crc kubenswrapper[4857]: I1128 13:43:41.862561 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-pbgdj"
Nov 28 13:43:42 crc kubenswrapper[4857]: I1128 13:43:42.012635 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-v4nk2"
Nov 28 13:43:42 crc kubenswrapper[4857]: I1128 13:43:42.013024 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-v4nk2"
Nov 28 13:43:42 crc kubenswrapper[4857]: I1128 13:43:42.047327 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bfv4b"]
Nov 28 13:43:42 crc kubenswrapper[4857]: I1128 13:43:42.047546 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-bfv4b" podUID="0767ad94-3350-4068-a3af-7ba756b44b2f" containerName="registry-server" containerID="cri-o://4775c493df87851970d8e42016c5e2d42648011cfdc1133ab7e1cef6e84e854b" gracePeriod=2
Nov 28 13:43:42 crc kubenswrapper[4857]: I1128 13:43:42.053079 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-v4nk2"
Nov 28 13:43:42 crc kubenswrapper[4857]: I1128 13:43:42.843431 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-v4nk2"
Nov 28 13:43:42 crc kubenswrapper[4857]: I1128 13:43:42.845367 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-pbgdj"
Nov 28 13:43:44 crc kubenswrapper[4857]: I1128 13:43:44.249823 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-v4nk2"]
Nov 28 13:43:44 crc kubenswrapper[4857]: I1128 13:43:44.811124 4857 generic.go:334] "Generic (PLEG): container finished" podID="fba802c6-069f-4872-9ed0-24ee26941f0f" containerID="0b7d8d2c1618f60d8d3158035ccd9b5ba4cb2584d0499071431099ac9c92f357" exitCode=0
Nov 28 13:43:44 crc kubenswrapper[4857]: I1128 13:43:44.811150 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rbm42" event={"ID":"fba802c6-069f-4872-9ed0-24ee26941f0f","Type":"ContainerDied","Data":"0b7d8d2c1618f60d8d3158035ccd9b5ba4cb2584d0499071431099ac9c92f357"}
Nov 28 13:43:44 crc kubenswrapper[4857]: I1128 13:43:44.811588 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-v4nk2" podUID="243ce4f9-61d4-4a8a-a112-35f3ecdba48a" containerName="registry-server" containerID="cri-o://369718bab05d14ae16c302bb14350dd9eae2395f2f98ce4a2c06260365935fa4" gracePeriod=2
Nov 28 13:43:45 crc kubenswrapper[4857]: I1128 13:43:45.818017 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bfv4b_0767ad94-3350-4068-a3af-7ba756b44b2f/registry-server/0.log"
Nov 28 13:43:45 crc kubenswrapper[4857]: I1128 13:43:45.819395 4857 generic.go:334] "Generic (PLEG): container finished" podID="0767ad94-3350-4068-a3af-7ba756b44b2f" containerID="4775c493df87851970d8e42016c5e2d42648011cfdc1133ab7e1cef6e84e854b" exitCode=137
Nov 28 13:43:45 crc kubenswrapper[4857]: I1128 13:43:45.819452 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bfv4b" event={"ID":"0767ad94-3350-4068-a3af-7ba756b44b2f","Type":"ContainerDied","Data":"4775c493df87851970d8e42016c5e2d42648011cfdc1133ab7e1cef6e84e854b"}
Nov 28 13:43:46 crc kubenswrapper[4857]: I1128 13:43:46.673419 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rbm42"
Nov 28 13:43:46 crc kubenswrapper[4857]: I1128 13:43:46.678217 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bfv4b_0767ad94-3350-4068-a3af-7ba756b44b2f/registry-server/0.log"
Nov 28 13:43:46 crc kubenswrapper[4857]: I1128 13:43:46.679108 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bfv4b"
Nov 28 13:43:46 crc kubenswrapper[4857]: I1128 13:43:46.796729 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fba802c6-069f-4872-9ed0-24ee26941f0f-catalog-content\") pod \"fba802c6-069f-4872-9ed0-24ee26941f0f\" (UID: \"fba802c6-069f-4872-9ed0-24ee26941f0f\") "
Nov 28 13:43:46 crc kubenswrapper[4857]: I1128 13:43:46.796842 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jls8c\" (UniqueName: \"kubernetes.io/projected/fba802c6-069f-4872-9ed0-24ee26941f0f-kube-api-access-jls8c\") pod \"fba802c6-069f-4872-9ed0-24ee26941f0f\" (UID: \"fba802c6-069f-4872-9ed0-24ee26941f0f\") "
Nov 28 13:43:46 crc kubenswrapper[4857]: I1128 13:43:46.796877 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fba802c6-069f-4872-9ed0-24ee26941f0f-utilities\") pod \"fba802c6-069f-4872-9ed0-24ee26941f0f\" (UID: \"fba802c6-069f-4872-9ed0-24ee26941f0f\") "
Nov 28 13:43:46 crc kubenswrapper[4857]: I1128 13:43:46.796906 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0767ad94-3350-4068-a3af-7ba756b44b2f-utilities\") pod \"0767ad94-3350-4068-a3af-7ba756b44b2f\" (UID: \"0767ad94-3350-4068-a3af-7ba756b44b2f\") "
Nov 28 13:43:46 crc kubenswrapper[4857]: I1128 13:43:46.796939 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0767ad94-3350-4068-a3af-7ba756b44b2f-catalog-content\") pod \"0767ad94-3350-4068-a3af-7ba756b44b2f\" (UID: \"0767ad94-3350-4068-a3af-7ba756b44b2f\") "
Nov 28 13:43:46 crc kubenswrapper[4857]: I1128 13:43:46.797023 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v8g5x\" (UniqueName: \"kubernetes.io/projected/0767ad94-3350-4068-a3af-7ba756b44b2f-kube-api-access-v8g5x\") pod \"0767ad94-3350-4068-a3af-7ba756b44b2f\" (UID: \"0767ad94-3350-4068-a3af-7ba756b44b2f\") "
Nov 28 13:43:46 crc kubenswrapper[4857]: I1128 13:43:46.799057 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0767ad94-3350-4068-a3af-7ba756b44b2f-utilities" (OuterVolumeSpecName: "utilities") pod "0767ad94-3350-4068-a3af-7ba756b44b2f" (UID: "0767ad94-3350-4068-a3af-7ba756b44b2f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:43:46 crc kubenswrapper[4857]: I1128 13:43:46.800115 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fba802c6-069f-4872-9ed0-24ee26941f0f-utilities" (OuterVolumeSpecName: "utilities") pod "fba802c6-069f-4872-9ed0-24ee26941f0f" (UID: "fba802c6-069f-4872-9ed0-24ee26941f0f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:43:46 crc kubenswrapper[4857]: I1128 13:43:46.803146 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fba802c6-069f-4872-9ed0-24ee26941f0f-kube-api-access-jls8c" (OuterVolumeSpecName: "kube-api-access-jls8c") pod "fba802c6-069f-4872-9ed0-24ee26941f0f" (UID: "fba802c6-069f-4872-9ed0-24ee26941f0f"). InnerVolumeSpecName "kube-api-access-jls8c". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:43:46 crc kubenswrapper[4857]: I1128 13:43:46.803537 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0767ad94-3350-4068-a3af-7ba756b44b2f-kube-api-access-v8g5x" (OuterVolumeSpecName: "kube-api-access-v8g5x") pod "0767ad94-3350-4068-a3af-7ba756b44b2f" (UID: "0767ad94-3350-4068-a3af-7ba756b44b2f"). InnerVolumeSpecName "kube-api-access-v8g5x". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:43:46 crc kubenswrapper[4857]: I1128 13:43:46.828035 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0767ad94-3350-4068-a3af-7ba756b44b2f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0767ad94-3350-4068-a3af-7ba756b44b2f" (UID: "0767ad94-3350-4068-a3af-7ba756b44b2f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:43:46 crc kubenswrapper[4857]: I1128 13:43:46.832355 4857 generic.go:334] "Generic (PLEG): container finished" podID="243ce4f9-61d4-4a8a-a112-35f3ecdba48a" containerID="369718bab05d14ae16c302bb14350dd9eae2395f2f98ce4a2c06260365935fa4" exitCode=0
Nov 28 13:43:46 crc kubenswrapper[4857]: I1128 13:43:46.832545 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v4nk2" event={"ID":"243ce4f9-61d4-4a8a-a112-35f3ecdba48a","Type":"ContainerDied","Data":"369718bab05d14ae16c302bb14350dd9eae2395f2f98ce4a2c06260365935fa4"}
Nov 28 13:43:46 crc kubenswrapper[4857]: I1128 13:43:46.835134 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rbm42" event={"ID":"fba802c6-069f-4872-9ed0-24ee26941f0f","Type":"ContainerDied","Data":"2660eb06b7abca1cd0ef61cf48b163293b2249f966ba0ac1bfd31a6e9336b79b"}
Nov 28 13:43:46 crc kubenswrapper[4857]: I1128 13:43:46.835278 4857 scope.go:117] "RemoveContainer" containerID="0b7d8d2c1618f60d8d3158035ccd9b5ba4cb2584d0499071431099ac9c92f357"
Nov 28 13:43:46 crc kubenswrapper[4857]: I1128 13:43:46.835479 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rbm42"
Nov 28 13:43:46 crc kubenswrapper[4857]: I1128 13:43:46.839209 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-bfv4b_0767ad94-3350-4068-a3af-7ba756b44b2f/registry-server/0.log"
Nov 28 13:43:46 crc kubenswrapper[4857]: I1128 13:43:46.843784 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bfv4b" event={"ID":"0767ad94-3350-4068-a3af-7ba756b44b2f","Type":"ContainerDied","Data":"513149adebc891cc660a87629456ddb2c206936410bff722a75d98fd7254e7c0"}
Nov 28 13:43:46 crc kubenswrapper[4857]: I1128 13:43:46.844015 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bfv4b"
Nov 28 13:43:46 crc kubenswrapper[4857]: I1128 13:43:46.877909 4857 scope.go:117] "RemoveContainer" containerID="ac81a6a01cda02e7186514cf8906abedb1c21cfb8c1027e306c6ccb7c99fd343"
Nov 28 13:43:46 crc kubenswrapper[4857]: I1128 13:43:46.884871 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bfv4b"]
Nov 28 13:43:46 crc kubenswrapper[4857]: I1128 13:43:46.893720 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-bfv4b"]
Nov 28 13:43:46 crc kubenswrapper[4857]: I1128 13:43:46.899166 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jls8c\" (UniqueName: \"kubernetes.io/projected/fba802c6-069f-4872-9ed0-24ee26941f0f-kube-api-access-jls8c\") on node \"crc\" DevicePath \"\""
Nov 28 13:43:46 crc kubenswrapper[4857]: I1128 13:43:46.899214 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fba802c6-069f-4872-9ed0-24ee26941f0f-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 13:43:46 crc kubenswrapper[4857]: I1128 13:43:46.899236 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0767ad94-3350-4068-a3af-7ba756b44b2f-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 13:43:46 crc kubenswrapper[4857]: I1128 13:43:46.899253 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0767ad94-3350-4068-a3af-7ba756b44b2f-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 13:43:46 crc kubenswrapper[4857]: I1128 13:43:46.899271 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v8g5x\" (UniqueName: \"kubernetes.io/projected/0767ad94-3350-4068-a3af-7ba756b44b2f-kube-api-access-v8g5x\") on node \"crc\" DevicePath \"\""
Nov 28 13:43:46 crc kubenswrapper[4857]: I1128 13:43:46.914350 4857 scope.go:117] "RemoveContainer" containerID="87949ecd42457c4774918f89169beb8f48c1225b3837136cf375329e4717fef8"
Nov 28 13:43:46 crc kubenswrapper[4857]: I1128 13:43:46.931865 4857 scope.go:117] "RemoveContainer" containerID="4775c493df87851970d8e42016c5e2d42648011cfdc1133ab7e1cef6e84e854b"
Nov 28 13:43:46 crc kubenswrapper[4857]: I1128 13:43:46.952460 4857 scope.go:117] "RemoveContainer" containerID="cf742b1b45d819eec18ddffc246cebec0c5344f4dae1aa2356c87c269566aa0e"
Nov 28 13:43:46 crc kubenswrapper[4857]: I1128 13:43:46.973510 4857 scope.go:117] "RemoveContainer" containerID="03e0d80c3d1f32bf27a213e42448ff88f2916278e334de1cbf6e2793a7731812"
Nov 28 13:43:47 crc kubenswrapper[4857]: I1128 13:43:47.420680 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fba802c6-069f-4872-9ed0-24ee26941f0f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fba802c6-069f-4872-9ed0-24ee26941f0f" (UID: "fba802c6-069f-4872-9ed0-24ee26941f0f"). InnerVolumeSpecName "catalog-content".
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:43:47 crc kubenswrapper[4857]: I1128 13:43:47.467362 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rbm42"] Nov 28 13:43:47 crc kubenswrapper[4857]: I1128 13:43:47.470860 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-rbm42"] Nov 28 13:43:47 crc kubenswrapper[4857]: I1128 13:43:47.551675 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fba802c6-069f-4872-9ed0-24ee26941f0f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:47 crc kubenswrapper[4857]: I1128 13:43:47.722389 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-v4nk2" Nov 28 13:43:47 crc kubenswrapper[4857]: I1128 13:43:47.754369 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/243ce4f9-61d4-4a8a-a112-35f3ecdba48a-catalog-content\") pod \"243ce4f9-61d4-4a8a-a112-35f3ecdba48a\" (UID: \"243ce4f9-61d4-4a8a-a112-35f3ecdba48a\") " Nov 28 13:43:47 crc kubenswrapper[4857]: I1128 13:43:47.754424 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-72ffc\" (UniqueName: \"kubernetes.io/projected/243ce4f9-61d4-4a8a-a112-35f3ecdba48a-kube-api-access-72ffc\") pod \"243ce4f9-61d4-4a8a-a112-35f3ecdba48a\" (UID: \"243ce4f9-61d4-4a8a-a112-35f3ecdba48a\") " Nov 28 13:43:47 crc kubenswrapper[4857]: I1128 13:43:47.754500 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/243ce4f9-61d4-4a8a-a112-35f3ecdba48a-utilities\") pod \"243ce4f9-61d4-4a8a-a112-35f3ecdba48a\" (UID: \"243ce4f9-61d4-4a8a-a112-35f3ecdba48a\") " Nov 28 13:43:47 crc kubenswrapper[4857]: I1128 13:43:47.755605 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/243ce4f9-61d4-4a8a-a112-35f3ecdba48a-utilities" (OuterVolumeSpecName: "utilities") pod "243ce4f9-61d4-4a8a-a112-35f3ecdba48a" (UID: "243ce4f9-61d4-4a8a-a112-35f3ecdba48a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:43:47 crc kubenswrapper[4857]: I1128 13:43:47.762237 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/243ce4f9-61d4-4a8a-a112-35f3ecdba48a-kube-api-access-72ffc" (OuterVolumeSpecName: "kube-api-access-72ffc") pod "243ce4f9-61d4-4a8a-a112-35f3ecdba48a" (UID: "243ce4f9-61d4-4a8a-a112-35f3ecdba48a"). InnerVolumeSpecName "kube-api-access-72ffc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:43:47 crc kubenswrapper[4857]: I1128 13:43:47.855366 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-72ffc\" (UniqueName: \"kubernetes.io/projected/243ce4f9-61d4-4a8a-a112-35f3ecdba48a-kube-api-access-72ffc\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:47 crc kubenswrapper[4857]: I1128 13:43:47.855394 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/243ce4f9-61d4-4a8a-a112-35f3ecdba48a-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:47 crc kubenswrapper[4857]: I1128 13:43:47.857591 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v4nk2" event={"ID":"243ce4f9-61d4-4a8a-a112-35f3ecdba48a","Type":"ContainerDied","Data":"6570ca611cbbc16bb35fa69cb0cbc2ce34d95dca2c05e01cea86ac8202a10874"} Nov 28 13:43:47 crc kubenswrapper[4857]: I1128 13:43:47.857651 4857 scope.go:117] "RemoveContainer" containerID="369718bab05d14ae16c302bb14350dd9eae2395f2f98ce4a2c06260365935fa4" Nov 28 13:43:47 crc kubenswrapper[4857]: I1128 13:43:47.857848 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-v4nk2" Nov 28 13:43:47 crc kubenswrapper[4857]: I1128 13:43:47.881211 4857 scope.go:117] "RemoveContainer" containerID="2faf9a8fef874f6818c9b1cb6921e09da4204479730df91094de31ea20797b41" Nov 28 13:43:47 crc kubenswrapper[4857]: I1128 13:43:47.897667 4857 scope.go:117] "RemoveContainer" containerID="efa4514dfe054b70403830d7cf3307868106978fb31835823a04d22b6dd5043e" Nov 28 13:43:47 crc kubenswrapper[4857]: I1128 13:43:47.980040 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/243ce4f9-61d4-4a8a-a112-35f3ecdba48a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "243ce4f9-61d4-4a8a-a112-35f3ecdba48a" (UID: "243ce4f9-61d4-4a8a-a112-35f3ecdba48a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:43:48 crc kubenswrapper[4857]: I1128 13:43:48.057047 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/243ce4f9-61d4-4a8a-a112-35f3ecdba48a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 13:43:48 crc kubenswrapper[4857]: I1128 13:43:48.193864 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-v4nk2"] Nov 28 13:43:48 crc kubenswrapper[4857]: I1128 13:43:48.197992 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-v4nk2"] Nov 28 13:43:48 crc kubenswrapper[4857]: I1128 13:43:48.234295 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0767ad94-3350-4068-a3af-7ba756b44b2f" path="/var/lib/kubelet/pods/0767ad94-3350-4068-a3af-7ba756b44b2f/volumes" Nov 28 13:43:48 crc kubenswrapper[4857]: I1128 13:43:48.234883 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="243ce4f9-61d4-4a8a-a112-35f3ecdba48a" path="/var/lib/kubelet/pods/243ce4f9-61d4-4a8a-a112-35f3ecdba48a/volumes" Nov 28 13:43:48 crc kubenswrapper[4857]: I1128 13:43:48.235530 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fba802c6-069f-4872-9ed0-24ee26941f0f" path="/var/lib/kubelet/pods/fba802c6-069f-4872-9ed0-24ee26941f0f/volumes" Nov 28 13:44:01 crc kubenswrapper[4857]: I1128 13:44:01.910549 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftqncr"] Nov 28 13:44:01 crc kubenswrapper[4857]: E1128 13:44:01.911514 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fba802c6-069f-4872-9ed0-24ee26941f0f" containerName="extract-utilities" Nov 28 13:44:01 crc kubenswrapper[4857]: I1128 13:44:01.911532 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="fba802c6-069f-4872-9ed0-24ee26941f0f" containerName="extract-utilities" Nov 28 13:44:01 crc kubenswrapper[4857]: E1128 13:44:01.911542 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="243ce4f9-61d4-4a8a-a112-35f3ecdba48a" containerName="extract-content" Nov 28 13:44:01 crc kubenswrapper[4857]: I1128 13:44:01.911550 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="243ce4f9-61d4-4a8a-a112-35f3ecdba48a" containerName="extract-content" Nov 28 13:44:01 crc kubenswrapper[4857]: E1128 13:44:01.911563 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0767ad94-3350-4068-a3af-7ba756b44b2f" containerName="registry-server" Nov 28 13:44:01 crc kubenswrapper[4857]: I1128 13:44:01.911571 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="0767ad94-3350-4068-a3af-7ba756b44b2f" containerName="registry-server" Nov 28 13:44:01 crc kubenswrapper[4857]: E1128 13:44:01.911580 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="243ce4f9-61d4-4a8a-a112-35f3ecdba48a" containerName="extract-utilities" Nov 28 13:44:01 crc kubenswrapper[4857]: I1128 13:44:01.911587 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="243ce4f9-61d4-4a8a-a112-35f3ecdba48a" containerName="extract-utilities" Nov 28 13:44:01 crc kubenswrapper[4857]: E1128 13:44:01.911605 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fba802c6-069f-4872-9ed0-24ee26941f0f" containerName="registry-server" Nov 28 13:44:01 crc kubenswrapper[4857]: I1128 13:44:01.911613 4857 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="fba802c6-069f-4872-9ed0-24ee26941f0f" containerName="registry-server" Nov 28 13:44:01 crc kubenswrapper[4857]: E1128 13:44:01.911626 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0767ad94-3350-4068-a3af-7ba756b44b2f" containerName="extract-content" Nov 28 13:44:01 crc kubenswrapper[4857]: I1128 13:44:01.911634 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="0767ad94-3350-4068-a3af-7ba756b44b2f" containerName="extract-content" Nov 28 13:44:01 crc kubenswrapper[4857]: E1128 13:44:01.911646 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="243ce4f9-61d4-4a8a-a112-35f3ecdba48a" containerName="registry-server" Nov 28 13:44:01 crc kubenswrapper[4857]: I1128 13:44:01.911654 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="243ce4f9-61d4-4a8a-a112-35f3ecdba48a" containerName="registry-server" Nov 28 13:44:01 crc kubenswrapper[4857]: E1128 13:44:01.911664 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fba802c6-069f-4872-9ed0-24ee26941f0f" containerName="extract-content" Nov 28 13:44:01 crc kubenswrapper[4857]: I1128 13:44:01.911671 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="fba802c6-069f-4872-9ed0-24ee26941f0f" containerName="extract-content" Nov 28 13:44:01 crc kubenswrapper[4857]: E1128 13:44:01.911685 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0767ad94-3350-4068-a3af-7ba756b44b2f" containerName="extract-utilities" Nov 28 13:44:01 crc kubenswrapper[4857]: I1128 13:44:01.911693 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="0767ad94-3350-4068-a3af-7ba756b44b2f" containerName="extract-utilities" Nov 28 13:44:01 crc kubenswrapper[4857]: I1128 13:44:01.911799 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="fba802c6-069f-4872-9ed0-24ee26941f0f" containerName="registry-server" Nov 28 13:44:01 crc kubenswrapper[4857]: I1128 13:44:01.911815 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="243ce4f9-61d4-4a8a-a112-35f3ecdba48a" containerName="registry-server" Nov 28 13:44:01 crc kubenswrapper[4857]: I1128 13:44:01.911825 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="0767ad94-3350-4068-a3af-7ba756b44b2f" containerName="registry-server" Nov 28 13:44:01 crc kubenswrapper[4857]: I1128 13:44:01.913098 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftqncr" Nov 28 13:44:01 crc kubenswrapper[4857]: I1128 13:44:01.915640 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 28 13:44:01 crc kubenswrapper[4857]: I1128 13:44:01.922501 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftqncr"] Nov 28 13:44:01 crc kubenswrapper[4857]: I1128 13:44:01.976390 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7hb6f\" (UniqueName: \"kubernetes.io/projected/bca267c5-32a8-4b06-8eb4-b19357392900-kube-api-access-7hb6f\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftqncr\" (UID: \"bca267c5-32a8-4b06-8eb4-b19357392900\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftqncr" Nov 28 13:44:01 crc kubenswrapper[4857]: I1128 13:44:01.976477 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/bca267c5-32a8-4b06-8eb4-b19357392900-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftqncr\" (UID: \"bca267c5-32a8-4b06-8eb4-b19357392900\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftqncr" Nov 28 13:44:01 crc kubenswrapper[4857]: I1128 13:44:01.976537 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/bca267c5-32a8-4b06-8eb4-b19357392900-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftqncr\" (UID: \"bca267c5-32a8-4b06-8eb4-b19357392900\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftqncr" Nov 28 13:44:02 crc kubenswrapper[4857]: I1128 13:44:02.077497 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7hb6f\" (UniqueName: \"kubernetes.io/projected/bca267c5-32a8-4b06-8eb4-b19357392900-kube-api-access-7hb6f\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftqncr\" (UID: \"bca267c5-32a8-4b06-8eb4-b19357392900\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftqncr" Nov 28 13:44:02 crc kubenswrapper[4857]: I1128 13:44:02.077572 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/bca267c5-32a8-4b06-8eb4-b19357392900-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftqncr\" (UID: \"bca267c5-32a8-4b06-8eb4-b19357392900\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftqncr" Nov 28 13:44:02 crc kubenswrapper[4857]: I1128 13:44:02.077594 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/bca267c5-32a8-4b06-8eb4-b19357392900-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftqncr\" (UID: \"bca267c5-32a8-4b06-8eb4-b19357392900\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftqncr" Nov 28 13:44:02 crc kubenswrapper[4857]: I1128 13:44:02.078072 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/bca267c5-32a8-4b06-8eb4-b19357392900-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftqncr\" (UID: \"bca267c5-32a8-4b06-8eb4-b19357392900\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftqncr" Nov 28 13:44:02 crc kubenswrapper[4857]: I1128 13:44:02.078121 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/bca267c5-32a8-4b06-8eb4-b19357392900-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftqncr\" (UID: \"bca267c5-32a8-4b06-8eb4-b19357392900\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftqncr" Nov 28 13:44:02 crc kubenswrapper[4857]: I1128 13:44:02.099355 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7hb6f\" (UniqueName: \"kubernetes.io/projected/bca267c5-32a8-4b06-8eb4-b19357392900-kube-api-access-7hb6f\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftqncr\" (UID: \"bca267c5-32a8-4b06-8eb4-b19357392900\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftqncr" Nov 28 13:44:02 crc kubenswrapper[4857]: I1128 13:44:02.231298 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftqncr" Nov 28 13:44:02 crc kubenswrapper[4857]: I1128 13:44:02.470205 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftqncr"] Nov 28 13:44:02 crc kubenswrapper[4857]: I1128 13:44:02.951280 4857 generic.go:334] "Generic (PLEG): container finished" podID="bca267c5-32a8-4b06-8eb4-b19357392900" containerID="4d99293a9107068ba0390617f166d09043054c6a9551f7b73a0cadb85a575d3d" exitCode=0 Nov 28 13:44:02 crc kubenswrapper[4857]: I1128 13:44:02.951336 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftqncr" event={"ID":"bca267c5-32a8-4b06-8eb4-b19357392900","Type":"ContainerDied","Data":"4d99293a9107068ba0390617f166d09043054c6a9551f7b73a0cadb85a575d3d"} Nov 28 13:44:02 crc kubenswrapper[4857]: I1128 13:44:02.951392 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftqncr" event={"ID":"bca267c5-32a8-4b06-8eb4-b19357392900","Type":"ContainerStarted","Data":"2438df7b4e9e82d4ecf0ba5e3627d2be3e7770b2eb0db4e91a32572bb3b2b4e8"} Nov 28 13:44:04 crc kubenswrapper[4857]: I1128 13:44:04.964884 4857 generic.go:334] "Generic (PLEG): container finished" podID="bca267c5-32a8-4b06-8eb4-b19357392900" containerID="013c46394eaad3394e1ce1e0b620d3c96ec8e95db3345b5c2867d34b1b2a2c7c" exitCode=0 Nov 28 13:44:04 crc kubenswrapper[4857]: I1128 13:44:04.965001 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftqncr" event={"ID":"bca267c5-32a8-4b06-8eb4-b19357392900","Type":"ContainerDied","Data":"013c46394eaad3394e1ce1e0b620d3c96ec8e95db3345b5c2867d34b1b2a2c7c"} Nov 28 13:44:05 crc kubenswrapper[4857]: I1128 13:44:05.973417 4857 generic.go:334] "Generic (PLEG): container finished" podID="bca267c5-32a8-4b06-8eb4-b19357392900" containerID="497b365d9a3cdf0f0535df67e1feb217f50e86aa8efb9710807ce07923ffff58" exitCode=0 Nov 28 13:44:05 crc kubenswrapper[4857]: I1128 
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.214396 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftqncr"
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.349531 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/bca267c5-32a8-4b06-8eb4-b19357392900-bundle\") pod \"bca267c5-32a8-4b06-8eb4-b19357392900\" (UID: \"bca267c5-32a8-4b06-8eb4-b19357392900\") "
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.350148 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/bca267c5-32a8-4b06-8eb4-b19357392900-util\") pod \"bca267c5-32a8-4b06-8eb4-b19357392900\" (UID: \"bca267c5-32a8-4b06-8eb4-b19357392900\") "
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.350226 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7hb6f\" (UniqueName: \"kubernetes.io/projected/bca267c5-32a8-4b06-8eb4-b19357392900-kube-api-access-7hb6f\") pod \"bca267c5-32a8-4b06-8eb4-b19357392900\" (UID: \"bca267c5-32a8-4b06-8eb4-b19357392900\") "
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.352498 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bca267c5-32a8-4b06-8eb4-b19357392900-bundle" (OuterVolumeSpecName: "bundle") pod "bca267c5-32a8-4b06-8eb4-b19357392900" (UID: "bca267c5-32a8-4b06-8eb4-b19357392900"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.357730 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bca267c5-32a8-4b06-8eb4-b19357392900-kube-api-access-7hb6f" (OuterVolumeSpecName: "kube-api-access-7hb6f") pod "bca267c5-32a8-4b06-8eb4-b19357392900" (UID: "bca267c5-32a8-4b06-8eb4-b19357392900"). InnerVolumeSpecName "kube-api-access-7hb6f". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.370352 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bca267c5-32a8-4b06-8eb4-b19357392900-util" (OuterVolumeSpecName: "util") pod "bca267c5-32a8-4b06-8eb4-b19357392900" (UID: "bca267c5-32a8-4b06-8eb4-b19357392900"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.452242 4857 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/bca267c5-32a8-4b06-8eb4-b19357392900-util\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.452289 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7hb6f\" (UniqueName: \"kubernetes.io/projected/bca267c5-32a8-4b06-8eb4-b19357392900-kube-api-access-7hb6f\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.452308 4857 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/bca267c5-32a8-4b06-8eb4-b19357392900-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.985626 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftqncr" event={"ID":"bca267c5-32a8-4b06-8eb4-b19357392900","Type":"ContainerDied","Data":"2438df7b4e9e82d4ecf0ba5e3627d2be3e7770b2eb0db4e91a32572bb3b2b4e8"}
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.985665 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2438df7b4e9e82d4ecf0ba5e3627d2be3e7770b2eb0db4e91a32572bb3b2b4e8"
Nov 28 13:44:07 crc kubenswrapper[4857]: I1128 13:44:07.985737 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftqncr"
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.497907 4857 scope.go:117] "RemoveContainer" containerID="3b557e5ba53b519b38b97316b1a4d239ea3f9ddfbff13b89824be137e6190389"
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.513217 4857 scope.go:117] "RemoveContainer" containerID="44347af9a835a3f5f3e86e0ea9846fc04acef0c3f502950bf5960044eaa00c80"
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.529532 4857 scope.go:117] "RemoveContainer" containerID="d057d540d993c8ef4efeb5aaa3c882c6513f5a748d2aae387ffcc86bcfb02720"
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.541634 4857 scope.go:117] "RemoveContainer" containerID="8d61c48bd240863a4d3a089bd3d2dfa1fc73910d57ca5aff84737944d59816d1"
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.557612 4857 scope.go:117] "RemoveContainer" containerID="5c0e67229c74176d01bba2125bbdb7df96ffa85b3e5b4a354de21ceae2afb218"
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.570749 4857 scope.go:117] "RemoveContainer" containerID="e9d2fb3582453cda1d291d339591ed1a565c00d5e486c990d46e15881b8e45db"
Nov 28 13:44:10 crc kubenswrapper[4857]: I1128 13:44:10.593217 4857 scope.go:117] "RemoveContainer" containerID="2f2143955fc9176d415a6e765e33f6a5fdf28352081d64c27f5ed62bda7bbb15"
Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.144405 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-d2ffz"]
Nov 28 13:44:11 crc kubenswrapper[4857]: E1128 13:44:11.144712 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bca267c5-32a8-4b06-8eb4-b19357392900" containerName="util"
Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.144726 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="bca267c5-32a8-4b06-8eb4-b19357392900" containerName="util"
Nov 28 13:44:11 crc kubenswrapper[4857]: E1128 13:44:11.144744 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bca267c5-32a8-4b06-8eb4-b19357392900" containerName="extract"
Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.144774 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="bca267c5-32a8-4b06-8eb4-b19357392900" containerName="extract"
Nov 28 13:44:11 crc kubenswrapper[4857]: E1128 13:44:11.144794 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bca267c5-32a8-4b06-8eb4-b19357392900" containerName="pull"
Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.144803 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="bca267c5-32a8-4b06-8eb4-b19357392900" containerName="pull"
Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.144975 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="bca267c5-32a8-4b06-8eb4-b19357392900" containerName="extract"
Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.145656 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-d2ffz"
Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.148301 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-mcx2q"
Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.148731 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt"
Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.149342 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt"
Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.152498 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-d2ffz"]
Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.204173 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vccx9\" (UniqueName: \"kubernetes.io/projected/9ccf01e3-1b00-414a-aa76-5a391a57e76e-kube-api-access-vccx9\") pod \"nmstate-operator-5b5b58f5c8-d2ffz\" (UID: \"9ccf01e3-1b00-414a-aa76-5a391a57e76e\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-d2ffz"
Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.305202 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vccx9\" (UniqueName: \"kubernetes.io/projected/9ccf01e3-1b00-414a-aa76-5a391a57e76e-kube-api-access-vccx9\") pod \"nmstate-operator-5b5b58f5c8-d2ffz\" (UID: \"9ccf01e3-1b00-414a-aa76-5a391a57e76e\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-d2ffz"
Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.331922 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vccx9\" (UniqueName: \"kubernetes.io/projected/9ccf01e3-1b00-414a-aa76-5a391a57e76e-kube-api-access-vccx9\") pod \"nmstate-operator-5b5b58f5c8-d2ffz\" (UID: \"9ccf01e3-1b00-414a-aa76-5a391a57e76e\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-d2ffz"
Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.458879 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-d2ffz"
Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-d2ffz" Nov 28 13:44:11 crc kubenswrapper[4857]: I1128 13:44:11.707801 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-d2ffz"] Nov 28 13:44:11 crc kubenswrapper[4857]: W1128 13:44:11.718211 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9ccf01e3_1b00_414a_aa76_5a391a57e76e.slice/crio-fa042a04c5d08b11115bced43512ba40e7590196319866ced43c0fa7313956cd WatchSource:0}: Error finding container fa042a04c5d08b11115bced43512ba40e7590196319866ced43c0fa7313956cd: Status 404 returned error can't find the container with id fa042a04c5d08b11115bced43512ba40e7590196319866ced43c0fa7313956cd Nov 28 13:44:12 crc kubenswrapper[4857]: I1128 13:44:12.018342 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-d2ffz" event={"ID":"9ccf01e3-1b00-414a-aa76-5a391a57e76e","Type":"ContainerStarted","Data":"fa042a04c5d08b11115bced43512ba40e7590196319866ced43c0fa7313956cd"} Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.052623 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-d2ffz" event={"ID":"9ccf01e3-1b00-414a-aa76-5a391a57e76e","Type":"ContainerStarted","Data":"8c2a352d95705f650da7b7318b525cb471b8cd0a5ac6945a9582745defdf3575"} Nov 28 13:44:15 crc kubenswrapper[4857]: I1128 13:44:15.074028 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-d2ffz" podStartSLOduration=1.831906435 podStartE2EDuration="4.074007993s" podCreationTimestamp="2025-11-28 13:44:11 +0000 UTC" firstStartedPulling="2025-11-28 13:44:11.720654723 +0000 UTC m=+901.844596160" lastFinishedPulling="2025-11-28 13:44:13.962756261 +0000 UTC m=+904.086697718" observedRunningTime="2025-11-28 13:44:15.071264168 +0000 UTC m=+905.195205625" watchObservedRunningTime="2025-11-28 13:44:15.074007993 +0000 UTC m=+905.197949450" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.470499 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-vrprc"] Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.472016 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-vrprc" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.473904 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-vgkg4" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.484715 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pwxgk"] Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.486380 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pwxgk" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.488718 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-vrprc"] Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.489046 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.507049 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pwxgk"] Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.512082 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-927cv"] Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.512857 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-927cv" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.603552 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-6m9c9"] Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.604440 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-6m9c9" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.606163 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-ldfgq" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.606396 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.607837 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.610680 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s8lb8\" (UniqueName: \"kubernetes.io/projected/77ac9288-6383-4345-bffd-2aadbec644af-kube-api-access-s8lb8\") pod \"nmstate-webhook-5f6d4c5ccb-pwxgk\" (UID: \"77ac9288-6383-4345-bffd-2aadbec644af\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pwxgk" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.610731 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/77ac9288-6383-4345-bffd-2aadbec644af-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-pwxgk\" (UID: \"77ac9288-6383-4345-bffd-2aadbec644af\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pwxgk" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.610783 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gznrd\" (UniqueName: \"kubernetes.io/projected/37a08f60-3d10-4701-95b8-7018de948df4-kube-api-access-gznrd\") pod \"nmstate-metrics-7f946cbc9-vrprc\" (UID: \"37a08f60-3d10-4701-95b8-7018de948df4\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-vrprc" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.615791 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-6m9c9"] Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.712433 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cf2hq\" (UniqueName: 
\"kubernetes.io/projected/89385bef-e7f9-4889-b992-7049d6d84f97-kube-api-access-cf2hq\") pod \"nmstate-handler-927cv\" (UID: \"89385bef-e7f9-4889-b992-7049d6d84f97\") " pod="openshift-nmstate/nmstate-handler-927cv" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.712533 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/89385bef-e7f9-4889-b992-7049d6d84f97-ovs-socket\") pod \"nmstate-handler-927cv\" (UID: \"89385bef-e7f9-4889-b992-7049d6d84f97\") " pod="openshift-nmstate/nmstate-handler-927cv" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.712588 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gznrd\" (UniqueName: \"kubernetes.io/projected/37a08f60-3d10-4701-95b8-7018de948df4-kube-api-access-gznrd\") pod \"nmstate-metrics-7f946cbc9-vrprc\" (UID: \"37a08f60-3d10-4701-95b8-7018de948df4\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-vrprc" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.712636 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/0b82e06c-e622-4118-955f-3d191b41077e-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-6m9c9\" (UID: \"0b82e06c-e622-4118-955f-3d191b41077e\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-6m9c9" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.712716 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kthng\" (UniqueName: \"kubernetes.io/projected/0b82e06c-e622-4118-955f-3d191b41077e-kube-api-access-kthng\") pod \"nmstate-console-plugin-7fbb5f6569-6m9c9\" (UID: \"0b82e06c-e622-4118-955f-3d191b41077e\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-6m9c9" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.712786 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/89385bef-e7f9-4889-b992-7049d6d84f97-dbus-socket\") pod \"nmstate-handler-927cv\" (UID: \"89385bef-e7f9-4889-b992-7049d6d84f97\") " pod="openshift-nmstate/nmstate-handler-927cv" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.712821 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/0b82e06c-e622-4118-955f-3d191b41077e-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-6m9c9\" (UID: \"0b82e06c-e622-4118-955f-3d191b41077e\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-6m9c9" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.712862 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s8lb8\" (UniqueName: \"kubernetes.io/projected/77ac9288-6383-4345-bffd-2aadbec644af-kube-api-access-s8lb8\") pod \"nmstate-webhook-5f6d4c5ccb-pwxgk\" (UID: \"77ac9288-6383-4345-bffd-2aadbec644af\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pwxgk" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.712902 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/89385bef-e7f9-4889-b992-7049d6d84f97-nmstate-lock\") pod \"nmstate-handler-927cv\" (UID: \"89385bef-e7f9-4889-b992-7049d6d84f97\") " 
pod="openshift-nmstate/nmstate-handler-927cv" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.712980 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/77ac9288-6383-4345-bffd-2aadbec644af-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-pwxgk\" (UID: \"77ac9288-6383-4345-bffd-2aadbec644af\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pwxgk" Nov 28 13:44:21 crc kubenswrapper[4857]: E1128 13:44:21.713150 4857 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Nov 28 13:44:21 crc kubenswrapper[4857]: E1128 13:44:21.713231 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/77ac9288-6383-4345-bffd-2aadbec644af-tls-key-pair podName:77ac9288-6383-4345-bffd-2aadbec644af nodeName:}" failed. No retries permitted until 2025-11-28 13:44:22.213206199 +0000 UTC m=+912.337147676 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/77ac9288-6383-4345-bffd-2aadbec644af-tls-key-pair") pod "nmstate-webhook-5f6d4c5ccb-pwxgk" (UID: "77ac9288-6383-4345-bffd-2aadbec644af") : secret "openshift-nmstate-webhook" not found Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.744074 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gznrd\" (UniqueName: \"kubernetes.io/projected/37a08f60-3d10-4701-95b8-7018de948df4-kube-api-access-gznrd\") pod \"nmstate-metrics-7f946cbc9-vrprc\" (UID: \"37a08f60-3d10-4701-95b8-7018de948df4\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-vrprc" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.746309 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s8lb8\" (UniqueName: \"kubernetes.io/projected/77ac9288-6383-4345-bffd-2aadbec644af-kube-api-access-s8lb8\") pod \"nmstate-webhook-5f6d4c5ccb-pwxgk\" (UID: \"77ac9288-6383-4345-bffd-2aadbec644af\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pwxgk" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.792474 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-67bf585c5f-wk8qv"] Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.793091 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-67bf585c5f-wk8qv" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.821429 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-67bf585c5f-wk8qv"] Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.831766 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-vrprc" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.836429 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/89385bef-e7f9-4889-b992-7049d6d84f97-nmstate-lock\") pod \"nmstate-handler-927cv\" (UID: \"89385bef-e7f9-4889-b992-7049d6d84f97\") " pod="openshift-nmstate/nmstate-handler-927cv" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.836542 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dcfbc948-be7e-49c8-aa2f-a434a950c498-trusted-ca-bundle\") pod \"console-67bf585c5f-wk8qv\" (UID: \"dcfbc948-be7e-49c8-aa2f-a434a950c498\") " pod="openshift-console/console-67bf585c5f-wk8qv" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.836594 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/dcfbc948-be7e-49c8-aa2f-a434a950c498-service-ca\") pod \"console-67bf585c5f-wk8qv\" (UID: \"dcfbc948-be7e-49c8-aa2f-a434a950c498\") " pod="openshift-console/console-67bf585c5f-wk8qv" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.836622 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cf2hq\" (UniqueName: \"kubernetes.io/projected/89385bef-e7f9-4889-b992-7049d6d84f97-kube-api-access-cf2hq\") pod \"nmstate-handler-927cv\" (UID: \"89385bef-e7f9-4889-b992-7049d6d84f97\") " pod="openshift-nmstate/nmstate-handler-927cv" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.836659 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/dcfbc948-be7e-49c8-aa2f-a434a950c498-console-oauth-config\") pod \"console-67bf585c5f-wk8qv\" (UID: \"dcfbc948-be7e-49c8-aa2f-a434a950c498\") " pod="openshift-console/console-67bf585c5f-wk8qv" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.836703 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/dcfbc948-be7e-49c8-aa2f-a434a950c498-console-serving-cert\") pod \"console-67bf585c5f-wk8qv\" (UID: \"dcfbc948-be7e-49c8-aa2f-a434a950c498\") " pod="openshift-console/console-67bf585c5f-wk8qv" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.836737 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/89385bef-e7f9-4889-b992-7049d6d84f97-ovs-socket\") pod \"nmstate-handler-927cv\" (UID: \"89385bef-e7f9-4889-b992-7049d6d84f97\") " pod="openshift-nmstate/nmstate-handler-927cv" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.836762 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/dcfbc948-be7e-49c8-aa2f-a434a950c498-oauth-serving-cert\") pod \"console-67bf585c5f-wk8qv\" (UID: \"dcfbc948-be7e-49c8-aa2f-a434a950c498\") " pod="openshift-console/console-67bf585c5f-wk8qv" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.836797 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/0b82e06c-e622-4118-955f-3d191b41077e-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-6m9c9\" (UID: \"0b82e06c-e622-4118-955f-3d191b41077e\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-6m9c9" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.836846 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l8sq8\" (UniqueName: \"kubernetes.io/projected/dcfbc948-be7e-49c8-aa2f-a434a950c498-kube-api-access-l8sq8\") pod \"console-67bf585c5f-wk8qv\" (UID: \"dcfbc948-be7e-49c8-aa2f-a434a950c498\") " pod="openshift-console/console-67bf585c5f-wk8qv" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.836885 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kthng\" (UniqueName: \"kubernetes.io/projected/0b82e06c-e622-4118-955f-3d191b41077e-kube-api-access-kthng\") pod \"nmstate-console-plugin-7fbb5f6569-6m9c9\" (UID: \"0b82e06c-e622-4118-955f-3d191b41077e\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-6m9c9" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.836921 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/dcfbc948-be7e-49c8-aa2f-a434a950c498-console-config\") pod \"console-67bf585c5f-wk8qv\" (UID: \"dcfbc948-be7e-49c8-aa2f-a434a950c498\") " pod="openshift-console/console-67bf585c5f-wk8qv" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.837021 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/89385bef-e7f9-4889-b992-7049d6d84f97-dbus-socket\") pod \"nmstate-handler-927cv\" (UID: \"89385bef-e7f9-4889-b992-7049d6d84f97\") " pod="openshift-nmstate/nmstate-handler-927cv" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.837062 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/0b82e06c-e622-4118-955f-3d191b41077e-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-6m9c9\" (UID: \"0b82e06c-e622-4118-955f-3d191b41077e\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-6m9c9" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.837832 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/0b82e06c-e622-4118-955f-3d191b41077e-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-6m9c9\" (UID: \"0b82e06c-e622-4118-955f-3d191b41077e\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-6m9c9" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.837889 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/89385bef-e7f9-4889-b992-7049d6d84f97-nmstate-lock\") pod \"nmstate-handler-927cv\" (UID: \"89385bef-e7f9-4889-b992-7049d6d84f97\") " pod="openshift-nmstate/nmstate-handler-927cv" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.838311 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/89385bef-e7f9-4889-b992-7049d6d84f97-ovs-socket\") pod \"nmstate-handler-927cv\" (UID: \"89385bef-e7f9-4889-b992-7049d6d84f97\") " pod="openshift-nmstate/nmstate-handler-927cv" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.838548 4857 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/89385bef-e7f9-4889-b992-7049d6d84f97-dbus-socket\") pod \"nmstate-handler-927cv\" (UID: \"89385bef-e7f9-4889-b992-7049d6d84f97\") " pod="openshift-nmstate/nmstate-handler-927cv" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.851533 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/0b82e06c-e622-4118-955f-3d191b41077e-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-6m9c9\" (UID: \"0b82e06c-e622-4118-955f-3d191b41077e\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-6m9c9" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.852796 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cf2hq\" (UniqueName: \"kubernetes.io/projected/89385bef-e7f9-4889-b992-7049d6d84f97-kube-api-access-cf2hq\") pod \"nmstate-handler-927cv\" (UID: \"89385bef-e7f9-4889-b992-7049d6d84f97\") " pod="openshift-nmstate/nmstate-handler-927cv" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.855286 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kthng\" (UniqueName: \"kubernetes.io/projected/0b82e06c-e622-4118-955f-3d191b41077e-kube-api-access-kthng\") pod \"nmstate-console-plugin-7fbb5f6569-6m9c9\" (UID: \"0b82e06c-e622-4118-955f-3d191b41077e\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-6m9c9" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.923541 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-6m9c9" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.938287 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/dcfbc948-be7e-49c8-aa2f-a434a950c498-console-serving-cert\") pod \"console-67bf585c5f-wk8qv\" (UID: \"dcfbc948-be7e-49c8-aa2f-a434a950c498\") " pod="openshift-console/console-67bf585c5f-wk8qv" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.938332 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/dcfbc948-be7e-49c8-aa2f-a434a950c498-oauth-serving-cert\") pod \"console-67bf585c5f-wk8qv\" (UID: \"dcfbc948-be7e-49c8-aa2f-a434a950c498\") " pod="openshift-console/console-67bf585c5f-wk8qv" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.938359 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l8sq8\" (UniqueName: \"kubernetes.io/projected/dcfbc948-be7e-49c8-aa2f-a434a950c498-kube-api-access-l8sq8\") pod \"console-67bf585c5f-wk8qv\" (UID: \"dcfbc948-be7e-49c8-aa2f-a434a950c498\") " pod="openshift-console/console-67bf585c5f-wk8qv" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.938381 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/dcfbc948-be7e-49c8-aa2f-a434a950c498-console-config\") pod \"console-67bf585c5f-wk8qv\" (UID: \"dcfbc948-be7e-49c8-aa2f-a434a950c498\") " pod="openshift-console/console-67bf585c5f-wk8qv" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.938447 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dcfbc948-be7e-49c8-aa2f-a434a950c498-trusted-ca-bundle\") pod 
\"console-67bf585c5f-wk8qv\" (UID: \"dcfbc948-be7e-49c8-aa2f-a434a950c498\") " pod="openshift-console/console-67bf585c5f-wk8qv" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.938471 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/dcfbc948-be7e-49c8-aa2f-a434a950c498-service-ca\") pod \"console-67bf585c5f-wk8qv\" (UID: \"dcfbc948-be7e-49c8-aa2f-a434a950c498\") " pod="openshift-console/console-67bf585c5f-wk8qv" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.938490 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/dcfbc948-be7e-49c8-aa2f-a434a950c498-console-oauth-config\") pod \"console-67bf585c5f-wk8qv\" (UID: \"dcfbc948-be7e-49c8-aa2f-a434a950c498\") " pod="openshift-console/console-67bf585c5f-wk8qv" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.940325 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/dcfbc948-be7e-49c8-aa2f-a434a950c498-oauth-serving-cert\") pod \"console-67bf585c5f-wk8qv\" (UID: \"dcfbc948-be7e-49c8-aa2f-a434a950c498\") " pod="openshift-console/console-67bf585c5f-wk8qv" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.940644 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/dcfbc948-be7e-49c8-aa2f-a434a950c498-console-config\") pod \"console-67bf585c5f-wk8qv\" (UID: \"dcfbc948-be7e-49c8-aa2f-a434a950c498\") " pod="openshift-console/console-67bf585c5f-wk8qv" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.941809 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/dcfbc948-be7e-49c8-aa2f-a434a950c498-service-ca\") pod \"console-67bf585c5f-wk8qv\" (UID: \"dcfbc948-be7e-49c8-aa2f-a434a950c498\") " pod="openshift-console/console-67bf585c5f-wk8qv" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.941851 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/dcfbc948-be7e-49c8-aa2f-a434a950c498-trusted-ca-bundle\") pod \"console-67bf585c5f-wk8qv\" (UID: \"dcfbc948-be7e-49c8-aa2f-a434a950c498\") " pod="openshift-console/console-67bf585c5f-wk8qv" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.948301 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/dcfbc948-be7e-49c8-aa2f-a434a950c498-console-serving-cert\") pod \"console-67bf585c5f-wk8qv\" (UID: \"dcfbc948-be7e-49c8-aa2f-a434a950c498\") " pod="openshift-console/console-67bf585c5f-wk8qv" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.951125 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/dcfbc948-be7e-49c8-aa2f-a434a950c498-console-oauth-config\") pod \"console-67bf585c5f-wk8qv\" (UID: \"dcfbc948-be7e-49c8-aa2f-a434a950c498\") " pod="openshift-console/console-67bf585c5f-wk8qv" Nov 28 13:44:21 crc kubenswrapper[4857]: I1128 13:44:21.958689 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l8sq8\" (UniqueName: \"kubernetes.io/projected/dcfbc948-be7e-49c8-aa2f-a434a950c498-kube-api-access-l8sq8\") pod \"console-67bf585c5f-wk8qv\" (UID: 
\"dcfbc948-be7e-49c8-aa2f-a434a950c498\") " pod="openshift-console/console-67bf585c5f-wk8qv" Nov 28 13:44:22 crc kubenswrapper[4857]: I1128 13:44:22.031121 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-vrprc"] Nov 28 13:44:22 crc kubenswrapper[4857]: W1128 13:44:22.038909 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a08f60_3d10_4701_95b8_7018de948df4.slice/crio-05cdc8f7b27008bbdb8ed1fd1b13cc5800e12b2bcc5d14ec3fd90b0656c59fb0 WatchSource:0}: Error finding container 05cdc8f7b27008bbdb8ed1fd1b13cc5800e12b2bcc5d14ec3fd90b0656c59fb0: Status 404 returned error can't find the container with id 05cdc8f7b27008bbdb8ed1fd1b13cc5800e12b2bcc5d14ec3fd90b0656c59fb0 Nov 28 13:44:22 crc kubenswrapper[4857]: I1128 13:44:22.092916 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-vrprc" event={"ID":"37a08f60-3d10-4701-95b8-7018de948df4","Type":"ContainerStarted","Data":"05cdc8f7b27008bbdb8ed1fd1b13cc5800e12b2bcc5d14ec3fd90b0656c59fb0"} Nov 28 13:44:22 crc kubenswrapper[4857]: I1128 13:44:22.125248 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-6m9c9"] Nov 28 13:44:22 crc kubenswrapper[4857]: I1128 13:44:22.132771 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-67bf585c5f-wk8qv" Nov 28 13:44:22 crc kubenswrapper[4857]: W1128 13:44:22.133303 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0b82e06c_e622_4118_955f_3d191b41077e.slice/crio-36116569460c03861d6813e65390594400707f77a603aa0f2ff38d5cbe925f93 WatchSource:0}: Error finding container 36116569460c03861d6813e65390594400707f77a603aa0f2ff38d5cbe925f93: Status 404 returned error can't find the container with id 36116569460c03861d6813e65390594400707f77a603aa0f2ff38d5cbe925f93 Nov 28 13:44:22 crc kubenswrapper[4857]: I1128 13:44:22.146804 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-927cv" Nov 28 13:44:22 crc kubenswrapper[4857]: W1128 13:44:22.170128 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod89385bef_e7f9_4889_b992_7049d6d84f97.slice/crio-dc6fa4e37bf7a3ed7102290f6f3686228b4139fa57fd6943dcd7390e3c00d214 WatchSource:0}: Error finding container dc6fa4e37bf7a3ed7102290f6f3686228b4139fa57fd6943dcd7390e3c00d214: Status 404 returned error can't find the container with id dc6fa4e37bf7a3ed7102290f6f3686228b4139fa57fd6943dcd7390e3c00d214 Nov 28 13:44:22 crc kubenswrapper[4857]: I1128 13:44:22.242197 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/77ac9288-6383-4345-bffd-2aadbec644af-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-pwxgk\" (UID: \"77ac9288-6383-4345-bffd-2aadbec644af\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pwxgk" Nov 28 13:44:22 crc kubenswrapper[4857]: I1128 13:44:22.246865 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/77ac9288-6383-4345-bffd-2aadbec644af-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-pwxgk\" (UID: \"77ac9288-6383-4345-bffd-2aadbec644af\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pwxgk" Nov 28 13:44:22 crc kubenswrapper[4857]: I1128 13:44:22.324923 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-67bf585c5f-wk8qv"] Nov 28 13:44:22 crc kubenswrapper[4857]: W1128 13:44:22.332225 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddcfbc948_be7e_49c8_aa2f_a434a950c498.slice/crio-632cf94f32695bb1997e4c0ea41e6e1467ad79013ba5d6b2ed82c39a34bd099c WatchSource:0}: Error finding container 632cf94f32695bb1997e4c0ea41e6e1467ad79013ba5d6b2ed82c39a34bd099c: Status 404 returned error can't find the container with id 632cf94f32695bb1997e4c0ea41e6e1467ad79013ba5d6b2ed82c39a34bd099c Nov 28 13:44:22 crc kubenswrapper[4857]: I1128 13:44:22.438369 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pwxgk" Nov 28 13:44:22 crc kubenswrapper[4857]: I1128 13:44:22.633185 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pwxgk"] Nov 28 13:44:22 crc kubenswrapper[4857]: W1128 13:44:22.641302 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod77ac9288_6383_4345_bffd_2aadbec644af.slice/crio-b2f0f6aedb375ac0e02afe35aa8da0a7f370c96d380683852bd1f206a775d7d5 WatchSource:0}: Error finding container b2f0f6aedb375ac0e02afe35aa8da0a7f370c96d380683852bd1f206a775d7d5: Status 404 returned error can't find the container with id b2f0f6aedb375ac0e02afe35aa8da0a7f370c96d380683852bd1f206a775d7d5 Nov 28 13:44:23 crc kubenswrapper[4857]: I1128 13:44:23.100464 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-6m9c9" event={"ID":"0b82e06c-e622-4118-955f-3d191b41077e","Type":"ContainerStarted","Data":"36116569460c03861d6813e65390594400707f77a603aa0f2ff38d5cbe925f93"} Nov 28 13:44:23 crc kubenswrapper[4857]: I1128 13:44:23.103431 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pwxgk" event={"ID":"77ac9288-6383-4345-bffd-2aadbec644af","Type":"ContainerStarted","Data":"b2f0f6aedb375ac0e02afe35aa8da0a7f370c96d380683852bd1f206a775d7d5"} Nov 28 13:44:23 crc kubenswrapper[4857]: I1128 13:44:23.107014 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-927cv" event={"ID":"89385bef-e7f9-4889-b992-7049d6d84f97","Type":"ContainerStarted","Data":"dc6fa4e37bf7a3ed7102290f6f3686228b4139fa57fd6943dcd7390e3c00d214"} Nov 28 13:44:23 crc kubenswrapper[4857]: I1128 13:44:23.113277 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-67bf585c5f-wk8qv" event={"ID":"dcfbc948-be7e-49c8-aa2f-a434a950c498","Type":"ContainerStarted","Data":"26e25ee4e69fa448453d32c7c140388776d00394f90fb69ec1e9777e4f4c49c3"} Nov 28 13:44:23 crc kubenswrapper[4857]: I1128 13:44:23.113649 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-67bf585c5f-wk8qv" event={"ID":"dcfbc948-be7e-49c8-aa2f-a434a950c498","Type":"ContainerStarted","Data":"632cf94f32695bb1997e4c0ea41e6e1467ad79013ba5d6b2ed82c39a34bd099c"} Nov 28 13:44:23 crc kubenswrapper[4857]: I1128 13:44:23.147366 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-67bf585c5f-wk8qv" podStartSLOduration=2.14732852 podStartE2EDuration="2.14732852s" podCreationTimestamp="2025-11-28 13:44:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:44:23.136015681 +0000 UTC m=+913.259957158" watchObservedRunningTime="2025-11-28 13:44:23.14732852 +0000 UTC m=+913.271269997" Nov 28 13:44:25 crc kubenswrapper[4857]: I1128 13:44:25.127791 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-6m9c9" event={"ID":"0b82e06c-e622-4118-955f-3d191b41077e","Type":"ContainerStarted","Data":"9a9c62d883bb054393b42ae7c7e45bcb757e18ac0fb1d7024addcb2a9fa84b06"} Nov 28 13:44:25 crc kubenswrapper[4857]: I1128 13:44:25.131112 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-vrprc" 
event={"ID":"37a08f60-3d10-4701-95b8-7018de948df4","Type":"ContainerStarted","Data":"817ddf9114b35407807c674376e4be75ff044f35ffa0702ad1bdfb4cbd255d69"} Nov 28 13:44:25 crc kubenswrapper[4857]: I1128 13:44:25.132851 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pwxgk" event={"ID":"77ac9288-6383-4345-bffd-2aadbec644af","Type":"ContainerStarted","Data":"806cb5bbd1efb201819539a2c17df13c8a291bc3c5af3b5e780434ea66b39417"} Nov 28 13:44:25 crc kubenswrapper[4857]: I1128 13:44:25.132978 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pwxgk" Nov 28 13:44:25 crc kubenswrapper[4857]: I1128 13:44:25.134077 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-927cv" event={"ID":"89385bef-e7f9-4889-b992-7049d6d84f97","Type":"ContainerStarted","Data":"aef8c54cdec97fc88743331e8774a28ad01a3484ed5621e0bf78cb1925a12b23"} Nov 28 13:44:25 crc kubenswrapper[4857]: I1128 13:44:25.134240 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-927cv" Nov 28 13:44:25 crc kubenswrapper[4857]: I1128 13:44:25.143644 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-6m9c9" podStartSLOduration=1.771244278 podStartE2EDuration="4.143623204s" podCreationTimestamp="2025-11-28 13:44:21 +0000 UTC" firstStartedPulling="2025-11-28 13:44:22.140161961 +0000 UTC m=+912.264103398" lastFinishedPulling="2025-11-28 13:44:24.512540887 +0000 UTC m=+914.636482324" observedRunningTime="2025-11-28 13:44:25.142689399 +0000 UTC m=+915.266630846" watchObservedRunningTime="2025-11-28 13:44:25.143623204 +0000 UTC m=+915.267564641" Nov 28 13:44:25 crc kubenswrapper[4857]: I1128 13:44:25.161865 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pwxgk" podStartSLOduration=2.277085144 podStartE2EDuration="4.161844762s" podCreationTimestamp="2025-11-28 13:44:21 +0000 UTC" firstStartedPulling="2025-11-28 13:44:22.644029353 +0000 UTC m=+912.767970790" lastFinishedPulling="2025-11-28 13:44:24.528788971 +0000 UTC m=+914.652730408" observedRunningTime="2025-11-28 13:44:25.161384289 +0000 UTC m=+915.285325746" watchObservedRunningTime="2025-11-28 13:44:25.161844762 +0000 UTC m=+915.285786199" Nov 28 13:44:25 crc kubenswrapper[4857]: I1128 13:44:25.176306 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-927cv" podStartSLOduration=1.8259159010000001 podStartE2EDuration="4.176285256s" podCreationTimestamp="2025-11-28 13:44:21 +0000 UTC" firstStartedPulling="2025-11-28 13:44:22.17269914 +0000 UTC m=+912.296640577" lastFinishedPulling="2025-11-28 13:44:24.523068495 +0000 UTC m=+914.647009932" observedRunningTime="2025-11-28 13:44:25.17496312 +0000 UTC m=+915.298904577" watchObservedRunningTime="2025-11-28 13:44:25.176285256 +0000 UTC m=+915.300226693" Nov 28 13:44:27 crc kubenswrapper[4857]: I1128 13:44:27.148697 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-vrprc" event={"ID":"37a08f60-3d10-4701-95b8-7018de948df4","Type":"ContainerStarted","Data":"05486dc4fb00a69baeaaae34e5d27c269ced65eb7e937f645c71245fdad97aa8"} Nov 28 13:44:32 crc kubenswrapper[4857]: I1128 13:44:32.133781 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-console/console-67bf585c5f-wk8qv" Nov 28 13:44:32 crc kubenswrapper[4857]: I1128 13:44:32.134250 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-67bf585c5f-wk8qv" Nov 28 13:44:32 crc kubenswrapper[4857]: I1128 13:44:32.140869 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-67bf585c5f-wk8qv" Nov 28 13:44:32 crc kubenswrapper[4857]: I1128 13:44:32.168186 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-vrprc" podStartSLOduration=6.683505555 podStartE2EDuration="11.168164574s" podCreationTimestamp="2025-11-28 13:44:21 +0000 UTC" firstStartedPulling="2025-11-28 13:44:22.043979704 +0000 UTC m=+912.167921151" lastFinishedPulling="2025-11-28 13:44:26.528638723 +0000 UTC m=+916.652580170" observedRunningTime="2025-11-28 13:44:27.166391901 +0000 UTC m=+917.290333368" watchObservedRunningTime="2025-11-28 13:44:32.168164574 +0000 UTC m=+922.292106021" Nov 28 13:44:32 crc kubenswrapper[4857]: I1128 13:44:32.176039 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-927cv" Nov 28 13:44:32 crc kubenswrapper[4857]: I1128 13:44:32.185074 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-67bf585c5f-wk8qv" Nov 28 13:44:32 crc kubenswrapper[4857]: I1128 13:44:32.276383 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-6gf7t"] Nov 28 13:44:42 crc kubenswrapper[4857]: I1128 13:44:42.444134 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-pwxgk" Nov 28 13:44:57 crc kubenswrapper[4857]: I1128 13:44:57.333616 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-6gf7t" podUID="5f40ecda-b519-4cfe-8b7b-6854e018fe24" containerName="console" containerID="cri-o://2f64c9ee64d60e1caeb48290dd26236c5311f2f0ffa2e6ad59c2433f2eaff2b0" gracePeriod=15 Nov 28 13:44:57 crc kubenswrapper[4857]: I1128 13:44:57.503284 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f837hw2q"] Nov 28 13:44:57 crc kubenswrapper[4857]: I1128 13:44:57.504596 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f837hw2q" Nov 28 13:44:57 crc kubenswrapper[4857]: I1128 13:44:57.506787 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 28 13:44:57 crc kubenswrapper[4857]: I1128 13:44:57.514668 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f837hw2q"] Nov 28 13:44:57 crc kubenswrapper[4857]: I1128 13:44:57.665351 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8cqtn\" (UniqueName: \"kubernetes.io/projected/adabc1a1-93dd-4d7a-9971-19c219dafc3c-kube-api-access-8cqtn\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f837hw2q\" (UID: \"adabc1a1-93dd-4d7a-9971-19c219dafc3c\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f837hw2q" Nov 28 13:44:57 crc kubenswrapper[4857]: I1128 13:44:57.665542 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/adabc1a1-93dd-4d7a-9971-19c219dafc3c-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f837hw2q\" (UID: \"adabc1a1-93dd-4d7a-9971-19c219dafc3c\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f837hw2q" Nov 28 13:44:57 crc kubenswrapper[4857]: I1128 13:44:57.665688 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/adabc1a1-93dd-4d7a-9971-19c219dafc3c-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f837hw2q\" (UID: \"adabc1a1-93dd-4d7a-9971-19c219dafc3c\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f837hw2q" Nov 28 13:44:57 crc kubenswrapper[4857]: I1128 13:44:57.767631 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/adabc1a1-93dd-4d7a-9971-19c219dafc3c-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f837hw2q\" (UID: \"adabc1a1-93dd-4d7a-9971-19c219dafc3c\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f837hw2q" Nov 28 13:44:57 crc kubenswrapper[4857]: I1128 13:44:57.767783 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/adabc1a1-93dd-4d7a-9971-19c219dafc3c-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f837hw2q\" (UID: \"adabc1a1-93dd-4d7a-9971-19c219dafc3c\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f837hw2q" Nov 28 13:44:57 crc kubenswrapper[4857]: I1128 13:44:57.767927 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8cqtn\" (UniqueName: \"kubernetes.io/projected/adabc1a1-93dd-4d7a-9971-19c219dafc3c-kube-api-access-8cqtn\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f837hw2q\" (UID: \"adabc1a1-93dd-4d7a-9971-19c219dafc3c\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f837hw2q" Nov 28 13:44:57 crc kubenswrapper[4857]: I1128 13:44:57.768513 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/adabc1a1-93dd-4d7a-9971-19c219dafc3c-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f837hw2q\" (UID: \"adabc1a1-93dd-4d7a-9971-19c219dafc3c\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f837hw2q" Nov 28 13:44:57 crc kubenswrapper[4857]: I1128 13:44:57.768523 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/adabc1a1-93dd-4d7a-9971-19c219dafc3c-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f837hw2q\" (UID: \"adabc1a1-93dd-4d7a-9971-19c219dafc3c\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f837hw2q" Nov 28 13:44:57 crc kubenswrapper[4857]: I1128 13:44:57.801545 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8cqtn\" (UniqueName: \"kubernetes.io/projected/adabc1a1-93dd-4d7a-9971-19c219dafc3c-kube-api-access-8cqtn\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f837hw2q\" (UID: \"adabc1a1-93dd-4d7a-9971-19c219dafc3c\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f837hw2q" Nov 28 13:44:57 crc kubenswrapper[4857]: I1128 13:44:57.823516 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f837hw2q" Nov 28 13:44:57 crc kubenswrapper[4857]: I1128 13:44:57.859032 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-6gf7t_5f40ecda-b519-4cfe-8b7b-6854e018fe24/console/0.log" Nov 28 13:44:57 crc kubenswrapper[4857]: I1128 13:44:57.859129 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-6gf7t" Nov 28 13:44:57 crc kubenswrapper[4857]: I1128 13:44:57.969347 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5f40ecda-b519-4cfe-8b7b-6854e018fe24-trusted-ca-bundle\") pod \"5f40ecda-b519-4cfe-8b7b-6854e018fe24\" (UID: \"5f40ecda-b519-4cfe-8b7b-6854e018fe24\") " Nov 28 13:44:57 crc kubenswrapper[4857]: I1128 13:44:57.969445 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/5f40ecda-b519-4cfe-8b7b-6854e018fe24-service-ca\") pod \"5f40ecda-b519-4cfe-8b7b-6854e018fe24\" (UID: \"5f40ecda-b519-4cfe-8b7b-6854e018fe24\") " Nov 28 13:44:57 crc kubenswrapper[4857]: I1128 13:44:57.969501 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/5f40ecda-b519-4cfe-8b7b-6854e018fe24-console-oauth-config\") pod \"5f40ecda-b519-4cfe-8b7b-6854e018fe24\" (UID: \"5f40ecda-b519-4cfe-8b7b-6854e018fe24\") " Nov 28 13:44:57 crc kubenswrapper[4857]: I1128 13:44:57.969540 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tv98r\" (UniqueName: \"kubernetes.io/projected/5f40ecda-b519-4cfe-8b7b-6854e018fe24-kube-api-access-tv98r\") pod \"5f40ecda-b519-4cfe-8b7b-6854e018fe24\" (UID: \"5f40ecda-b519-4cfe-8b7b-6854e018fe24\") " Nov 28 13:44:57 crc kubenswrapper[4857]: I1128 13:44:57.969592 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/5f40ecda-b519-4cfe-8b7b-6854e018fe24-console-serving-cert\") pod \"5f40ecda-b519-4cfe-8b7b-6854e018fe24\" (UID: \"5f40ecda-b519-4cfe-8b7b-6854e018fe24\") " Nov 28 13:44:57 crc kubenswrapper[4857]: I1128 13:44:57.969633 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/5f40ecda-b519-4cfe-8b7b-6854e018fe24-console-config\") pod \"5f40ecda-b519-4cfe-8b7b-6854e018fe24\" (UID: \"5f40ecda-b519-4cfe-8b7b-6854e018fe24\") " Nov 28 13:44:57 crc kubenswrapper[4857]: I1128 13:44:57.969713 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/5f40ecda-b519-4cfe-8b7b-6854e018fe24-oauth-serving-cert\") pod \"5f40ecda-b519-4cfe-8b7b-6854e018fe24\" (UID: \"5f40ecda-b519-4cfe-8b7b-6854e018fe24\") " Nov 28 13:44:57 crc kubenswrapper[4857]: I1128 13:44:57.970523 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5f40ecda-b519-4cfe-8b7b-6854e018fe24-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "5f40ecda-b519-4cfe-8b7b-6854e018fe24" (UID: "5f40ecda-b519-4cfe-8b7b-6854e018fe24"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:57 crc kubenswrapper[4857]: I1128 13:44:57.970555 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5f40ecda-b519-4cfe-8b7b-6854e018fe24-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "5f40ecda-b519-4cfe-8b7b-6854e018fe24" (UID: "5f40ecda-b519-4cfe-8b7b-6854e018fe24"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:57 crc kubenswrapper[4857]: I1128 13:44:57.970557 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5f40ecda-b519-4cfe-8b7b-6854e018fe24-service-ca" (OuterVolumeSpecName: "service-ca") pod "5f40ecda-b519-4cfe-8b7b-6854e018fe24" (UID: "5f40ecda-b519-4cfe-8b7b-6854e018fe24"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:57 crc kubenswrapper[4857]: I1128 13:44:57.970740 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5f40ecda-b519-4cfe-8b7b-6854e018fe24-console-config" (OuterVolumeSpecName: "console-config") pod "5f40ecda-b519-4cfe-8b7b-6854e018fe24" (UID: "5f40ecda-b519-4cfe-8b7b-6854e018fe24"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:44:57 crc kubenswrapper[4857]: I1128 13:44:57.979465 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f40ecda-b519-4cfe-8b7b-6854e018fe24-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "5f40ecda-b519-4cfe-8b7b-6854e018fe24" (UID: "5f40ecda-b519-4cfe-8b7b-6854e018fe24"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:57 crc kubenswrapper[4857]: I1128 13:44:57.980124 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f40ecda-b519-4cfe-8b7b-6854e018fe24-kube-api-access-tv98r" (OuterVolumeSpecName: "kube-api-access-tv98r") pod "5f40ecda-b519-4cfe-8b7b-6854e018fe24" (UID: "5f40ecda-b519-4cfe-8b7b-6854e018fe24"). InnerVolumeSpecName "kube-api-access-tv98r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:44:57 crc kubenswrapper[4857]: I1128 13:44:57.980401 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f40ecda-b519-4cfe-8b7b-6854e018fe24-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "5f40ecda-b519-4cfe-8b7b-6854e018fe24" (UID: "5f40ecda-b519-4cfe-8b7b-6854e018fe24"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:44:58 crc kubenswrapper[4857]: I1128 13:44:58.071205 4857 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/5f40ecda-b519-4cfe-8b7b-6854e018fe24-service-ca\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:58 crc kubenswrapper[4857]: I1128 13:44:58.071246 4857 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/5f40ecda-b519-4cfe-8b7b-6854e018fe24-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:58 crc kubenswrapper[4857]: I1128 13:44:58.071261 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tv98r\" (UniqueName: \"kubernetes.io/projected/5f40ecda-b519-4cfe-8b7b-6854e018fe24-kube-api-access-tv98r\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:58 crc kubenswrapper[4857]: I1128 13:44:58.071271 4857 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/5f40ecda-b519-4cfe-8b7b-6854e018fe24-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:58 crc kubenswrapper[4857]: I1128 13:44:58.071282 4857 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/5f40ecda-b519-4cfe-8b7b-6854e018fe24-console-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:58 crc kubenswrapper[4857]: I1128 13:44:58.071291 4857 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/5f40ecda-b519-4cfe-8b7b-6854e018fe24-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:58 crc kubenswrapper[4857]: I1128 13:44:58.071301 4857 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5f40ecda-b519-4cfe-8b7b-6854e018fe24-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:44:58 crc kubenswrapper[4857]: I1128 13:44:58.095269 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f837hw2q"] Nov 28 13:44:58 crc kubenswrapper[4857]: I1128 13:44:58.345411 4857 generic.go:334] "Generic (PLEG): container finished" podID="adabc1a1-93dd-4d7a-9971-19c219dafc3c" containerID="3253548d9fca38bc3757d8ada245b5c522237fdbb280e728821acab04913d486" exitCode=0 Nov 28 13:44:58 crc kubenswrapper[4857]: I1128 13:44:58.345467 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f837hw2q" event={"ID":"adabc1a1-93dd-4d7a-9971-19c219dafc3c","Type":"ContainerDied","Data":"3253548d9fca38bc3757d8ada245b5c522237fdbb280e728821acab04913d486"} Nov 28 13:44:58 crc kubenswrapper[4857]: I1128 13:44:58.345513 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f837hw2q" event={"ID":"adabc1a1-93dd-4d7a-9971-19c219dafc3c","Type":"ContainerStarted","Data":"3f8705e526a4dfcc4d4f55cea7278ae7ac8edf11c204dd0647a4fb58cb5b0d38"} Nov 28 13:44:58 crc kubenswrapper[4857]: I1128 13:44:58.350759 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-6gf7t_5f40ecda-b519-4cfe-8b7b-6854e018fe24/console/0.log" Nov 28 13:44:58 crc kubenswrapper[4857]: I1128 13:44:58.350820 4857 generic.go:334] "Generic (PLEG): container finished" podID="5f40ecda-b519-4cfe-8b7b-6854e018fe24" 
containerID="2f64c9ee64d60e1caeb48290dd26236c5311f2f0ffa2e6ad59c2433f2eaff2b0" exitCode=2 Nov 28 13:44:58 crc kubenswrapper[4857]: I1128 13:44:58.350850 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-6gf7t" event={"ID":"5f40ecda-b519-4cfe-8b7b-6854e018fe24","Type":"ContainerDied","Data":"2f64c9ee64d60e1caeb48290dd26236c5311f2f0ffa2e6ad59c2433f2eaff2b0"} Nov 28 13:44:58 crc kubenswrapper[4857]: I1128 13:44:58.350872 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-6gf7t" event={"ID":"5f40ecda-b519-4cfe-8b7b-6854e018fe24","Type":"ContainerDied","Data":"48727e21f4e7089b0ebf5bfd06470f6ac7b11000c5d6418336dcf275dabf402f"} Nov 28 13:44:58 crc kubenswrapper[4857]: I1128 13:44:58.350910 4857 scope.go:117] "RemoveContainer" containerID="2f64c9ee64d60e1caeb48290dd26236c5311f2f0ffa2e6ad59c2433f2eaff2b0" Nov 28 13:44:58 crc kubenswrapper[4857]: I1128 13:44:58.351072 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-6gf7t" Nov 28 13:44:58 crc kubenswrapper[4857]: I1128 13:44:58.385101 4857 scope.go:117] "RemoveContainer" containerID="2f64c9ee64d60e1caeb48290dd26236c5311f2f0ffa2e6ad59c2433f2eaff2b0" Nov 28 13:44:58 crc kubenswrapper[4857]: E1128 13:44:58.385483 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2f64c9ee64d60e1caeb48290dd26236c5311f2f0ffa2e6ad59c2433f2eaff2b0\": container with ID starting with 2f64c9ee64d60e1caeb48290dd26236c5311f2f0ffa2e6ad59c2433f2eaff2b0 not found: ID does not exist" containerID="2f64c9ee64d60e1caeb48290dd26236c5311f2f0ffa2e6ad59c2433f2eaff2b0" Nov 28 13:44:58 crc kubenswrapper[4857]: I1128 13:44:58.385522 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f64c9ee64d60e1caeb48290dd26236c5311f2f0ffa2e6ad59c2433f2eaff2b0"} err="failed to get container status \"2f64c9ee64d60e1caeb48290dd26236c5311f2f0ffa2e6ad59c2433f2eaff2b0\": rpc error: code = NotFound desc = could not find container \"2f64c9ee64d60e1caeb48290dd26236c5311f2f0ffa2e6ad59c2433f2eaff2b0\": container with ID starting with 2f64c9ee64d60e1caeb48290dd26236c5311f2f0ffa2e6ad59c2433f2eaff2b0 not found: ID does not exist" Nov 28 13:44:58 crc kubenswrapper[4857]: I1128 13:44:58.401994 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-6gf7t"] Nov 28 13:44:58 crc kubenswrapper[4857]: I1128 13:44:58.408062 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-6gf7t"] Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.158021 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405625-rm96d"] Nov 28 13:45:00 crc kubenswrapper[4857]: E1128 13:45:00.158574 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f40ecda-b519-4cfe-8b7b-6854e018fe24" containerName="console" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.158589 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f40ecda-b519-4cfe-8b7b-6854e018fe24" containerName="console" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.158724 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f40ecda-b519-4cfe-8b7b-6854e018fe24" containerName="console" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.159191 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405625-rm96d" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.161679 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.161935 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.170313 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405625-rm96d"] Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.238566 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f40ecda-b519-4cfe-8b7b-6854e018fe24" path="/var/lib/kubelet/pods/5f40ecda-b519-4cfe-8b7b-6854e018fe24/volumes" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.316345 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gkpn7\" (UniqueName: \"kubernetes.io/projected/ae74a118-9633-40c1-8788-03ac27adef01-kube-api-access-gkpn7\") pod \"collect-profiles-29405625-rm96d\" (UID: \"ae74a118-9633-40c1-8788-03ac27adef01\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405625-rm96d" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.316409 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ae74a118-9633-40c1-8788-03ac27adef01-secret-volume\") pod \"collect-profiles-29405625-rm96d\" (UID: \"ae74a118-9633-40c1-8788-03ac27adef01\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405625-rm96d" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.316439 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ae74a118-9633-40c1-8788-03ac27adef01-config-volume\") pod \"collect-profiles-29405625-rm96d\" (UID: \"ae74a118-9633-40c1-8788-03ac27adef01\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405625-rm96d" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.365477 4857 generic.go:334] "Generic (PLEG): container finished" podID="adabc1a1-93dd-4d7a-9971-19c219dafc3c" containerID="9c8dde527f737a926db813f14ba82d207d6566720d26162c56a9e0483558106b" exitCode=0 Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.365530 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f837hw2q" event={"ID":"adabc1a1-93dd-4d7a-9971-19c219dafc3c","Type":"ContainerDied","Data":"9c8dde527f737a926db813f14ba82d207d6566720d26162c56a9e0483558106b"} Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.417989 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gkpn7\" (UniqueName: \"kubernetes.io/projected/ae74a118-9633-40c1-8788-03ac27adef01-kube-api-access-gkpn7\") pod \"collect-profiles-29405625-rm96d\" (UID: \"ae74a118-9633-40c1-8788-03ac27adef01\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405625-rm96d" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.418062 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: 
\"kubernetes.io/secret/ae74a118-9633-40c1-8788-03ac27adef01-secret-volume\") pod \"collect-profiles-29405625-rm96d\" (UID: \"ae74a118-9633-40c1-8788-03ac27adef01\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405625-rm96d" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.418094 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ae74a118-9633-40c1-8788-03ac27adef01-config-volume\") pod \"collect-profiles-29405625-rm96d\" (UID: \"ae74a118-9633-40c1-8788-03ac27adef01\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405625-rm96d" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.419885 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ae74a118-9633-40c1-8788-03ac27adef01-config-volume\") pod \"collect-profiles-29405625-rm96d\" (UID: \"ae74a118-9633-40c1-8788-03ac27adef01\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405625-rm96d" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.425479 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ae74a118-9633-40c1-8788-03ac27adef01-secret-volume\") pod \"collect-profiles-29405625-rm96d\" (UID: \"ae74a118-9633-40c1-8788-03ac27adef01\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405625-rm96d" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.440982 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gkpn7\" (UniqueName: \"kubernetes.io/projected/ae74a118-9633-40c1-8788-03ac27adef01-kube-api-access-gkpn7\") pod \"collect-profiles-29405625-rm96d\" (UID: \"ae74a118-9633-40c1-8788-03ac27adef01\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405625-rm96d" Nov 28 13:45:00 crc kubenswrapper[4857]: I1128 13:45:00.522327 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405625-rm96d" Nov 28 13:45:01 crc kubenswrapper[4857]: I1128 13:45:01.006363 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405625-rm96d"] Nov 28 13:45:01 crc kubenswrapper[4857]: I1128 13:45:01.371750 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405625-rm96d" event={"ID":"ae74a118-9633-40c1-8788-03ac27adef01","Type":"ContainerStarted","Data":"078b73789d96dda3a2a411da48214510e6d9a901f8d426e31808296793d96a20"} Nov 28 13:45:01 crc kubenswrapper[4857]: I1128 13:45:01.371793 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405625-rm96d" event={"ID":"ae74a118-9633-40c1-8788-03ac27adef01","Type":"ContainerStarted","Data":"80ede64479abe5618f5ddfd5fbe10fc33269ab19fa78a0650f0c18e50310b566"} Nov 28 13:45:01 crc kubenswrapper[4857]: I1128 13:45:01.374331 4857 generic.go:334] "Generic (PLEG): container finished" podID="adabc1a1-93dd-4d7a-9971-19c219dafc3c" containerID="952829a4a7df6f1cb27a6112d0d5c0548af8254c4219d5a08b1e30f882e6f40d" exitCode=0 Nov 28 13:45:01 crc kubenswrapper[4857]: I1128 13:45:01.374428 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f837hw2q" event={"ID":"adabc1a1-93dd-4d7a-9971-19c219dafc3c","Type":"ContainerDied","Data":"952829a4a7df6f1cb27a6112d0d5c0548af8254c4219d5a08b1e30f882e6f40d"} Nov 28 13:45:01 crc kubenswrapper[4857]: I1128 13:45:01.387320 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29405625-rm96d" podStartSLOduration=1.387305257 podStartE2EDuration="1.387305257s" podCreationTimestamp="2025-11-28 13:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:45:01.386468664 +0000 UTC m=+951.510410101" watchObservedRunningTime="2025-11-28 13:45:01.387305257 +0000 UTC m=+951.511246694" Nov 28 13:45:02 crc kubenswrapper[4857]: I1128 13:45:02.388849 4857 generic.go:334] "Generic (PLEG): container finished" podID="ae74a118-9633-40c1-8788-03ac27adef01" containerID="078b73789d96dda3a2a411da48214510e6d9a901f8d426e31808296793d96a20" exitCode=0 Nov 28 13:45:02 crc kubenswrapper[4857]: I1128 13:45:02.388921 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405625-rm96d" event={"ID":"ae74a118-9633-40c1-8788-03ac27adef01","Type":"ContainerDied","Data":"078b73789d96dda3a2a411da48214510e6d9a901f8d426e31808296793d96a20"} Nov 28 13:45:02 crc kubenswrapper[4857]: I1128 13:45:02.612396 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f837hw2q" Nov 28 13:45:02 crc kubenswrapper[4857]: I1128 13:45:02.753153 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/adabc1a1-93dd-4d7a-9971-19c219dafc3c-bundle\") pod \"adabc1a1-93dd-4d7a-9971-19c219dafc3c\" (UID: \"adabc1a1-93dd-4d7a-9971-19c219dafc3c\") " Nov 28 13:45:02 crc kubenswrapper[4857]: I1128 13:45:02.753589 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/adabc1a1-93dd-4d7a-9971-19c219dafc3c-util\") pod \"adabc1a1-93dd-4d7a-9971-19c219dafc3c\" (UID: \"adabc1a1-93dd-4d7a-9971-19c219dafc3c\") " Nov 28 13:45:02 crc kubenswrapper[4857]: I1128 13:45:02.753698 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8cqtn\" (UniqueName: \"kubernetes.io/projected/adabc1a1-93dd-4d7a-9971-19c219dafc3c-kube-api-access-8cqtn\") pod \"adabc1a1-93dd-4d7a-9971-19c219dafc3c\" (UID: \"adabc1a1-93dd-4d7a-9971-19c219dafc3c\") " Nov 28 13:45:02 crc kubenswrapper[4857]: I1128 13:45:02.754436 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/adabc1a1-93dd-4d7a-9971-19c219dafc3c-bundle" (OuterVolumeSpecName: "bundle") pod "adabc1a1-93dd-4d7a-9971-19c219dafc3c" (UID: "adabc1a1-93dd-4d7a-9971-19c219dafc3c"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:45:02 crc kubenswrapper[4857]: I1128 13:45:02.758993 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/adabc1a1-93dd-4d7a-9971-19c219dafc3c-kube-api-access-8cqtn" (OuterVolumeSpecName: "kube-api-access-8cqtn") pod "adabc1a1-93dd-4d7a-9971-19c219dafc3c" (UID: "adabc1a1-93dd-4d7a-9971-19c219dafc3c"). InnerVolumeSpecName "kube-api-access-8cqtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:45:02 crc kubenswrapper[4857]: I1128 13:45:02.787092 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/adabc1a1-93dd-4d7a-9971-19c219dafc3c-util" (OuterVolumeSpecName: "util") pod "adabc1a1-93dd-4d7a-9971-19c219dafc3c" (UID: "adabc1a1-93dd-4d7a-9971-19c219dafc3c"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:45:02 crc kubenswrapper[4857]: I1128 13:45:02.855350 4857 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/adabc1a1-93dd-4d7a-9971-19c219dafc3c-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:45:02 crc kubenswrapper[4857]: I1128 13:45:02.855388 4857 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/adabc1a1-93dd-4d7a-9971-19c219dafc3c-util\") on node \"crc\" DevicePath \"\"" Nov 28 13:45:02 crc kubenswrapper[4857]: I1128 13:45:02.855402 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8cqtn\" (UniqueName: \"kubernetes.io/projected/adabc1a1-93dd-4d7a-9971-19c219dafc3c-kube-api-access-8cqtn\") on node \"crc\" DevicePath \"\"" Nov 28 13:45:03 crc kubenswrapper[4857]: I1128 13:45:03.399926 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f837hw2q" event={"ID":"adabc1a1-93dd-4d7a-9971-19c219dafc3c","Type":"ContainerDied","Data":"3f8705e526a4dfcc4d4f55cea7278ae7ac8edf11c204dd0647a4fb58cb5b0d38"} Nov 28 13:45:03 crc kubenswrapper[4857]: I1128 13:45:03.399973 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f837hw2q" Nov 28 13:45:03 crc kubenswrapper[4857]: I1128 13:45:03.399986 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3f8705e526a4dfcc4d4f55cea7278ae7ac8edf11c204dd0647a4fb58cb5b0d38" Nov 28 13:45:03 crc kubenswrapper[4857]: I1128 13:45:03.729120 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405625-rm96d" Nov 28 13:45:03 crc kubenswrapper[4857]: I1128 13:45:03.872772 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ae74a118-9633-40c1-8788-03ac27adef01-secret-volume\") pod \"ae74a118-9633-40c1-8788-03ac27adef01\" (UID: \"ae74a118-9633-40c1-8788-03ac27adef01\") " Nov 28 13:45:03 crc kubenswrapper[4857]: I1128 13:45:03.872882 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gkpn7\" (UniqueName: \"kubernetes.io/projected/ae74a118-9633-40c1-8788-03ac27adef01-kube-api-access-gkpn7\") pod \"ae74a118-9633-40c1-8788-03ac27adef01\" (UID: \"ae74a118-9633-40c1-8788-03ac27adef01\") " Nov 28 13:45:03 crc kubenswrapper[4857]: I1128 13:45:03.873023 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ae74a118-9633-40c1-8788-03ac27adef01-config-volume\") pod \"ae74a118-9633-40c1-8788-03ac27adef01\" (UID: \"ae74a118-9633-40c1-8788-03ac27adef01\") " Nov 28 13:45:03 crc kubenswrapper[4857]: I1128 13:45:03.873650 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae74a118-9633-40c1-8788-03ac27adef01-config-volume" (OuterVolumeSpecName: "config-volume") pod "ae74a118-9633-40c1-8788-03ac27adef01" (UID: "ae74a118-9633-40c1-8788-03ac27adef01"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:45:03 crc kubenswrapper[4857]: I1128 13:45:03.877525 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae74a118-9633-40c1-8788-03ac27adef01-kube-api-access-gkpn7" (OuterVolumeSpecName: "kube-api-access-gkpn7") pod "ae74a118-9633-40c1-8788-03ac27adef01" (UID: "ae74a118-9633-40c1-8788-03ac27adef01"). InnerVolumeSpecName "kube-api-access-gkpn7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:45:03 crc kubenswrapper[4857]: I1128 13:45:03.878441 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ae74a118-9633-40c1-8788-03ac27adef01-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "ae74a118-9633-40c1-8788-03ac27adef01" (UID: "ae74a118-9633-40c1-8788-03ac27adef01"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:45:03 crc kubenswrapper[4857]: I1128 13:45:03.974778 4857 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ae74a118-9633-40c1-8788-03ac27adef01-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 13:45:03 crc kubenswrapper[4857]: I1128 13:45:03.974813 4857 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ae74a118-9633-40c1-8788-03ac27adef01-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 13:45:03 crc kubenswrapper[4857]: I1128 13:45:03.974825 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gkpn7\" (UniqueName: \"kubernetes.io/projected/ae74a118-9633-40c1-8788-03ac27adef01-kube-api-access-gkpn7\") on node \"crc\" DevicePath \"\"" Nov 28 13:45:04 crc kubenswrapper[4857]: I1128 13:45:04.405640 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405625-rm96d" event={"ID":"ae74a118-9633-40c1-8788-03ac27adef01","Type":"ContainerDied","Data":"80ede64479abe5618f5ddfd5fbe10fc33269ab19fa78a0650f0c18e50310b566"} Nov 28 13:45:04 crc kubenswrapper[4857]: I1128 13:45:04.405679 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="80ede64479abe5618f5ddfd5fbe10fc33269ab19fa78a0650f0c18e50310b566" Nov 28 13:45:04 crc kubenswrapper[4857]: I1128 13:45:04.405700 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405625-rm96d" Nov 28 13:45:12 crc kubenswrapper[4857]: I1128 13:45:12.577909 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-57db9444fc-x2xfl"] Nov 28 13:45:12 crc kubenswrapper[4857]: E1128 13:45:12.578941 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="adabc1a1-93dd-4d7a-9971-19c219dafc3c" containerName="util" Nov 28 13:45:12 crc kubenswrapper[4857]: I1128 13:45:12.578978 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="adabc1a1-93dd-4d7a-9971-19c219dafc3c" containerName="util" Nov 28 13:45:12 crc kubenswrapper[4857]: E1128 13:45:12.578991 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="adabc1a1-93dd-4d7a-9971-19c219dafc3c" containerName="pull" Nov 28 13:45:12 crc kubenswrapper[4857]: I1128 13:45:12.578999 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="adabc1a1-93dd-4d7a-9971-19c219dafc3c" containerName="pull" Nov 28 13:45:12 crc kubenswrapper[4857]: E1128 13:45:12.579012 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="adabc1a1-93dd-4d7a-9971-19c219dafc3c" containerName="extract" Nov 28 13:45:12 crc kubenswrapper[4857]: I1128 13:45:12.579020 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="adabc1a1-93dd-4d7a-9971-19c219dafc3c" containerName="extract" Nov 28 13:45:12 crc kubenswrapper[4857]: E1128 13:45:12.579050 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae74a118-9633-40c1-8788-03ac27adef01" containerName="collect-profiles" Nov 28 13:45:12 crc kubenswrapper[4857]: I1128 13:45:12.579058 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae74a118-9633-40c1-8788-03ac27adef01" containerName="collect-profiles" Nov 28 13:45:12 crc kubenswrapper[4857]: I1128 13:45:12.579190 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="adabc1a1-93dd-4d7a-9971-19c219dafc3c" containerName="extract" Nov 28 13:45:12 crc kubenswrapper[4857]: I1128 13:45:12.579212 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae74a118-9633-40c1-8788-03ac27adef01" containerName="collect-profiles" Nov 28 13:45:12 crc kubenswrapper[4857]: I1128 13:45:12.579699 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-57db9444fc-x2xfl" Nov 28 13:45:12 crc kubenswrapper[4857]: I1128 13:45:12.581910 4857 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-cwjwv" Nov 28 13:45:12 crc kubenswrapper[4857]: I1128 13:45:12.582077 4857 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 28 13:45:12 crc kubenswrapper[4857]: I1128 13:45:12.582290 4857 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Nov 28 13:45:12 crc kubenswrapper[4857]: I1128 13:45:12.582400 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 28 13:45:12 crc kubenswrapper[4857]: I1128 13:45:12.582511 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 28 13:45:12 crc kubenswrapper[4857]: I1128 13:45:12.600320 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-57db9444fc-x2xfl"] Nov 28 13:45:12 crc kubenswrapper[4857]: I1128 13:45:12.683155 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2819a401-96f9-4d83-a331-70e053cb7226-webhook-cert\") pod \"metallb-operator-controller-manager-57db9444fc-x2xfl\" (UID: \"2819a401-96f9-4d83-a331-70e053cb7226\") " pod="metallb-system/metallb-operator-controller-manager-57db9444fc-x2xfl" Nov 28 13:45:12 crc kubenswrapper[4857]: I1128 13:45:12.683470 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2819a401-96f9-4d83-a331-70e053cb7226-apiservice-cert\") pod \"metallb-operator-controller-manager-57db9444fc-x2xfl\" (UID: \"2819a401-96f9-4d83-a331-70e053cb7226\") " pod="metallb-system/metallb-operator-controller-manager-57db9444fc-x2xfl" Nov 28 13:45:12 crc kubenswrapper[4857]: I1128 13:45:12.683494 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c579d\" (UniqueName: \"kubernetes.io/projected/2819a401-96f9-4d83-a331-70e053cb7226-kube-api-access-c579d\") pod \"metallb-operator-controller-manager-57db9444fc-x2xfl\" (UID: \"2819a401-96f9-4d83-a331-70e053cb7226\") " pod="metallb-system/metallb-operator-controller-manager-57db9444fc-x2xfl" Nov 28 13:45:12 crc kubenswrapper[4857]: I1128 13:45:12.784738 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2819a401-96f9-4d83-a331-70e053cb7226-webhook-cert\") pod \"metallb-operator-controller-manager-57db9444fc-x2xfl\" (UID: \"2819a401-96f9-4d83-a331-70e053cb7226\") " pod="metallb-system/metallb-operator-controller-manager-57db9444fc-x2xfl" Nov 28 13:45:12 crc kubenswrapper[4857]: I1128 13:45:12.784783 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2819a401-96f9-4d83-a331-70e053cb7226-apiservice-cert\") pod \"metallb-operator-controller-manager-57db9444fc-x2xfl\" (UID: \"2819a401-96f9-4d83-a331-70e053cb7226\") " pod="metallb-system/metallb-operator-controller-manager-57db9444fc-x2xfl" Nov 28 13:45:12 crc kubenswrapper[4857]: I1128 13:45:12.784807 4857 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c579d\" (UniqueName: \"kubernetes.io/projected/2819a401-96f9-4d83-a331-70e053cb7226-kube-api-access-c579d\") pod \"metallb-operator-controller-manager-57db9444fc-x2xfl\" (UID: \"2819a401-96f9-4d83-a331-70e053cb7226\") " pod="metallb-system/metallb-operator-controller-manager-57db9444fc-x2xfl" Nov 28 13:45:12 crc kubenswrapper[4857]: I1128 13:45:12.790701 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2819a401-96f9-4d83-a331-70e053cb7226-apiservice-cert\") pod \"metallb-operator-controller-manager-57db9444fc-x2xfl\" (UID: \"2819a401-96f9-4d83-a331-70e053cb7226\") " pod="metallb-system/metallb-operator-controller-manager-57db9444fc-x2xfl" Nov 28 13:45:12 crc kubenswrapper[4857]: I1128 13:45:12.800445 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c579d\" (UniqueName: \"kubernetes.io/projected/2819a401-96f9-4d83-a331-70e053cb7226-kube-api-access-c579d\") pod \"metallb-operator-controller-manager-57db9444fc-x2xfl\" (UID: \"2819a401-96f9-4d83-a331-70e053cb7226\") " pod="metallb-system/metallb-operator-controller-manager-57db9444fc-x2xfl" Nov 28 13:45:12 crc kubenswrapper[4857]: I1128 13:45:12.802999 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2819a401-96f9-4d83-a331-70e053cb7226-webhook-cert\") pod \"metallb-operator-controller-manager-57db9444fc-x2xfl\" (UID: \"2819a401-96f9-4d83-a331-70e053cb7226\") " pod="metallb-system/metallb-operator-controller-manager-57db9444fc-x2xfl" Nov 28 13:45:12 crc kubenswrapper[4857]: I1128 13:45:12.822991 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-f48cdcb6d-5fgqz"] Nov 28 13:45:12 crc kubenswrapper[4857]: I1128 13:45:12.823653 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-f48cdcb6d-5fgqz" Nov 28 13:45:12 crc kubenswrapper[4857]: I1128 13:45:12.826240 4857 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 28 13:45:12 crc kubenswrapper[4857]: I1128 13:45:12.826486 4857 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 28 13:45:12 crc kubenswrapper[4857]: I1128 13:45:12.827000 4857 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-mj6n4" Nov 28 13:45:12 crc kubenswrapper[4857]: I1128 13:45:12.849823 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-f48cdcb6d-5fgqz"] Nov 28 13:45:12 crc kubenswrapper[4857]: I1128 13:45:12.896528 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-57db9444fc-x2xfl" Nov 28 13:45:12 crc kubenswrapper[4857]: I1128 13:45:12.987161 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l77zn\" (UniqueName: \"kubernetes.io/projected/c72814a1-86c3-4023-94fe-c492625d1c6c-kube-api-access-l77zn\") pod \"metallb-operator-webhook-server-f48cdcb6d-5fgqz\" (UID: \"c72814a1-86c3-4023-94fe-c492625d1c6c\") " pod="metallb-system/metallb-operator-webhook-server-f48cdcb6d-5fgqz" Nov 28 13:45:12 crc kubenswrapper[4857]: I1128 13:45:12.987222 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c72814a1-86c3-4023-94fe-c492625d1c6c-webhook-cert\") pod \"metallb-operator-webhook-server-f48cdcb6d-5fgqz\" (UID: \"c72814a1-86c3-4023-94fe-c492625d1c6c\") " pod="metallb-system/metallb-operator-webhook-server-f48cdcb6d-5fgqz" Nov 28 13:45:12 crc kubenswrapper[4857]: I1128 13:45:12.987240 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c72814a1-86c3-4023-94fe-c492625d1c6c-apiservice-cert\") pod \"metallb-operator-webhook-server-f48cdcb6d-5fgqz\" (UID: \"c72814a1-86c3-4023-94fe-c492625d1c6c\") " pod="metallb-system/metallb-operator-webhook-server-f48cdcb6d-5fgqz" Nov 28 13:45:13 crc kubenswrapper[4857]: I1128 13:45:13.088539 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l77zn\" (UniqueName: \"kubernetes.io/projected/c72814a1-86c3-4023-94fe-c492625d1c6c-kube-api-access-l77zn\") pod \"metallb-operator-webhook-server-f48cdcb6d-5fgqz\" (UID: \"c72814a1-86c3-4023-94fe-c492625d1c6c\") " pod="metallb-system/metallb-operator-webhook-server-f48cdcb6d-5fgqz" Nov 28 13:45:13 crc kubenswrapper[4857]: I1128 13:45:13.088874 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c72814a1-86c3-4023-94fe-c492625d1c6c-webhook-cert\") pod \"metallb-operator-webhook-server-f48cdcb6d-5fgqz\" (UID: \"c72814a1-86c3-4023-94fe-c492625d1c6c\") " pod="metallb-system/metallb-operator-webhook-server-f48cdcb6d-5fgqz" Nov 28 13:45:13 crc kubenswrapper[4857]: I1128 13:45:13.088893 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c72814a1-86c3-4023-94fe-c492625d1c6c-apiservice-cert\") pod \"metallb-operator-webhook-server-f48cdcb6d-5fgqz\" (UID: \"c72814a1-86c3-4023-94fe-c492625d1c6c\") " pod="metallb-system/metallb-operator-webhook-server-f48cdcb6d-5fgqz" Nov 28 13:45:13 crc kubenswrapper[4857]: I1128 13:45:13.103780 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c72814a1-86c3-4023-94fe-c492625d1c6c-webhook-cert\") pod \"metallb-operator-webhook-server-f48cdcb6d-5fgqz\" (UID: \"c72814a1-86c3-4023-94fe-c492625d1c6c\") " pod="metallb-system/metallb-operator-webhook-server-f48cdcb6d-5fgqz" Nov 28 13:45:13 crc kubenswrapper[4857]: I1128 13:45:13.105582 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c72814a1-86c3-4023-94fe-c492625d1c6c-apiservice-cert\") pod \"metallb-operator-webhook-server-f48cdcb6d-5fgqz\" (UID: \"c72814a1-86c3-4023-94fe-c492625d1c6c\") " 
pod="metallb-system/metallb-operator-webhook-server-f48cdcb6d-5fgqz" Nov 28 13:45:13 crc kubenswrapper[4857]: I1128 13:45:13.113010 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l77zn\" (UniqueName: \"kubernetes.io/projected/c72814a1-86c3-4023-94fe-c492625d1c6c-kube-api-access-l77zn\") pod \"metallb-operator-webhook-server-f48cdcb6d-5fgqz\" (UID: \"c72814a1-86c3-4023-94fe-c492625d1c6c\") " pod="metallb-system/metallb-operator-webhook-server-f48cdcb6d-5fgqz" Nov 28 13:45:13 crc kubenswrapper[4857]: I1128 13:45:13.138484 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-57db9444fc-x2xfl"] Nov 28 13:45:13 crc kubenswrapper[4857]: I1128 13:45:13.144764 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-f48cdcb6d-5fgqz" Nov 28 13:45:13 crc kubenswrapper[4857]: I1128 13:45:13.344470 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-f48cdcb6d-5fgqz"] Nov 28 13:45:13 crc kubenswrapper[4857]: W1128 13:45:13.346660 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc72814a1_86c3_4023_94fe_c492625d1c6c.slice/crio-6d15fd9ca356e83deb4351ca614e1a5493ff7e1be7f8bec28f1c247a252cf2ef WatchSource:0}: Error finding container 6d15fd9ca356e83deb4351ca614e1a5493ff7e1be7f8bec28f1c247a252cf2ef: Status 404 returned error can't find the container with id 6d15fd9ca356e83deb4351ca614e1a5493ff7e1be7f8bec28f1c247a252cf2ef Nov 28 13:45:13 crc kubenswrapper[4857]: I1128 13:45:13.453520 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-57db9444fc-x2xfl" event={"ID":"2819a401-96f9-4d83-a331-70e053cb7226","Type":"ContainerStarted","Data":"3bf40d1bec68903f1481b76ccf529d2f552359186d56926d301c83b82df5a359"} Nov 28 13:45:13 crc kubenswrapper[4857]: I1128 13:45:13.455172 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-f48cdcb6d-5fgqz" event={"ID":"c72814a1-86c3-4023-94fe-c492625d1c6c","Type":"ContainerStarted","Data":"6d15fd9ca356e83deb4351ca614e1a5493ff7e1be7f8bec28f1c247a252cf2ef"} Nov 28 13:45:18 crc kubenswrapper[4857]: I1128 13:45:18.492517 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-57db9444fc-x2xfl" event={"ID":"2819a401-96f9-4d83-a331-70e053cb7226","Type":"ContainerStarted","Data":"d2d69a8355f0b5a739a59a97a8d12183f3ed5be339e5d4c81cf056ce6a2fce13"} Nov 28 13:45:18 crc kubenswrapper[4857]: I1128 13:45:18.493994 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-f48cdcb6d-5fgqz" event={"ID":"c72814a1-86c3-4023-94fe-c492625d1c6c","Type":"ContainerStarted","Data":"bd99c627ea405d6778f12a84548f7dbc546eee3d827d91dbfed42446597bbba9"} Nov 28 13:45:18 crc kubenswrapper[4857]: I1128 13:45:18.494093 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-57db9444fc-x2xfl" Nov 28 13:45:18 crc kubenswrapper[4857]: I1128 13:45:18.494160 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-f48cdcb6d-5fgqz" Nov 28 13:45:18 crc kubenswrapper[4857]: I1128 13:45:18.508622 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="metallb-system/metallb-operator-controller-manager-57db9444fc-x2xfl" podStartSLOduration=3.5811044709999997 podStartE2EDuration="6.50860709s" podCreationTimestamp="2025-11-28 13:45:12 +0000 UTC" firstStartedPulling="2025-11-28 13:45:13.154877424 +0000 UTC m=+963.278818861" lastFinishedPulling="2025-11-28 13:45:16.082380043 +0000 UTC m=+966.206321480" observedRunningTime="2025-11-28 13:45:18.506190584 +0000 UTC m=+968.630132021" watchObservedRunningTime="2025-11-28 13:45:18.50860709 +0000 UTC m=+968.632548527" Nov 28 13:45:18 crc kubenswrapper[4857]: I1128 13:45:18.529559 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-f48cdcb6d-5fgqz" podStartSLOduration=2.318451285 podStartE2EDuration="6.529537022s" podCreationTimestamp="2025-11-28 13:45:12 +0000 UTC" firstStartedPulling="2025-11-28 13:45:13.349468069 +0000 UTC m=+963.473409506" lastFinishedPulling="2025-11-28 13:45:17.560553806 +0000 UTC m=+967.684495243" observedRunningTime="2025-11-28 13:45:18.524847364 +0000 UTC m=+968.648788821" watchObservedRunningTime="2025-11-28 13:45:18.529537022 +0000 UTC m=+968.653478459" Nov 28 13:45:33 crc kubenswrapper[4857]: I1128 13:45:33.151034 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-f48cdcb6d-5fgqz" Nov 28 13:45:41 crc kubenswrapper[4857]: I1128 13:45:41.309283 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:45:41 crc kubenswrapper[4857]: I1128 13:45:41.310727 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:45:52 crc kubenswrapper[4857]: I1128 13:45:52.901335 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-57db9444fc-x2xfl" Nov 28 13:45:53 crc kubenswrapper[4857]: I1128 13:45:53.786640 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-nxzbf"] Nov 28 13:45:53 crc kubenswrapper[4857]: I1128 13:45:53.790530 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-nxzbf" Nov 28 13:45:53 crc kubenswrapper[4857]: I1128 13:45:53.795812 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-jdrbw"] Nov 28 13:45:53 crc kubenswrapper[4857]: I1128 13:45:53.796598 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 28 13:45:53 crc kubenswrapper[4857]: I1128 13:45:53.796807 4857 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-pbdc2" Nov 28 13:45:53 crc kubenswrapper[4857]: I1128 13:45:53.798734 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-jdrbw" Nov 28 13:45:53 crc kubenswrapper[4857]: I1128 13:45:53.802050 4857 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 28 13:45:53 crc kubenswrapper[4857]: I1128 13:45:53.815384 4857 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 28 13:45:53 crc kubenswrapper[4857]: I1128 13:45:53.822746 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-jdrbw"] Nov 28 13:45:53 crc kubenswrapper[4857]: I1128 13:45:53.873937 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-566t2"] Nov 28 13:45:53 crc kubenswrapper[4857]: I1128 13:45:53.874963 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-566t2" Nov 28 13:45:53 crc kubenswrapper[4857]: I1128 13:45:53.878655 4857 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 28 13:45:53 crc kubenswrapper[4857]: I1128 13:45:53.878865 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 28 13:45:53 crc kubenswrapper[4857]: I1128 13:45:53.879060 4857 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 28 13:45:53 crc kubenswrapper[4857]: I1128 13:45:53.879222 4857 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-gcldk" Nov 28 13:45:53 crc kubenswrapper[4857]: I1128 13:45:53.892857 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-f8648f98b-h9ldf"] Nov 28 13:45:53 crc kubenswrapper[4857]: I1128 13:45:53.893801 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-f8648f98b-h9ldf" Nov 28 13:45:53 crc kubenswrapper[4857]: I1128 13:45:53.895995 4857 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 28 13:45:53 crc kubenswrapper[4857]: I1128 13:45:53.901464 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mwm4g\" (UniqueName: \"kubernetes.io/projected/c325af37-018c-4ea2-b63d-4ab931a9a17c-kube-api-access-mwm4g\") pod \"frr-k8s-webhook-server-7fcb986d4-jdrbw\" (UID: \"c325af37-018c-4ea2-b63d-4ab931a9a17c\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-jdrbw" Nov 28 13:45:53 crc kubenswrapper[4857]: I1128 13:45:53.901507 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c325af37-018c-4ea2-b63d-4ab931a9a17c-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-jdrbw\" (UID: \"c325af37-018c-4ea2-b63d-4ab931a9a17c\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-jdrbw" Nov 28 13:45:53 crc kubenswrapper[4857]: I1128 13:45:53.901535 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/85bce704-7efb-4e9c-a6b0-c2ebbe46877d-frr-startup\") pod \"frr-k8s-nxzbf\" (UID: \"85bce704-7efb-4e9c-a6b0-c2ebbe46877d\") " pod="metallb-system/frr-k8s-nxzbf" Nov 28 13:45:53 crc kubenswrapper[4857]: I1128 13:45:53.901554 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5fwxl\" (UniqueName: \"kubernetes.io/projected/85bce704-7efb-4e9c-a6b0-c2ebbe46877d-kube-api-access-5fwxl\") pod \"frr-k8s-nxzbf\" (UID: \"85bce704-7efb-4e9c-a6b0-c2ebbe46877d\") " pod="metallb-system/frr-k8s-nxzbf" Nov 28 13:45:53 crc kubenswrapper[4857]: I1128 13:45:53.901592 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/85bce704-7efb-4e9c-a6b0-c2ebbe46877d-metrics\") pod \"frr-k8s-nxzbf\" (UID: \"85bce704-7efb-4e9c-a6b0-c2ebbe46877d\") " pod="metallb-system/frr-k8s-nxzbf" Nov 28 13:45:53 crc kubenswrapper[4857]: I1128 13:45:53.901612 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/85bce704-7efb-4e9c-a6b0-c2ebbe46877d-reloader\") pod \"frr-k8s-nxzbf\" (UID: \"85bce704-7efb-4e9c-a6b0-c2ebbe46877d\") " pod="metallb-system/frr-k8s-nxzbf" Nov 28 13:45:53 crc kubenswrapper[4857]: I1128 13:45:53.901655 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/85bce704-7efb-4e9c-a6b0-c2ebbe46877d-frr-conf\") pod \"frr-k8s-nxzbf\" (UID: \"85bce704-7efb-4e9c-a6b0-c2ebbe46877d\") " pod="metallb-system/frr-k8s-nxzbf" Nov 28 13:45:53 crc kubenswrapper[4857]: I1128 13:45:53.901678 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/85bce704-7efb-4e9c-a6b0-c2ebbe46877d-metrics-certs\") pod \"frr-k8s-nxzbf\" (UID: \"85bce704-7efb-4e9c-a6b0-c2ebbe46877d\") " pod="metallb-system/frr-k8s-nxzbf" Nov 28 13:45:53 crc kubenswrapper[4857]: I1128 13:45:53.901699 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" 
(UniqueName: \"kubernetes.io/empty-dir/85bce704-7efb-4e9c-a6b0-c2ebbe46877d-frr-sockets\") pod \"frr-k8s-nxzbf\" (UID: \"85bce704-7efb-4e9c-a6b0-c2ebbe46877d\") " pod="metallb-system/frr-k8s-nxzbf" Nov 28 13:45:53 crc kubenswrapper[4857]: I1128 13:45:53.907710 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-f8648f98b-h9ldf"] Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.002900 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/301b4f9c-0612-40ff-8009-cbf39f36e85e-cert\") pod \"controller-f8648f98b-h9ldf\" (UID: \"301b4f9c-0612-40ff-8009-cbf39f36e85e\") " pod="metallb-system/controller-f8648f98b-h9ldf" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.002963 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/85bce704-7efb-4e9c-a6b0-c2ebbe46877d-frr-startup\") pod \"frr-k8s-nxzbf\" (UID: \"85bce704-7efb-4e9c-a6b0-c2ebbe46877d\") " pod="metallb-system/frr-k8s-nxzbf" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.003022 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5fwxl\" (UniqueName: \"kubernetes.io/projected/85bce704-7efb-4e9c-a6b0-c2ebbe46877d-kube-api-access-5fwxl\") pod \"frr-k8s-nxzbf\" (UID: \"85bce704-7efb-4e9c-a6b0-c2ebbe46877d\") " pod="metallb-system/frr-k8s-nxzbf" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.003907 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/85bce704-7efb-4e9c-a6b0-c2ebbe46877d-metrics\") pod \"frr-k8s-nxzbf\" (UID: \"85bce704-7efb-4e9c-a6b0-c2ebbe46877d\") " pod="metallb-system/frr-k8s-nxzbf" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.004184 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e71259d7-d2b0-45eb-ad34-2c9c7687b885-metrics-certs\") pod \"speaker-566t2\" (UID: \"e71259d7-d2b0-45eb-ad34-2c9c7687b885\") " pod="metallb-system/speaker-566t2" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.004207 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/85bce704-7efb-4e9c-a6b0-c2ebbe46877d-reloader\") pod \"frr-k8s-nxzbf\" (UID: \"85bce704-7efb-4e9c-a6b0-c2ebbe46877d\") " pod="metallb-system/frr-k8s-nxzbf" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.004446 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/85bce704-7efb-4e9c-a6b0-c2ebbe46877d-frr-conf\") pod \"frr-k8s-nxzbf\" (UID: \"85bce704-7efb-4e9c-a6b0-c2ebbe46877d\") " pod="metallb-system/frr-k8s-nxzbf" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.004468 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/85bce704-7efb-4e9c-a6b0-c2ebbe46877d-metrics-certs\") pod \"frr-k8s-nxzbf\" (UID: \"85bce704-7efb-4e9c-a6b0-c2ebbe46877d\") " pod="metallb-system/frr-k8s-nxzbf" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.004485 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/85bce704-7efb-4e9c-a6b0-c2ebbe46877d-frr-sockets\") pod \"frr-k8s-nxzbf\" 
(UID: \"85bce704-7efb-4e9c-a6b0-c2ebbe46877d\") " pod="metallb-system/frr-k8s-nxzbf" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.004507 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/e71259d7-d2b0-45eb-ad34-2c9c7687b885-memberlist\") pod \"speaker-566t2\" (UID: \"e71259d7-d2b0-45eb-ad34-2c9c7687b885\") " pod="metallb-system/speaker-566t2" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.004524 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c8sr9\" (UniqueName: \"kubernetes.io/projected/301b4f9c-0612-40ff-8009-cbf39f36e85e-kube-api-access-c8sr9\") pod \"controller-f8648f98b-h9ldf\" (UID: \"301b4f9c-0612-40ff-8009-cbf39f36e85e\") " pod="metallb-system/controller-f8648f98b-h9ldf" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.004551 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vnbvt\" (UniqueName: \"kubernetes.io/projected/e71259d7-d2b0-45eb-ad34-2c9c7687b885-kube-api-access-vnbvt\") pod \"speaker-566t2\" (UID: \"e71259d7-d2b0-45eb-ad34-2c9c7687b885\") " pod="metallb-system/speaker-566t2" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.004568 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/301b4f9c-0612-40ff-8009-cbf39f36e85e-metrics-certs\") pod \"controller-f8648f98b-h9ldf\" (UID: \"301b4f9c-0612-40ff-8009-cbf39f36e85e\") " pod="metallb-system/controller-f8648f98b-h9ldf" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.004588 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mwm4g\" (UniqueName: \"kubernetes.io/projected/c325af37-018c-4ea2-b63d-4ab931a9a17c-kube-api-access-mwm4g\") pod \"frr-k8s-webhook-server-7fcb986d4-jdrbw\" (UID: \"c325af37-018c-4ea2-b63d-4ab931a9a17c\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-jdrbw" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.004610 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/e71259d7-d2b0-45eb-ad34-2c9c7687b885-metallb-excludel2\") pod \"speaker-566t2\" (UID: \"e71259d7-d2b0-45eb-ad34-2c9c7687b885\") " pod="metallb-system/speaker-566t2" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.004634 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c325af37-018c-4ea2-b63d-4ab931a9a17c-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-jdrbw\" (UID: \"c325af37-018c-4ea2-b63d-4ab931a9a17c\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-jdrbw" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.003833 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/85bce704-7efb-4e9c-a6b0-c2ebbe46877d-frr-startup\") pod \"frr-k8s-nxzbf\" (UID: \"85bce704-7efb-4e9c-a6b0-c2ebbe46877d\") " pod="metallb-system/frr-k8s-nxzbf" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.005356 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/85bce704-7efb-4e9c-a6b0-c2ebbe46877d-frr-conf\") pod \"frr-k8s-nxzbf\" (UID: \"85bce704-7efb-4e9c-a6b0-c2ebbe46877d\") " 
pod="metallb-system/frr-k8s-nxzbf" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.004139 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/85bce704-7efb-4e9c-a6b0-c2ebbe46877d-metrics\") pod \"frr-k8s-nxzbf\" (UID: \"85bce704-7efb-4e9c-a6b0-c2ebbe46877d\") " pod="metallb-system/frr-k8s-nxzbf" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.004418 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/85bce704-7efb-4e9c-a6b0-c2ebbe46877d-reloader\") pod \"frr-k8s-nxzbf\" (UID: \"85bce704-7efb-4e9c-a6b0-c2ebbe46877d\") " pod="metallb-system/frr-k8s-nxzbf" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.005772 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/85bce704-7efb-4e9c-a6b0-c2ebbe46877d-frr-sockets\") pod \"frr-k8s-nxzbf\" (UID: \"85bce704-7efb-4e9c-a6b0-c2ebbe46877d\") " pod="metallb-system/frr-k8s-nxzbf" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.010692 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/85bce704-7efb-4e9c-a6b0-c2ebbe46877d-metrics-certs\") pod \"frr-k8s-nxzbf\" (UID: \"85bce704-7efb-4e9c-a6b0-c2ebbe46877d\") " pod="metallb-system/frr-k8s-nxzbf" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.011112 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c325af37-018c-4ea2-b63d-4ab931a9a17c-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-jdrbw\" (UID: \"c325af37-018c-4ea2-b63d-4ab931a9a17c\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-jdrbw" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.022434 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5fwxl\" (UniqueName: \"kubernetes.io/projected/85bce704-7efb-4e9c-a6b0-c2ebbe46877d-kube-api-access-5fwxl\") pod \"frr-k8s-nxzbf\" (UID: \"85bce704-7efb-4e9c-a6b0-c2ebbe46877d\") " pod="metallb-system/frr-k8s-nxzbf" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.029874 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mwm4g\" (UniqueName: \"kubernetes.io/projected/c325af37-018c-4ea2-b63d-4ab931a9a17c-kube-api-access-mwm4g\") pod \"frr-k8s-webhook-server-7fcb986d4-jdrbw\" (UID: \"c325af37-018c-4ea2-b63d-4ab931a9a17c\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-jdrbw" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.106012 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/e71259d7-d2b0-45eb-ad34-2c9c7687b885-memberlist\") pod \"speaker-566t2\" (UID: \"e71259d7-d2b0-45eb-ad34-2c9c7687b885\") " pod="metallb-system/speaker-566t2" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.106387 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c8sr9\" (UniqueName: \"kubernetes.io/projected/301b4f9c-0612-40ff-8009-cbf39f36e85e-kube-api-access-c8sr9\") pod \"controller-f8648f98b-h9ldf\" (UID: \"301b4f9c-0612-40ff-8009-cbf39f36e85e\") " pod="metallb-system/controller-f8648f98b-h9ldf" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.106585 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vnbvt\" (UniqueName: 
\"kubernetes.io/projected/e71259d7-d2b0-45eb-ad34-2c9c7687b885-kube-api-access-vnbvt\") pod \"speaker-566t2\" (UID: \"e71259d7-d2b0-45eb-ad34-2c9c7687b885\") " pod="metallb-system/speaker-566t2" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.106734 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/301b4f9c-0612-40ff-8009-cbf39f36e85e-metrics-certs\") pod \"controller-f8648f98b-h9ldf\" (UID: \"301b4f9c-0612-40ff-8009-cbf39f36e85e\") " pod="metallb-system/controller-f8648f98b-h9ldf" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.106908 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/e71259d7-d2b0-45eb-ad34-2c9c7687b885-metallb-excludel2\") pod \"speaker-566t2\" (UID: \"e71259d7-d2b0-45eb-ad34-2c9c7687b885\") " pod="metallb-system/speaker-566t2" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.107127 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/301b4f9c-0612-40ff-8009-cbf39f36e85e-cert\") pod \"controller-f8648f98b-h9ldf\" (UID: \"301b4f9c-0612-40ff-8009-cbf39f36e85e\") " pod="metallb-system/controller-f8648f98b-h9ldf" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.107334 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e71259d7-d2b0-45eb-ad34-2c9c7687b885-metrics-certs\") pod \"speaker-566t2\" (UID: \"e71259d7-d2b0-45eb-ad34-2c9c7687b885\") " pod="metallb-system/speaker-566t2" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.107846 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/e71259d7-d2b0-45eb-ad34-2c9c7687b885-metallb-excludel2\") pod \"speaker-566t2\" (UID: \"e71259d7-d2b0-45eb-ad34-2c9c7687b885\") " pod="metallb-system/speaker-566t2" Nov 28 13:45:54 crc kubenswrapper[4857]: E1128 13:45:54.106245 4857 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 28 13:45:54 crc kubenswrapper[4857]: E1128 13:45:54.107929 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e71259d7-d2b0-45eb-ad34-2c9c7687b885-memberlist podName:e71259d7-d2b0-45eb-ad34-2c9c7687b885 nodeName:}" failed. No retries permitted until 2025-11-28 13:45:54.607914444 +0000 UTC m=+1004.731855881 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/e71259d7-d2b0-45eb-ad34-2c9c7687b885-memberlist") pod "speaker-566t2" (UID: "e71259d7-d2b0-45eb-ad34-2c9c7687b885") : secret "metallb-memberlist" not found Nov 28 13:45:54 crc kubenswrapper[4857]: E1128 13:45:54.107086 4857 secret.go:188] Couldn't get secret metallb-system/controller-certs-secret: secret "controller-certs-secret" not found Nov 28 13:45:54 crc kubenswrapper[4857]: E1128 13:45:54.108110 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/301b4f9c-0612-40ff-8009-cbf39f36e85e-metrics-certs podName:301b4f9c-0612-40ff-8009-cbf39f36e85e nodeName:}" failed. No retries permitted until 2025-11-28 13:45:54.608100949 +0000 UTC m=+1004.732042386 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/301b4f9c-0612-40ff-8009-cbf39f36e85e-metrics-certs") pod "controller-f8648f98b-h9ldf" (UID: "301b4f9c-0612-40ff-8009-cbf39f36e85e") : secret "controller-certs-secret" not found Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.114393 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e71259d7-d2b0-45eb-ad34-2c9c7687b885-metrics-certs\") pod \"speaker-566t2\" (UID: \"e71259d7-d2b0-45eb-ad34-2c9c7687b885\") " pod="metallb-system/speaker-566t2" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.119387 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/301b4f9c-0612-40ff-8009-cbf39f36e85e-cert\") pod \"controller-f8648f98b-h9ldf\" (UID: \"301b4f9c-0612-40ff-8009-cbf39f36e85e\") " pod="metallb-system/controller-f8648f98b-h9ldf" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.126302 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c8sr9\" (UniqueName: \"kubernetes.io/projected/301b4f9c-0612-40ff-8009-cbf39f36e85e-kube-api-access-c8sr9\") pod \"controller-f8648f98b-h9ldf\" (UID: \"301b4f9c-0612-40ff-8009-cbf39f36e85e\") " pod="metallb-system/controller-f8648f98b-h9ldf" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.137176 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-nxzbf" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.137612 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vnbvt\" (UniqueName: \"kubernetes.io/projected/e71259d7-d2b0-45eb-ad34-2c9c7687b885-kube-api-access-vnbvt\") pod \"speaker-566t2\" (UID: \"e71259d7-d2b0-45eb-ad34-2c9c7687b885\") " pod="metallb-system/speaker-566t2" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.143408 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-jdrbw" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.566877 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-jdrbw"] Nov 28 13:45:54 crc kubenswrapper[4857]: W1128 13:45:54.575398 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc325af37_018c_4ea2_b63d_4ab931a9a17c.slice/crio-7aff3651540dfc9026d0155b39c5edfd42e692c78cfeb36e7dd3a98d47b88d07 WatchSource:0}: Error finding container 7aff3651540dfc9026d0155b39c5edfd42e692c78cfeb36e7dd3a98d47b88d07: Status 404 returned error can't find the container with id 7aff3651540dfc9026d0155b39c5edfd42e692c78cfeb36e7dd3a98d47b88d07 Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.613676 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/e71259d7-d2b0-45eb-ad34-2c9c7687b885-memberlist\") pod \"speaker-566t2\" (UID: \"e71259d7-d2b0-45eb-ad34-2c9c7687b885\") " pod="metallb-system/speaker-566t2" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.613745 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/301b4f9c-0612-40ff-8009-cbf39f36e85e-metrics-certs\") pod \"controller-f8648f98b-h9ldf\" (UID: \"301b4f9c-0612-40ff-8009-cbf39f36e85e\") " pod="metallb-system/controller-f8648f98b-h9ldf" Nov 28 13:45:54 crc kubenswrapper[4857]: E1128 13:45:54.614667 4857 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 28 13:45:54 crc kubenswrapper[4857]: E1128 13:45:54.614734 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/e71259d7-d2b0-45eb-ad34-2c9c7687b885-memberlist podName:e71259d7-d2b0-45eb-ad34-2c9c7687b885 nodeName:}" failed. No retries permitted until 2025-11-28 13:45:55.614710868 +0000 UTC m=+1005.738652355 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/e71259d7-d2b0-45eb-ad34-2c9c7687b885-memberlist") pod "speaker-566t2" (UID: "e71259d7-d2b0-45eb-ad34-2c9c7687b885") : secret "metallb-memberlist" not found Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.620048 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/301b4f9c-0612-40ff-8009-cbf39f36e85e-metrics-certs\") pod \"controller-f8648f98b-h9ldf\" (UID: \"301b4f9c-0612-40ff-8009-cbf39f36e85e\") " pod="metallb-system/controller-f8648f98b-h9ldf" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.695966 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-jdrbw" event={"ID":"c325af37-018c-4ea2-b63d-4ab931a9a17c","Type":"ContainerStarted","Data":"7aff3651540dfc9026d0155b39c5edfd42e692c78cfeb36e7dd3a98d47b88d07"} Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.697163 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-nxzbf" event={"ID":"85bce704-7efb-4e9c-a6b0-c2ebbe46877d","Type":"ContainerStarted","Data":"b8fdcdff9ea340a4351681a507d6d7eb9fe014415668fccf89ead496d3ab1cbc"} Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.809213 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-f8648f98b-h9ldf" Nov 28 13:45:54 crc kubenswrapper[4857]: I1128 13:45:54.987789 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-f8648f98b-h9ldf"] Nov 28 13:45:54 crc kubenswrapper[4857]: W1128 13:45:54.997096 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod301b4f9c_0612_40ff_8009_cbf39f36e85e.slice/crio-afd04c89e5cb1c916d123c55677c2ee1007e07b11b22e1bf7a1d41427ffcb3fb WatchSource:0}: Error finding container afd04c89e5cb1c916d123c55677c2ee1007e07b11b22e1bf7a1d41427ffcb3fb: Status 404 returned error can't find the container with id afd04c89e5cb1c916d123c55677c2ee1007e07b11b22e1bf7a1d41427ffcb3fb Nov 28 13:45:55 crc kubenswrapper[4857]: I1128 13:45:55.626126 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/e71259d7-d2b0-45eb-ad34-2c9c7687b885-memberlist\") pod \"speaker-566t2\" (UID: \"e71259d7-d2b0-45eb-ad34-2c9c7687b885\") " pod="metallb-system/speaker-566t2" Nov 28 13:45:55 crc kubenswrapper[4857]: I1128 13:45:55.632704 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/e71259d7-d2b0-45eb-ad34-2c9c7687b885-memberlist\") pod \"speaker-566t2\" (UID: \"e71259d7-d2b0-45eb-ad34-2c9c7687b885\") " pod="metallb-system/speaker-566t2" Nov 28 13:45:55 crc kubenswrapper[4857]: I1128 13:45:55.696230 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-566t2" Nov 28 13:45:55 crc kubenswrapper[4857]: I1128 13:45:55.703521 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-h9ldf" event={"ID":"301b4f9c-0612-40ff-8009-cbf39f36e85e","Type":"ContainerStarted","Data":"b53359e5c76db6e4d96f366f6ca7c29933d56f8632370b3155363eb20ff4f763"} Nov 28 13:45:55 crc kubenswrapper[4857]: I1128 13:45:55.703588 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-h9ldf" event={"ID":"301b4f9c-0612-40ff-8009-cbf39f36e85e","Type":"ContainerStarted","Data":"afd04c89e5cb1c916d123c55677c2ee1007e07b11b22e1bf7a1d41427ffcb3fb"} Nov 28 13:45:55 crc kubenswrapper[4857]: W1128 13:45:55.724872 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode71259d7_d2b0_45eb_ad34_2c9c7687b885.slice/crio-4d2116fdfd3cd007cb13c1fc5885c028433b4dcffbd8c88c930f9cd25b74dd6c WatchSource:0}: Error finding container 4d2116fdfd3cd007cb13c1fc5885c028433b4dcffbd8c88c930f9cd25b74dd6c: Status 404 returned error can't find the container with id 4d2116fdfd3cd007cb13c1fc5885c028433b4dcffbd8c88c930f9cd25b74dd6c Nov 28 13:45:56 crc kubenswrapper[4857]: I1128 13:45:56.711555 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-h9ldf" event={"ID":"301b4f9c-0612-40ff-8009-cbf39f36e85e","Type":"ContainerStarted","Data":"dd70a530fcb372db38ed956f9f57561a3bf6c8b794d6abc7a990a15f0fe72896"} Nov 28 13:45:56 crc kubenswrapper[4857]: I1128 13:45:56.711706 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-f8648f98b-h9ldf" Nov 28 13:45:56 crc kubenswrapper[4857]: I1128 13:45:56.717306 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-566t2" 
event={"ID":"e71259d7-d2b0-45eb-ad34-2c9c7687b885","Type":"ContainerStarted","Data":"925b75c2cd7208d35a5ef556c6457c166212871703e0771c58c945f180fcc8ef"} Nov 28 13:45:56 crc kubenswrapper[4857]: I1128 13:45:56.717350 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-566t2" event={"ID":"e71259d7-d2b0-45eb-ad34-2c9c7687b885","Type":"ContainerStarted","Data":"3a1c90006bd4d54567246934a7ce3a14fbfbf93b64fc9494efa789d288740cec"} Nov 28 13:45:56 crc kubenswrapper[4857]: I1128 13:45:56.717367 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-566t2" event={"ID":"e71259d7-d2b0-45eb-ad34-2c9c7687b885","Type":"ContainerStarted","Data":"4d2116fdfd3cd007cb13c1fc5885c028433b4dcffbd8c88c930f9cd25b74dd6c"} Nov 28 13:45:56 crc kubenswrapper[4857]: I1128 13:45:56.717551 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-566t2" Nov 28 13:45:56 crc kubenswrapper[4857]: I1128 13:45:56.734279 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-f8648f98b-h9ldf" podStartSLOduration=3.734263937 podStartE2EDuration="3.734263937s" podCreationTimestamp="2025-11-28 13:45:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:45:56.731159003 +0000 UTC m=+1006.855100440" watchObservedRunningTime="2025-11-28 13:45:56.734263937 +0000 UTC m=+1006.858205384" Nov 28 13:45:56 crc kubenswrapper[4857]: I1128 13:45:56.756762 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-566t2" podStartSLOduration=3.756746886 podStartE2EDuration="3.756746886s" podCreationTimestamp="2025-11-28 13:45:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:45:56.75580664 +0000 UTC m=+1006.879748077" watchObservedRunningTime="2025-11-28 13:45:56.756746886 +0000 UTC m=+1006.880688323" Nov 28 13:46:02 crc kubenswrapper[4857]: I1128 13:46:02.755645 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-jdrbw" event={"ID":"c325af37-018c-4ea2-b63d-4ab931a9a17c","Type":"ContainerStarted","Data":"caf96e7a10a7b07c142170e747afea96da486949a917270c5839695cca74f16f"} Nov 28 13:46:02 crc kubenswrapper[4857]: I1128 13:46:02.756520 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-jdrbw" Nov 28 13:46:02 crc kubenswrapper[4857]: I1128 13:46:02.759664 4857 generic.go:334] "Generic (PLEG): container finished" podID="85bce704-7efb-4e9c-a6b0-c2ebbe46877d" containerID="309a3cfa56cc3fd4973363b25a4506cf8e0c611e335847dc269d6ba280bc50d8" exitCode=0 Nov 28 13:46:02 crc kubenswrapper[4857]: I1128 13:46:02.759716 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-nxzbf" event={"ID":"85bce704-7efb-4e9c-a6b0-c2ebbe46877d","Type":"ContainerDied","Data":"309a3cfa56cc3fd4973363b25a4506cf8e0c611e335847dc269d6ba280bc50d8"} Nov 28 13:46:02 crc kubenswrapper[4857]: I1128 13:46:02.775733 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-jdrbw" podStartSLOduration=2.136805707 podStartE2EDuration="9.775713283s" podCreationTimestamp="2025-11-28 13:45:53 +0000 UTC" firstStartedPulling="2025-11-28 13:45:54.577517041 +0000 UTC m=+1004.701458478" 
lastFinishedPulling="2025-11-28 13:46:02.216424607 +0000 UTC m=+1012.340366054" observedRunningTime="2025-11-28 13:46:02.771513419 +0000 UTC m=+1012.895454866" watchObservedRunningTime="2025-11-28 13:46:02.775713283 +0000 UTC m=+1012.899654730" Nov 28 13:46:03 crc kubenswrapper[4857]: I1128 13:46:03.769507 4857 generic.go:334] "Generic (PLEG): container finished" podID="85bce704-7efb-4e9c-a6b0-c2ebbe46877d" containerID="2a3cfd6e2b0b7ad1d108d00bef53b72ab1e771968a298d2e785d1f224abb6744" exitCode=0 Nov 28 13:46:03 crc kubenswrapper[4857]: I1128 13:46:03.770683 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-nxzbf" event={"ID":"85bce704-7efb-4e9c-a6b0-c2ebbe46877d","Type":"ContainerDied","Data":"2a3cfd6e2b0b7ad1d108d00bef53b72ab1e771968a298d2e785d1f224abb6744"} Nov 28 13:46:04 crc kubenswrapper[4857]: I1128 13:46:04.779521 4857 generic.go:334] "Generic (PLEG): container finished" podID="85bce704-7efb-4e9c-a6b0-c2ebbe46877d" containerID="1dbcbc8eb818d3da6c74bf737847b2f23d869ebba01a7831511df2c4aa76e7be" exitCode=0 Nov 28 13:46:04 crc kubenswrapper[4857]: I1128 13:46:04.779579 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-nxzbf" event={"ID":"85bce704-7efb-4e9c-a6b0-c2ebbe46877d","Type":"ContainerDied","Data":"1dbcbc8eb818d3da6c74bf737847b2f23d869ebba01a7831511df2c4aa76e7be"} Nov 28 13:46:05 crc kubenswrapper[4857]: I1128 13:46:05.791968 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-nxzbf" event={"ID":"85bce704-7efb-4e9c-a6b0-c2ebbe46877d","Type":"ContainerStarted","Data":"8c9d3316d029dd4821a12d34589f9209b6fe84475cb7e4b577ffec4970d2645a"} Nov 28 13:46:05 crc kubenswrapper[4857]: I1128 13:46:05.792304 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-nxzbf" event={"ID":"85bce704-7efb-4e9c-a6b0-c2ebbe46877d","Type":"ContainerStarted","Data":"610442aa17d51635ad24326da99aeb185afe519aa79e2937980c4252d4b48ec9"} Nov 28 13:46:05 crc kubenswrapper[4857]: I1128 13:46:05.792321 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-nxzbf" event={"ID":"85bce704-7efb-4e9c-a6b0-c2ebbe46877d","Type":"ContainerStarted","Data":"4a1c790cda2c70e6ed4055d3eed11074395334a9324e7eadb9e57d44578d350c"} Nov 28 13:46:05 crc kubenswrapper[4857]: I1128 13:46:05.792333 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-nxzbf" event={"ID":"85bce704-7efb-4e9c-a6b0-c2ebbe46877d","Type":"ContainerStarted","Data":"d80ae9a7432f81512f5007479bc00406f262704d569223d1aa1272d2116e4e02"} Nov 28 13:46:06 crc kubenswrapper[4857]: I1128 13:46:06.802488 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-nxzbf" event={"ID":"85bce704-7efb-4e9c-a6b0-c2ebbe46877d","Type":"ContainerStarted","Data":"96a127bd9e6a62ffb31e2cf38dae32aa01b4c0b7236ea43c74b0393fff907069"} Nov 28 13:46:06 crc kubenswrapper[4857]: I1128 13:46:06.802525 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-nxzbf" event={"ID":"85bce704-7efb-4e9c-a6b0-c2ebbe46877d","Type":"ContainerStarted","Data":"342fe770f222ba2c6c2e7e0f5c6181ec5cb43a037223f62bc80988355d59323f"} Nov 28 13:46:06 crc kubenswrapper[4857]: I1128 13:46:06.802939 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-nxzbf" Nov 28 13:46:06 crc kubenswrapper[4857]: I1128 13:46:06.823937 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-nxzbf" 
podStartSLOduration=5.903279776 podStartE2EDuration="13.823922901s" podCreationTimestamp="2025-11-28 13:45:53 +0000 UTC" firstStartedPulling="2025-11-28 13:45:54.322880936 +0000 UTC m=+1004.446822373" lastFinishedPulling="2025-11-28 13:46:02.243524051 +0000 UTC m=+1012.367465498" observedRunningTime="2025-11-28 13:46:06.822192924 +0000 UTC m=+1016.946134371" watchObservedRunningTime="2025-11-28 13:46:06.823922901 +0000 UTC m=+1016.947864338" Nov 28 13:46:09 crc kubenswrapper[4857]: I1128 13:46:09.138196 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-nxzbf" Nov 28 13:46:09 crc kubenswrapper[4857]: I1128 13:46:09.173208 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-nxzbf" Nov 28 13:46:11 crc kubenswrapper[4857]: I1128 13:46:11.308391 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:46:11 crc kubenswrapper[4857]: I1128 13:46:11.308746 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:46:14 crc kubenswrapper[4857]: I1128 13:46:14.144396 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-nxzbf" Nov 28 13:46:14 crc kubenswrapper[4857]: I1128 13:46:14.151536 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-jdrbw" Nov 28 13:46:14 crc kubenswrapper[4857]: I1128 13:46:14.815836 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-f8648f98b-h9ldf" Nov 28 13:46:15 crc kubenswrapper[4857]: I1128 13:46:15.701152 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-566t2" Nov 28 13:46:17 crc kubenswrapper[4857]: I1128 13:46:17.303598 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9"] Nov 28 13:46:17 crc kubenswrapper[4857]: I1128 13:46:17.304899 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9" Nov 28 13:46:17 crc kubenswrapper[4857]: I1128 13:46:17.307453 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 28 13:46:17 crc kubenswrapper[4857]: I1128 13:46:17.315679 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9"] Nov 28 13:46:17 crc kubenswrapper[4857]: I1128 13:46:17.462509 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9\" (UID: \"fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9" Nov 28 13:46:17 crc kubenswrapper[4857]: I1128 13:46:17.462559 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9\" (UID: \"fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9" Nov 28 13:46:17 crc kubenswrapper[4857]: I1128 13:46:17.462582 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bl4qv\" (UniqueName: \"kubernetes.io/projected/fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a-kube-api-access-bl4qv\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9\" (UID: \"fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9" Nov 28 13:46:17 crc kubenswrapper[4857]: I1128 13:46:17.564323 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9\" (UID: \"fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9" Nov 28 13:46:17 crc kubenswrapper[4857]: I1128 13:46:17.564369 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9\" (UID: \"fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9" Nov 28 13:46:17 crc kubenswrapper[4857]: I1128 13:46:17.564392 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bl4qv\" (UniqueName: \"kubernetes.io/projected/fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a-kube-api-access-bl4qv\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9\" (UID: \"fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9" Nov 28 13:46:17 crc kubenswrapper[4857]: I1128 13:46:17.564926 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9\" (UID: \"fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9" Nov 28 13:46:17 crc kubenswrapper[4857]: I1128 13:46:17.565032 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9\" (UID: \"fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9" Nov 28 13:46:17 crc kubenswrapper[4857]: I1128 13:46:17.582590 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bl4qv\" (UniqueName: \"kubernetes.io/projected/fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a-kube-api-access-bl4qv\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9\" (UID: \"fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9" Nov 28 13:46:17 crc kubenswrapper[4857]: I1128 13:46:17.622313 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9" Nov 28 13:46:17 crc kubenswrapper[4857]: I1128 13:46:17.880329 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9"] Nov 28 13:46:17 crc kubenswrapper[4857]: W1128 13:46:17.888697 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfd2aa4c1_fb15_4e3d_b015_e8d033e3fd1a.slice/crio-9774cd6d84a5c7b8bcda9fd9590e16c87a0759a26325cfef6c5a1dd9535e2c86 WatchSource:0}: Error finding container 9774cd6d84a5c7b8bcda9fd9590e16c87a0759a26325cfef6c5a1dd9535e2c86: Status 404 returned error can't find the container with id 9774cd6d84a5c7b8bcda9fd9590e16c87a0759a26325cfef6c5a1dd9535e2c86 Nov 28 13:46:18 crc kubenswrapper[4857]: I1128 13:46:18.885214 4857 generic.go:334] "Generic (PLEG): container finished" podID="fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a" containerID="614e8fdc6e6e9e17ba394904bc0a9ff60e6ed34aebe60f95e8664a89a860a189" exitCode=0 Nov 28 13:46:18 crc kubenswrapper[4857]: I1128 13:46:18.885453 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9" event={"ID":"fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a","Type":"ContainerDied","Data":"614e8fdc6e6e9e17ba394904bc0a9ff60e6ed34aebe60f95e8664a89a860a189"} Nov 28 13:46:18 crc kubenswrapper[4857]: I1128 13:46:18.885625 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9" event={"ID":"fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a","Type":"ContainerStarted","Data":"9774cd6d84a5c7b8bcda9fd9590e16c87a0759a26325cfef6c5a1dd9535e2c86"} Nov 28 13:46:21 crc kubenswrapper[4857]: I1128 13:46:21.905507 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9" event={"ID":"fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a","Type":"ContainerStarted","Data":"3905cd5f9b3fdb786146dc10f75118f85ef50ccc5a5f443d1f26445fbae2844e"} Nov 28 13:46:22 crc 
kubenswrapper[4857]: I1128 13:46:22.916507 4857 generic.go:334] "Generic (PLEG): container finished" podID="fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a" containerID="3905cd5f9b3fdb786146dc10f75118f85ef50ccc5a5f443d1f26445fbae2844e" exitCode=0 Nov 28 13:46:22 crc kubenswrapper[4857]: I1128 13:46:22.916558 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9" event={"ID":"fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a","Type":"ContainerDied","Data":"3905cd5f9b3fdb786146dc10f75118f85ef50ccc5a5f443d1f26445fbae2844e"} Nov 28 13:46:23 crc kubenswrapper[4857]: I1128 13:46:23.928427 4857 generic.go:334] "Generic (PLEG): container finished" podID="fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a" containerID="baddecc7a58e20c99e4ad77c75d6083730a48dd54444ff7cb3867f6309ce7a06" exitCode=0 Nov 28 13:46:23 crc kubenswrapper[4857]: I1128 13:46:23.928473 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9" event={"ID":"fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a","Type":"ContainerDied","Data":"baddecc7a58e20c99e4ad77c75d6083730a48dd54444ff7cb3867f6309ce7a06"} Nov 28 13:46:25 crc kubenswrapper[4857]: I1128 13:46:25.207390 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9" Nov 28 13:46:25 crc kubenswrapper[4857]: I1128 13:46:25.363170 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a-bundle\") pod \"fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a\" (UID: \"fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a\") " Nov 28 13:46:25 crc kubenswrapper[4857]: I1128 13:46:25.363226 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a-util\") pod \"fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a\" (UID: \"fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a\") " Nov 28 13:46:25 crc kubenswrapper[4857]: I1128 13:46:25.363262 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bl4qv\" (UniqueName: \"kubernetes.io/projected/fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a-kube-api-access-bl4qv\") pod \"fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a\" (UID: \"fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a\") " Nov 28 13:46:25 crc kubenswrapper[4857]: I1128 13:46:25.364882 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a-bundle" (OuterVolumeSpecName: "bundle") pod "fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a" (UID: "fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:46:25 crc kubenswrapper[4857]: I1128 13:46:25.370329 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a-kube-api-access-bl4qv" (OuterVolumeSpecName: "kube-api-access-bl4qv") pod "fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a" (UID: "fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a"). InnerVolumeSpecName "kube-api-access-bl4qv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:46:25 crc kubenswrapper[4857]: I1128 13:46:25.374598 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a-util" (OuterVolumeSpecName: "util") pod "fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a" (UID: "fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:46:25 crc kubenswrapper[4857]: I1128 13:46:25.465218 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bl4qv\" (UniqueName: \"kubernetes.io/projected/fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a-kube-api-access-bl4qv\") on node \"crc\" DevicePath \"\"" Nov 28 13:46:25 crc kubenswrapper[4857]: I1128 13:46:25.465262 4857 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:46:25 crc kubenswrapper[4857]: I1128 13:46:25.465271 4857 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a-util\") on node \"crc\" DevicePath \"\"" Nov 28 13:46:25 crc kubenswrapper[4857]: I1128 13:46:25.943594 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9" event={"ID":"fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a","Type":"ContainerDied","Data":"9774cd6d84a5c7b8bcda9fd9590e16c87a0759a26325cfef6c5a1dd9535e2c86"} Nov 28 13:46:25 crc kubenswrapper[4857]: I1128 13:46:25.943643 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9" Nov 28 13:46:25 crc kubenswrapper[4857]: I1128 13:46:25.943653 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9774cd6d84a5c7b8bcda9fd9590e16c87a0759a26325cfef6c5a1dd9535e2c86" Nov 28 13:46:29 crc kubenswrapper[4857]: I1128 13:46:29.736893 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-thnxx"] Nov 28 13:46:29 crc kubenswrapper[4857]: E1128 13:46:29.737453 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a" containerName="extract" Nov 28 13:46:29 crc kubenswrapper[4857]: I1128 13:46:29.737467 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a" containerName="extract" Nov 28 13:46:29 crc kubenswrapper[4857]: E1128 13:46:29.737477 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a" containerName="pull" Nov 28 13:46:29 crc kubenswrapper[4857]: I1128 13:46:29.737483 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a" containerName="pull" Nov 28 13:46:29 crc kubenswrapper[4857]: E1128 13:46:29.737499 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a" containerName="util" Nov 28 13:46:29 crc kubenswrapper[4857]: I1128 13:46:29.737505 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a" containerName="util" Nov 28 13:46:29 crc kubenswrapper[4857]: I1128 13:46:29.737619 4857 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a" containerName="extract" Nov 28 13:46:29 crc kubenswrapper[4857]: I1128 13:46:29.738013 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-thnxx" Nov 28 13:46:29 crc kubenswrapper[4857]: I1128 13:46:29.740716 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"kube-root-ca.crt" Nov 28 13:46:29 crc kubenswrapper[4857]: I1128 13:46:29.740797 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"openshift-service-ca.crt" Nov 28 13:46:29 crc kubenswrapper[4857]: I1128 13:46:29.741013 4857 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager-operator"/"cert-manager-operator-controller-manager-dockercfg-clpdf" Nov 28 13:46:29 crc kubenswrapper[4857]: I1128 13:46:29.753142 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-thnxx"] Nov 28 13:46:29 crc kubenswrapper[4857]: I1128 13:46:29.821259 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tjsz6\" (UniqueName: \"kubernetes.io/projected/77ffb451-fc06-439b-a773-98ab4f212710-kube-api-access-tjsz6\") pod \"cert-manager-operator-controller-manager-64cf6dff88-thnxx\" (UID: \"77ffb451-fc06-439b-a773-98ab4f212710\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-thnxx" Nov 28 13:46:29 crc kubenswrapper[4857]: I1128 13:46:29.821474 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/77ffb451-fc06-439b-a773-98ab4f212710-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-thnxx\" (UID: \"77ffb451-fc06-439b-a773-98ab4f212710\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-thnxx" Nov 28 13:46:29 crc kubenswrapper[4857]: I1128 13:46:29.923051 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/77ffb451-fc06-439b-a773-98ab4f212710-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-thnxx\" (UID: \"77ffb451-fc06-439b-a773-98ab4f212710\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-thnxx" Nov 28 13:46:29 crc kubenswrapper[4857]: I1128 13:46:29.923224 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tjsz6\" (UniqueName: \"kubernetes.io/projected/77ffb451-fc06-439b-a773-98ab4f212710-kube-api-access-tjsz6\") pod \"cert-manager-operator-controller-manager-64cf6dff88-thnxx\" (UID: \"77ffb451-fc06-439b-a773-98ab4f212710\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-thnxx" Nov 28 13:46:29 crc kubenswrapper[4857]: I1128 13:46:29.923555 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/77ffb451-fc06-439b-a773-98ab4f212710-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-thnxx\" (UID: \"77ffb451-fc06-439b-a773-98ab4f212710\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-thnxx" Nov 28 13:46:29 crc kubenswrapper[4857]: I1128 13:46:29.950783 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tjsz6\" (UniqueName: 
\"kubernetes.io/projected/77ffb451-fc06-439b-a773-98ab4f212710-kube-api-access-tjsz6\") pod \"cert-manager-operator-controller-manager-64cf6dff88-thnxx\" (UID: \"77ffb451-fc06-439b-a773-98ab4f212710\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-thnxx" Nov 28 13:46:30 crc kubenswrapper[4857]: I1128 13:46:30.066212 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-thnxx" Nov 28 13:46:30 crc kubenswrapper[4857]: I1128 13:46:30.300603 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-thnxx"] Nov 28 13:46:30 crc kubenswrapper[4857]: I1128 13:46:30.974616 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-thnxx" event={"ID":"77ffb451-fc06-439b-a773-98ab4f212710","Type":"ContainerStarted","Data":"01f55e50d6e8f154d5099ba24aa3c60c0f60ff3488307f2e86ec616d8da10a70"} Nov 28 13:46:39 crc kubenswrapper[4857]: I1128 13:46:39.029800 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-thnxx" event={"ID":"77ffb451-fc06-439b-a773-98ab4f212710","Type":"ContainerStarted","Data":"f4c20c2139ebf54734c0534b41ecb126d3267116d4ea550661154220d76bb935"} Nov 28 13:46:39 crc kubenswrapper[4857]: I1128 13:46:39.057601 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-thnxx" podStartSLOduration=1.859060956 podStartE2EDuration="10.057586087s" podCreationTimestamp="2025-11-28 13:46:29 +0000 UTC" firstStartedPulling="2025-11-28 13:46:30.310572143 +0000 UTC m=+1040.434513580" lastFinishedPulling="2025-11-28 13:46:38.509097274 +0000 UTC m=+1048.633038711" observedRunningTime="2025-11-28 13:46:39.053896297 +0000 UTC m=+1049.177837744" watchObservedRunningTime="2025-11-28 13:46:39.057586087 +0000 UTC m=+1049.181527524" Nov 28 13:46:41 crc kubenswrapper[4857]: I1128 13:46:41.308085 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:46:41 crc kubenswrapper[4857]: I1128 13:46:41.308154 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:46:41 crc kubenswrapper[4857]: I1128 13:46:41.308206 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 13:46:41 crc kubenswrapper[4857]: I1128 13:46:41.308903 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"cb45ea7c38d2a9151e38696a74767baa9022920c5afdfffe7b22b74cc2cdddc2"} pod="openshift-machine-config-operator/machine-config-daemon-dshsf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 13:46:41 crc kubenswrapper[4857]: I1128 13:46:41.309003 
4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" containerID="cri-o://cb45ea7c38d2a9151e38696a74767baa9022920c5afdfffe7b22b74cc2cdddc2" gracePeriod=600 Nov 28 13:46:42 crc kubenswrapper[4857]: I1128 13:46:42.049073 4857 generic.go:334] "Generic (PLEG): container finished" podID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerID="cb45ea7c38d2a9151e38696a74767baa9022920c5afdfffe7b22b74cc2cdddc2" exitCode=0 Nov 28 13:46:42 crc kubenswrapper[4857]: I1128 13:46:42.049161 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerDied","Data":"cb45ea7c38d2a9151e38696a74767baa9022920c5afdfffe7b22b74cc2cdddc2"} Nov 28 13:46:42 crc kubenswrapper[4857]: I1128 13:46:42.049553 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerStarted","Data":"f8014f585ed82233daf2682d55748994b4ded11ee145a4ddfa59430be03e8701"} Nov 28 13:46:42 crc kubenswrapper[4857]: I1128 13:46:42.049577 4857 scope.go:117] "RemoveContainer" containerID="4c4a6d6663fe5a6930513c4b0cca32f9c63d61f7609d54dbd2cbc81ca6f31f57" Nov 28 13:46:45 crc kubenswrapper[4857]: I1128 13:46:45.372243 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-6kbg8"] Nov 28 13:46:45 crc kubenswrapper[4857]: I1128 13:46:45.374719 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-f4fb5df64-6kbg8" Nov 28 13:46:45 crc kubenswrapper[4857]: I1128 13:46:45.377006 4857 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-tnbvs" Nov 28 13:46:45 crc kubenswrapper[4857]: I1128 13:46:45.377077 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 28 13:46:45 crc kubenswrapper[4857]: I1128 13:46:45.381496 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Nov 28 13:46:45 crc kubenswrapper[4857]: I1128 13:46:45.390394 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-6kbg8"] Nov 28 13:46:45 crc kubenswrapper[4857]: I1128 13:46:45.449180 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/de8a159c-7c49-47a6-9dd6-34d23aa529e0-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-6kbg8\" (UID: \"de8a159c-7c49-47a6-9dd6-34d23aa529e0\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-6kbg8" Nov 28 13:46:45 crc kubenswrapper[4857]: I1128 13:46:45.449364 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j9g8c\" (UniqueName: \"kubernetes.io/projected/de8a159c-7c49-47a6-9dd6-34d23aa529e0-kube-api-access-j9g8c\") pod \"cert-manager-webhook-f4fb5df64-6kbg8\" (UID: \"de8a159c-7c49-47a6-9dd6-34d23aa529e0\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-6kbg8" Nov 28 13:46:45 crc kubenswrapper[4857]: I1128 13:46:45.550576 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: 
\"kubernetes.io/projected/de8a159c-7c49-47a6-9dd6-34d23aa529e0-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-6kbg8\" (UID: \"de8a159c-7c49-47a6-9dd6-34d23aa529e0\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-6kbg8" Nov 28 13:46:45 crc kubenswrapper[4857]: I1128 13:46:45.550654 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j9g8c\" (UniqueName: \"kubernetes.io/projected/de8a159c-7c49-47a6-9dd6-34d23aa529e0-kube-api-access-j9g8c\") pod \"cert-manager-webhook-f4fb5df64-6kbg8\" (UID: \"de8a159c-7c49-47a6-9dd6-34d23aa529e0\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-6kbg8" Nov 28 13:46:45 crc kubenswrapper[4857]: I1128 13:46:45.567803 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/de8a159c-7c49-47a6-9dd6-34d23aa529e0-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-6kbg8\" (UID: \"de8a159c-7c49-47a6-9dd6-34d23aa529e0\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-6kbg8" Nov 28 13:46:45 crc kubenswrapper[4857]: I1128 13:46:45.567859 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j9g8c\" (UniqueName: \"kubernetes.io/projected/de8a159c-7c49-47a6-9dd6-34d23aa529e0-kube-api-access-j9g8c\") pod \"cert-manager-webhook-f4fb5df64-6kbg8\" (UID: \"de8a159c-7c49-47a6-9dd6-34d23aa529e0\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-6kbg8" Nov 28 13:46:45 crc kubenswrapper[4857]: I1128 13:46:45.692513 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-f4fb5df64-6kbg8" Nov 28 13:46:45 crc kubenswrapper[4857]: I1128 13:46:45.756303 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-jm54s"] Nov 28 13:46:45 crc kubenswrapper[4857]: I1128 13:46:45.757430 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-cainjector-855d9ccff4-jm54s" Nov 28 13:46:45 crc kubenswrapper[4857]: I1128 13:46:45.759921 4857 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-z9xwn" Nov 28 13:46:45 crc kubenswrapper[4857]: I1128 13:46:45.767021 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-jm54s"] Nov 28 13:46:45 crc kubenswrapper[4857]: I1128 13:46:45.854230 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xhcnt\" (UniqueName: \"kubernetes.io/projected/b8471048-2df4-4c56-bfce-f8e8c377b85f-kube-api-access-xhcnt\") pod \"cert-manager-cainjector-855d9ccff4-jm54s\" (UID: \"b8471048-2df4-4c56-bfce-f8e8c377b85f\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-jm54s" Nov 28 13:46:45 crc kubenswrapper[4857]: I1128 13:46:45.854467 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b8471048-2df4-4c56-bfce-f8e8c377b85f-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-jm54s\" (UID: \"b8471048-2df4-4c56-bfce-f8e8c377b85f\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-jm54s" Nov 28 13:46:45 crc kubenswrapper[4857]: I1128 13:46:45.955161 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xhcnt\" (UniqueName: \"kubernetes.io/projected/b8471048-2df4-4c56-bfce-f8e8c377b85f-kube-api-access-xhcnt\") pod \"cert-manager-cainjector-855d9ccff4-jm54s\" (UID: \"b8471048-2df4-4c56-bfce-f8e8c377b85f\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-jm54s" Nov 28 13:46:45 crc kubenswrapper[4857]: I1128 13:46:45.955251 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b8471048-2df4-4c56-bfce-f8e8c377b85f-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-jm54s\" (UID: \"b8471048-2df4-4c56-bfce-f8e8c377b85f\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-jm54s" Nov 28 13:46:45 crc kubenswrapper[4857]: I1128 13:46:45.971471 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b8471048-2df4-4c56-bfce-f8e8c377b85f-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-jm54s\" (UID: \"b8471048-2df4-4c56-bfce-f8e8c377b85f\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-jm54s" Nov 28 13:46:45 crc kubenswrapper[4857]: I1128 13:46:45.974378 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xhcnt\" (UniqueName: \"kubernetes.io/projected/b8471048-2df4-4c56-bfce-f8e8c377b85f-kube-api-access-xhcnt\") pod \"cert-manager-cainjector-855d9ccff4-jm54s\" (UID: \"b8471048-2df4-4c56-bfce-f8e8c377b85f\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-jm54s" Nov 28 13:46:46 crc kubenswrapper[4857]: I1128 13:46:46.078066 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-cainjector-855d9ccff4-jm54s" Nov 28 13:46:46 crc kubenswrapper[4857]: I1128 13:46:46.122757 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-6kbg8"] Nov 28 13:46:46 crc kubenswrapper[4857]: I1128 13:46:46.295495 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-jm54s"] Nov 28 13:46:46 crc kubenswrapper[4857]: W1128 13:46:46.306771 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb8471048_2df4_4c56_bfce_f8e8c377b85f.slice/crio-f0d9f02b387947597647c0dd8f596b0849eebe35b28880bdf16157597bcd6abb WatchSource:0}: Error finding container f0d9f02b387947597647c0dd8f596b0849eebe35b28880bdf16157597bcd6abb: Status 404 returned error can't find the container with id f0d9f02b387947597647c0dd8f596b0849eebe35b28880bdf16157597bcd6abb Nov 28 13:46:47 crc kubenswrapper[4857]: I1128 13:46:47.082313 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-f4fb5df64-6kbg8" event={"ID":"de8a159c-7c49-47a6-9dd6-34d23aa529e0","Type":"ContainerStarted","Data":"19747ba149df3a5078004b1f1ebaf47b1f5b2bf399cf1420a5f980c3be1a92d1"} Nov 28 13:46:47 crc kubenswrapper[4857]: I1128 13:46:47.084391 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-jm54s" event={"ID":"b8471048-2df4-4c56-bfce-f8e8c377b85f","Type":"ContainerStarted","Data":"f0d9f02b387947597647c0dd8f596b0849eebe35b28880bdf16157597bcd6abb"} Nov 28 13:46:55 crc kubenswrapper[4857]: I1128 13:46:55.153379 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-f4fb5df64-6kbg8" event={"ID":"de8a159c-7c49-47a6-9dd6-34d23aa529e0","Type":"ContainerStarted","Data":"3a743d67f9f06b24096b89888674ee483f3dd76ad0c75fac9bdfd9a678b99076"} Nov 28 13:46:55 crc kubenswrapper[4857]: I1128 13:46:55.154116 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-f4fb5df64-6kbg8" Nov 28 13:46:55 crc kubenswrapper[4857]: I1128 13:46:55.155447 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-jm54s" event={"ID":"b8471048-2df4-4c56-bfce-f8e8c377b85f","Type":"ContainerStarted","Data":"6a79b68825c0ffb1ebcc5fbc2fe2e6202d87e23d9bfeeb874c05b2d3a31434ef"} Nov 28 13:46:55 crc kubenswrapper[4857]: I1128 13:46:55.176055 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-f4fb5df64-6kbg8" podStartSLOduration=2.203520464 podStartE2EDuration="10.176036843s" podCreationTimestamp="2025-11-28 13:46:45 +0000 UTC" firstStartedPulling="2025-11-28 13:46:46.135632144 +0000 UTC m=+1056.259573581" lastFinishedPulling="2025-11-28 13:46:54.108148523 +0000 UTC m=+1064.232089960" observedRunningTime="2025-11-28 13:46:55.170004779 +0000 UTC m=+1065.293946226" watchObservedRunningTime="2025-11-28 13:46:55.176036843 +0000 UTC m=+1065.299978290" Nov 28 13:46:55 crc kubenswrapper[4857]: I1128 13:46:55.201596 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-855d9ccff4-jm54s" podStartSLOduration=2.465904619 podStartE2EDuration="10.201567104s" podCreationTimestamp="2025-11-28 13:46:45 +0000 UTC" firstStartedPulling="2025-11-28 13:46:46.30904971 +0000 UTC m=+1056.432991147" lastFinishedPulling="2025-11-28 
13:46:54.044712145 +0000 UTC m=+1064.168653632" observedRunningTime="2025-11-28 13:46:55.190531825 +0000 UTC m=+1065.314473302" watchObservedRunningTime="2025-11-28 13:46:55.201567104 +0000 UTC m=+1065.325508571" Nov 28 13:47:00 crc kubenswrapper[4857]: I1128 13:47:00.697351 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-f4fb5df64-6kbg8" Nov 28 13:47:02 crc kubenswrapper[4857]: I1128 13:47:02.100278 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-86cb77c54b-rdmt6"] Nov 28 13:47:02 crc kubenswrapper[4857]: I1128 13:47:02.101810 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-86cb77c54b-rdmt6" Nov 28 13:47:02 crc kubenswrapper[4857]: I1128 13:47:02.106057 4857 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-gj6hx" Nov 28 13:47:02 crc kubenswrapper[4857]: I1128 13:47:02.116385 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-86cb77c54b-rdmt6"] Nov 28 13:47:02 crc kubenswrapper[4857]: I1128 13:47:02.184711 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8q2sc\" (UniqueName: \"kubernetes.io/projected/dfbe1d42-d65f-4d9c-851a-50319b7742bd-kube-api-access-8q2sc\") pod \"cert-manager-86cb77c54b-rdmt6\" (UID: \"dfbe1d42-d65f-4d9c-851a-50319b7742bd\") " pod="cert-manager/cert-manager-86cb77c54b-rdmt6" Nov 28 13:47:02 crc kubenswrapper[4857]: I1128 13:47:02.184795 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/dfbe1d42-d65f-4d9c-851a-50319b7742bd-bound-sa-token\") pod \"cert-manager-86cb77c54b-rdmt6\" (UID: \"dfbe1d42-d65f-4d9c-851a-50319b7742bd\") " pod="cert-manager/cert-manager-86cb77c54b-rdmt6" Nov 28 13:47:02 crc kubenswrapper[4857]: I1128 13:47:02.285699 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8q2sc\" (UniqueName: \"kubernetes.io/projected/dfbe1d42-d65f-4d9c-851a-50319b7742bd-kube-api-access-8q2sc\") pod \"cert-manager-86cb77c54b-rdmt6\" (UID: \"dfbe1d42-d65f-4d9c-851a-50319b7742bd\") " pod="cert-manager/cert-manager-86cb77c54b-rdmt6" Nov 28 13:47:02 crc kubenswrapper[4857]: I1128 13:47:02.285785 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/dfbe1d42-d65f-4d9c-851a-50319b7742bd-bound-sa-token\") pod \"cert-manager-86cb77c54b-rdmt6\" (UID: \"dfbe1d42-d65f-4d9c-851a-50319b7742bd\") " pod="cert-manager/cert-manager-86cb77c54b-rdmt6" Nov 28 13:47:02 crc kubenswrapper[4857]: I1128 13:47:02.316446 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/dfbe1d42-d65f-4d9c-851a-50319b7742bd-bound-sa-token\") pod \"cert-manager-86cb77c54b-rdmt6\" (UID: \"dfbe1d42-d65f-4d9c-851a-50319b7742bd\") " pod="cert-manager/cert-manager-86cb77c54b-rdmt6" Nov 28 13:47:02 crc kubenswrapper[4857]: I1128 13:47:02.320975 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8q2sc\" (UniqueName: \"kubernetes.io/projected/dfbe1d42-d65f-4d9c-851a-50319b7742bd-kube-api-access-8q2sc\") pod \"cert-manager-86cb77c54b-rdmt6\" (UID: \"dfbe1d42-d65f-4d9c-851a-50319b7742bd\") " pod="cert-manager/cert-manager-86cb77c54b-rdmt6" Nov 28 13:47:02 crc 
kubenswrapper[4857]: I1128 13:47:02.444229 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-86cb77c54b-rdmt6" Nov 28 13:47:02 crc kubenswrapper[4857]: I1128 13:47:02.875112 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-86cb77c54b-rdmt6"] Nov 28 13:47:03 crc kubenswrapper[4857]: I1128 13:47:03.216734 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-rdmt6" event={"ID":"dfbe1d42-d65f-4d9c-851a-50319b7742bd","Type":"ContainerStarted","Data":"fa9c3ec321d75bfc7250e94a2fa82a9633046a2fd40f0fbda3b6a06546c42081"} Nov 28 13:47:03 crc kubenswrapper[4857]: I1128 13:47:03.217258 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-rdmt6" event={"ID":"dfbe1d42-d65f-4d9c-851a-50319b7742bd","Type":"ContainerStarted","Data":"d79f1833beb93bf4d089d4296b3d14a4b526b2bcc2059b91bf1396477ade3cac"} Nov 28 13:47:03 crc kubenswrapper[4857]: I1128 13:47:03.246851 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-86cb77c54b-rdmt6" podStartSLOduration=1.246831364 podStartE2EDuration="1.246831364s" podCreationTimestamp="2025-11-28 13:47:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:47:03.246402492 +0000 UTC m=+1073.370343959" watchObservedRunningTime="2025-11-28 13:47:03.246831364 +0000 UTC m=+1073.370772811" Nov 28 13:47:14 crc kubenswrapper[4857]: I1128 13:47:14.805058 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-g2q5c"] Nov 28 13:47:14 crc kubenswrapper[4857]: I1128 13:47:14.806285 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-g2q5c" Nov 28 13:47:14 crc kubenswrapper[4857]: I1128 13:47:14.813282 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-52dc2" Nov 28 13:47:14 crc kubenswrapper[4857]: I1128 13:47:14.823167 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Nov 28 13:47:14 crc kubenswrapper[4857]: I1128 13:47:14.823262 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Nov 28 13:47:14 crc kubenswrapper[4857]: I1128 13:47:14.827537 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-g2q5c"] Nov 28 13:47:14 crc kubenswrapper[4857]: I1128 13:47:14.993075 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zdnpr\" (UniqueName: \"kubernetes.io/projected/90fa949c-64c6-4b20-9718-571da792d298-kube-api-access-zdnpr\") pod \"openstack-operator-index-g2q5c\" (UID: \"90fa949c-64c6-4b20-9718-571da792d298\") " pod="openstack-operators/openstack-operator-index-g2q5c" Nov 28 13:47:15 crc kubenswrapper[4857]: I1128 13:47:15.094014 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zdnpr\" (UniqueName: \"kubernetes.io/projected/90fa949c-64c6-4b20-9718-571da792d298-kube-api-access-zdnpr\") pod \"openstack-operator-index-g2q5c\" (UID: \"90fa949c-64c6-4b20-9718-571da792d298\") " pod="openstack-operators/openstack-operator-index-g2q5c" Nov 28 13:47:15 crc kubenswrapper[4857]: I1128 13:47:15.112324 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zdnpr\" (UniqueName: \"kubernetes.io/projected/90fa949c-64c6-4b20-9718-571da792d298-kube-api-access-zdnpr\") pod \"openstack-operator-index-g2q5c\" (UID: \"90fa949c-64c6-4b20-9718-571da792d298\") " pod="openstack-operators/openstack-operator-index-g2q5c" Nov 28 13:47:15 crc kubenswrapper[4857]: I1128 13:47:15.131055 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-g2q5c" Nov 28 13:47:15 crc kubenswrapper[4857]: I1128 13:47:15.539011 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-g2q5c"] Nov 28 13:47:16 crc kubenswrapper[4857]: I1128 13:47:16.328654 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-g2q5c" event={"ID":"90fa949c-64c6-4b20-9718-571da792d298","Type":"ContainerStarted","Data":"ee92bbebf7d2e22690e4ffc5f573a562e4e2b155fa95f6da9eaf28becabfd724"} Nov 28 13:47:17 crc kubenswrapper[4857]: I1128 13:47:17.972902 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-g2q5c"] Nov 28 13:47:18 crc kubenswrapper[4857]: I1128 13:47:18.343810 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-g2q5c" event={"ID":"90fa949c-64c6-4b20-9718-571da792d298","Type":"ContainerStarted","Data":"3ee26a471ed10808616d461964cac373c217193a455d8295c344a90ab00e3527"} Nov 28 13:47:18 crc kubenswrapper[4857]: I1128 13:47:18.344029 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-g2q5c" podUID="90fa949c-64c6-4b20-9718-571da792d298" containerName="registry-server" containerID="cri-o://3ee26a471ed10808616d461964cac373c217193a455d8295c344a90ab00e3527" gracePeriod=2 Nov 28 13:47:18 crc kubenswrapper[4857]: I1128 13:47:18.363995 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-g2q5c" podStartSLOduration=2.170106255 podStartE2EDuration="4.363977927s" podCreationTimestamp="2025-11-28 13:47:14 +0000 UTC" firstStartedPulling="2025-11-28 13:47:15.545985634 +0000 UTC m=+1085.669927111" lastFinishedPulling="2025-11-28 13:47:17.739857306 +0000 UTC m=+1087.863798783" observedRunningTime="2025-11-28 13:47:18.362668932 +0000 UTC m=+1088.486610379" watchObservedRunningTime="2025-11-28 13:47:18.363977927 +0000 UTC m=+1088.487919374" Nov 28 13:47:18 crc kubenswrapper[4857]: I1128 13:47:18.576187 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-hjxw9"] Nov 28 13:47:18 crc kubenswrapper[4857]: I1128 13:47:18.577244 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-hjxw9" Nov 28 13:47:18 crc kubenswrapper[4857]: I1128 13:47:18.586167 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-hjxw9"] Nov 28 13:47:18 crc kubenswrapper[4857]: I1128 13:47:18.699602 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-g2q5c" Nov 28 13:47:18 crc kubenswrapper[4857]: I1128 13:47:18.745803 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wk2v2\" (UniqueName: \"kubernetes.io/projected/cbdbcd68-3d0e-4728-8c51-250eb91c0ac5-kube-api-access-wk2v2\") pod \"openstack-operator-index-hjxw9\" (UID: \"cbdbcd68-3d0e-4728-8c51-250eb91c0ac5\") " pod="openstack-operators/openstack-operator-index-hjxw9" Nov 28 13:47:18 crc kubenswrapper[4857]: I1128 13:47:18.846459 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zdnpr\" (UniqueName: \"kubernetes.io/projected/90fa949c-64c6-4b20-9718-571da792d298-kube-api-access-zdnpr\") pod \"90fa949c-64c6-4b20-9718-571da792d298\" (UID: \"90fa949c-64c6-4b20-9718-571da792d298\") " Nov 28 13:47:18 crc kubenswrapper[4857]: I1128 13:47:18.846921 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wk2v2\" (UniqueName: \"kubernetes.io/projected/cbdbcd68-3d0e-4728-8c51-250eb91c0ac5-kube-api-access-wk2v2\") pod \"openstack-operator-index-hjxw9\" (UID: \"cbdbcd68-3d0e-4728-8c51-250eb91c0ac5\") " pod="openstack-operators/openstack-operator-index-hjxw9" Nov 28 13:47:18 crc kubenswrapper[4857]: I1128 13:47:18.851770 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90fa949c-64c6-4b20-9718-571da792d298-kube-api-access-zdnpr" (OuterVolumeSpecName: "kube-api-access-zdnpr") pod "90fa949c-64c6-4b20-9718-571da792d298" (UID: "90fa949c-64c6-4b20-9718-571da792d298"). InnerVolumeSpecName "kube-api-access-zdnpr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:47:18 crc kubenswrapper[4857]: I1128 13:47:18.862622 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wk2v2\" (UniqueName: \"kubernetes.io/projected/cbdbcd68-3d0e-4728-8c51-250eb91c0ac5-kube-api-access-wk2v2\") pod \"openstack-operator-index-hjxw9\" (UID: \"cbdbcd68-3d0e-4728-8c51-250eb91c0ac5\") " pod="openstack-operators/openstack-operator-index-hjxw9" Nov 28 13:47:18 crc kubenswrapper[4857]: I1128 13:47:18.912582 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-hjxw9" Nov 28 13:47:18 crc kubenswrapper[4857]: I1128 13:47:18.948941 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zdnpr\" (UniqueName: \"kubernetes.io/projected/90fa949c-64c6-4b20-9718-571da792d298-kube-api-access-zdnpr\") on node \"crc\" DevicePath \"\"" Nov 28 13:47:19 crc kubenswrapper[4857]: I1128 13:47:19.129032 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-hjxw9"] Nov 28 13:47:19 crc kubenswrapper[4857]: W1128 13:47:19.137124 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcbdbcd68_3d0e_4728_8c51_250eb91c0ac5.slice/crio-8a6cd1a642d309b2a0881dc8d87141df992ecf76ce3c9a3dcacbdc657a211a66 WatchSource:0}: Error finding container 8a6cd1a642d309b2a0881dc8d87141df992ecf76ce3c9a3dcacbdc657a211a66: Status 404 returned error can't find the container with id 8a6cd1a642d309b2a0881dc8d87141df992ecf76ce3c9a3dcacbdc657a211a66 Nov 28 13:47:19 crc kubenswrapper[4857]: I1128 13:47:19.351749 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-hjxw9" event={"ID":"cbdbcd68-3d0e-4728-8c51-250eb91c0ac5","Type":"ContainerStarted","Data":"2ade7ba73930c20428694295670b445fa0ba26cb5c29e37380edf38cd077ff0d"} Nov 28 13:47:19 crc kubenswrapper[4857]: I1128 13:47:19.352105 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-hjxw9" event={"ID":"cbdbcd68-3d0e-4728-8c51-250eb91c0ac5","Type":"ContainerStarted","Data":"8a6cd1a642d309b2a0881dc8d87141df992ecf76ce3c9a3dcacbdc657a211a66"} Nov 28 13:47:19 crc kubenswrapper[4857]: I1128 13:47:19.353926 4857 generic.go:334] "Generic (PLEG): container finished" podID="90fa949c-64c6-4b20-9718-571da792d298" containerID="3ee26a471ed10808616d461964cac373c217193a455d8295c344a90ab00e3527" exitCode=0 Nov 28 13:47:19 crc kubenswrapper[4857]: I1128 13:47:19.353968 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-g2q5c" event={"ID":"90fa949c-64c6-4b20-9718-571da792d298","Type":"ContainerDied","Data":"3ee26a471ed10808616d461964cac373c217193a455d8295c344a90ab00e3527"} Nov 28 13:47:19 crc kubenswrapper[4857]: I1128 13:47:19.353983 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-g2q5c" event={"ID":"90fa949c-64c6-4b20-9718-571da792d298","Type":"ContainerDied","Data":"ee92bbebf7d2e22690e4ffc5f573a562e4e2b155fa95f6da9eaf28becabfd724"} Nov 28 13:47:19 crc kubenswrapper[4857]: I1128 13:47:19.353998 4857 scope.go:117] "RemoveContainer" containerID="3ee26a471ed10808616d461964cac373c217193a455d8295c344a90ab00e3527" Nov 28 13:47:19 crc kubenswrapper[4857]: I1128 13:47:19.354093 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-g2q5c" Nov 28 13:47:19 crc kubenswrapper[4857]: I1128 13:47:19.372787 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-hjxw9" podStartSLOduration=1.3178179970000001 podStartE2EDuration="1.372758165s" podCreationTimestamp="2025-11-28 13:47:18 +0000 UTC" firstStartedPulling="2025-11-28 13:47:19.140396172 +0000 UTC m=+1089.264337609" lastFinishedPulling="2025-11-28 13:47:19.19533631 +0000 UTC m=+1089.319277777" observedRunningTime="2025-11-28 13:47:19.368464379 +0000 UTC m=+1089.492405826" watchObservedRunningTime="2025-11-28 13:47:19.372758165 +0000 UTC m=+1089.496699602" Nov 28 13:47:19 crc kubenswrapper[4857]: I1128 13:47:19.383194 4857 scope.go:117] "RemoveContainer" containerID="3ee26a471ed10808616d461964cac373c217193a455d8295c344a90ab00e3527" Nov 28 13:47:19 crc kubenswrapper[4857]: E1128 13:47:19.384323 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3ee26a471ed10808616d461964cac373c217193a455d8295c344a90ab00e3527\": container with ID starting with 3ee26a471ed10808616d461964cac373c217193a455d8295c344a90ab00e3527 not found: ID does not exist" containerID="3ee26a471ed10808616d461964cac373c217193a455d8295c344a90ab00e3527" Nov 28 13:47:19 crc kubenswrapper[4857]: I1128 13:47:19.384360 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3ee26a471ed10808616d461964cac373c217193a455d8295c344a90ab00e3527"} err="failed to get container status \"3ee26a471ed10808616d461964cac373c217193a455d8295c344a90ab00e3527\": rpc error: code = NotFound desc = could not find container \"3ee26a471ed10808616d461964cac373c217193a455d8295c344a90ab00e3527\": container with ID starting with 3ee26a471ed10808616d461964cac373c217193a455d8295c344a90ab00e3527 not found: ID does not exist" Nov 28 13:47:19 crc kubenswrapper[4857]: I1128 13:47:19.390287 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-g2q5c"] Nov 28 13:47:19 crc kubenswrapper[4857]: I1128 13:47:19.397980 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-g2q5c"] Nov 28 13:47:20 crc kubenswrapper[4857]: I1128 13:47:20.240602 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="90fa949c-64c6-4b20-9718-571da792d298" path="/var/lib/kubelet/pods/90fa949c-64c6-4b20-9718-571da792d298/volumes" Nov 28 13:47:28 crc kubenswrapper[4857]: I1128 13:47:28.913504 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-hjxw9" Nov 28 13:47:28 crc kubenswrapper[4857]: I1128 13:47:28.914158 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-hjxw9" Nov 28 13:47:28 crc kubenswrapper[4857]: I1128 13:47:28.945103 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-hjxw9" Nov 28 13:47:29 crc kubenswrapper[4857]: I1128 13:47:29.446070 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-hjxw9" Nov 28 13:47:30 crc kubenswrapper[4857]: I1128 13:47:30.616764 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m"] Nov 28 
13:47:30 crc kubenswrapper[4857]: E1128 13:47:30.617504 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90fa949c-64c6-4b20-9718-571da792d298" containerName="registry-server" Nov 28 13:47:30 crc kubenswrapper[4857]: I1128 13:47:30.617526 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="90fa949c-64c6-4b20-9718-571da792d298" containerName="registry-server" Nov 28 13:47:30 crc kubenswrapper[4857]: I1128 13:47:30.617698 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="90fa949c-64c6-4b20-9718-571da792d298" containerName="registry-server" Nov 28 13:47:30 crc kubenswrapper[4857]: I1128 13:47:30.618988 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m" Nov 28 13:47:30 crc kubenswrapper[4857]: I1128 13:47:30.621887 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-j8tnb" Nov 28 13:47:30 crc kubenswrapper[4857]: I1128 13:47:30.638027 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m"] Nov 28 13:47:30 crc kubenswrapper[4857]: I1128 13:47:30.717271 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9e893541-5110-452a-9b42-c37bff12861d-util\") pod \"2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m\" (UID: \"9e893541-5110-452a-9b42-c37bff12861d\") " pod="openstack-operators/2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m" Nov 28 13:47:30 crc kubenswrapper[4857]: I1128 13:47:30.717430 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lnrgn\" (UniqueName: \"kubernetes.io/projected/9e893541-5110-452a-9b42-c37bff12861d-kube-api-access-lnrgn\") pod \"2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m\" (UID: \"9e893541-5110-452a-9b42-c37bff12861d\") " pod="openstack-operators/2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m" Nov 28 13:47:30 crc kubenswrapper[4857]: I1128 13:47:30.717556 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9e893541-5110-452a-9b42-c37bff12861d-bundle\") pod \"2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m\" (UID: \"9e893541-5110-452a-9b42-c37bff12861d\") " pod="openstack-operators/2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m" Nov 28 13:47:30 crc kubenswrapper[4857]: I1128 13:47:30.818392 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9e893541-5110-452a-9b42-c37bff12861d-bundle\") pod \"2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m\" (UID: \"9e893541-5110-452a-9b42-c37bff12861d\") " pod="openstack-operators/2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m" Nov 28 13:47:30 crc kubenswrapper[4857]: I1128 13:47:30.818445 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9e893541-5110-452a-9b42-c37bff12861d-util\") pod \"2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m\" (UID: \"9e893541-5110-452a-9b42-c37bff12861d\") " pod="openstack-operators/2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m" 
Nov 28 13:47:30 crc kubenswrapper[4857]: I1128 13:47:30.818487 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lnrgn\" (UniqueName: \"kubernetes.io/projected/9e893541-5110-452a-9b42-c37bff12861d-kube-api-access-lnrgn\") pod \"2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m\" (UID: \"9e893541-5110-452a-9b42-c37bff12861d\") " pod="openstack-operators/2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m" Nov 28 13:47:30 crc kubenswrapper[4857]: I1128 13:47:30.819054 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9e893541-5110-452a-9b42-c37bff12861d-bundle\") pod \"2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m\" (UID: \"9e893541-5110-452a-9b42-c37bff12861d\") " pod="openstack-operators/2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m" Nov 28 13:47:30 crc kubenswrapper[4857]: I1128 13:47:30.819083 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9e893541-5110-452a-9b42-c37bff12861d-util\") pod \"2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m\" (UID: \"9e893541-5110-452a-9b42-c37bff12861d\") " pod="openstack-operators/2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m" Nov 28 13:47:30 crc kubenswrapper[4857]: I1128 13:47:30.841012 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lnrgn\" (UniqueName: \"kubernetes.io/projected/9e893541-5110-452a-9b42-c37bff12861d-kube-api-access-lnrgn\") pod \"2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m\" (UID: \"9e893541-5110-452a-9b42-c37bff12861d\") " pod="openstack-operators/2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m" Nov 28 13:47:30 crc kubenswrapper[4857]: I1128 13:47:30.954420 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m" Nov 28 13:47:31 crc kubenswrapper[4857]: I1128 13:47:31.409174 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m"] Nov 28 13:47:31 crc kubenswrapper[4857]: I1128 13:47:31.438661 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m" event={"ID":"9e893541-5110-452a-9b42-c37bff12861d","Type":"ContainerStarted","Data":"6e629a1493a4efe1f4d52ea5ae88289d2d37773b91b85e02d21ddced6c7ae89c"} Nov 28 13:47:34 crc kubenswrapper[4857]: I1128 13:47:34.462188 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m" event={"ID":"9e893541-5110-452a-9b42-c37bff12861d","Type":"ContainerStarted","Data":"628d22c2bb9acc066082fdc54c995cdb1e4e416f9bd3d81d21b08bd98958d56a"} Nov 28 13:47:35 crc kubenswrapper[4857]: I1128 13:47:35.472769 4857 generic.go:334] "Generic (PLEG): container finished" podID="9e893541-5110-452a-9b42-c37bff12861d" containerID="628d22c2bb9acc066082fdc54c995cdb1e4e416f9bd3d81d21b08bd98958d56a" exitCode=0 Nov 28 13:47:35 crc kubenswrapper[4857]: I1128 13:47:35.472855 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m" event={"ID":"9e893541-5110-452a-9b42-c37bff12861d","Type":"ContainerDied","Data":"628d22c2bb9acc066082fdc54c995cdb1e4e416f9bd3d81d21b08bd98958d56a"} Nov 28 13:47:36 crc kubenswrapper[4857]: I1128 13:47:36.481013 4857 generic.go:334] "Generic (PLEG): container finished" podID="9e893541-5110-452a-9b42-c37bff12861d" containerID="cb1b6cced3c350fb1b75afc724c7e3a311a826b5c8cefc29d8feea9546822230" exitCode=0 Nov 28 13:47:36 crc kubenswrapper[4857]: I1128 13:47:36.481086 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m" event={"ID":"9e893541-5110-452a-9b42-c37bff12861d","Type":"ContainerDied","Data":"cb1b6cced3c350fb1b75afc724c7e3a311a826b5c8cefc29d8feea9546822230"} Nov 28 13:47:37 crc kubenswrapper[4857]: I1128 13:47:37.492158 4857 generic.go:334] "Generic (PLEG): container finished" podID="9e893541-5110-452a-9b42-c37bff12861d" containerID="073af2fb363f4cbc18af00128b3b9ef48a27051e494d600922a466c31c112e21" exitCode=0 Nov 28 13:47:37 crc kubenswrapper[4857]: I1128 13:47:37.492241 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m" event={"ID":"9e893541-5110-452a-9b42-c37bff12861d","Type":"ContainerDied","Data":"073af2fb363f4cbc18af00128b3b9ef48a27051e494d600922a466c31c112e21"} Nov 28 13:47:38 crc kubenswrapper[4857]: I1128 13:47:38.809793 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m" Nov 28 13:47:38 crc kubenswrapper[4857]: I1128 13:47:38.932151 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9e893541-5110-452a-9b42-c37bff12861d-util\") pod \"9e893541-5110-452a-9b42-c37bff12861d\" (UID: \"9e893541-5110-452a-9b42-c37bff12861d\") " Nov 28 13:47:38 crc kubenswrapper[4857]: I1128 13:47:38.932239 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9e893541-5110-452a-9b42-c37bff12861d-bundle\") pod \"9e893541-5110-452a-9b42-c37bff12861d\" (UID: \"9e893541-5110-452a-9b42-c37bff12861d\") " Nov 28 13:47:38 crc kubenswrapper[4857]: I1128 13:47:38.932278 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lnrgn\" (UniqueName: \"kubernetes.io/projected/9e893541-5110-452a-9b42-c37bff12861d-kube-api-access-lnrgn\") pod \"9e893541-5110-452a-9b42-c37bff12861d\" (UID: \"9e893541-5110-452a-9b42-c37bff12861d\") " Nov 28 13:47:38 crc kubenswrapper[4857]: I1128 13:47:38.932817 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9e893541-5110-452a-9b42-c37bff12861d-bundle" (OuterVolumeSpecName: "bundle") pod "9e893541-5110-452a-9b42-c37bff12861d" (UID: "9e893541-5110-452a-9b42-c37bff12861d"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:47:38 crc kubenswrapper[4857]: I1128 13:47:38.938836 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e893541-5110-452a-9b42-c37bff12861d-kube-api-access-lnrgn" (OuterVolumeSpecName: "kube-api-access-lnrgn") pod "9e893541-5110-452a-9b42-c37bff12861d" (UID: "9e893541-5110-452a-9b42-c37bff12861d"). InnerVolumeSpecName "kube-api-access-lnrgn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:47:38 crc kubenswrapper[4857]: I1128 13:47:38.946786 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9e893541-5110-452a-9b42-c37bff12861d-util" (OuterVolumeSpecName: "util") pod "9e893541-5110-452a-9b42-c37bff12861d" (UID: "9e893541-5110-452a-9b42-c37bff12861d"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:47:39 crc kubenswrapper[4857]: I1128 13:47:39.033775 4857 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9e893541-5110-452a-9b42-c37bff12861d-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:47:39 crc kubenswrapper[4857]: I1128 13:47:39.033817 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lnrgn\" (UniqueName: \"kubernetes.io/projected/9e893541-5110-452a-9b42-c37bff12861d-kube-api-access-lnrgn\") on node \"crc\" DevicePath \"\"" Nov 28 13:47:39 crc kubenswrapper[4857]: I1128 13:47:39.033830 4857 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9e893541-5110-452a-9b42-c37bff12861d-util\") on node \"crc\" DevicePath \"\"" Nov 28 13:47:39 crc kubenswrapper[4857]: I1128 13:47:39.512012 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m" event={"ID":"9e893541-5110-452a-9b42-c37bff12861d","Type":"ContainerDied","Data":"6e629a1493a4efe1f4d52ea5ae88289d2d37773b91b85e02d21ddced6c7ae89c"} Nov 28 13:47:39 crc kubenswrapper[4857]: I1128 13:47:39.512414 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6e629a1493a4efe1f4d52ea5ae88289d2d37773b91b85e02d21ddced6c7ae89c" Nov 28 13:47:39 crc kubenswrapper[4857]: I1128 13:47:39.512091 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m" Nov 28 13:47:43 crc kubenswrapper[4857]: I1128 13:47:43.176529 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-78449c6ff-gkhhg"] Nov 28 13:47:43 crc kubenswrapper[4857]: E1128 13:47:43.176768 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e893541-5110-452a-9b42-c37bff12861d" containerName="pull" Nov 28 13:47:43 crc kubenswrapper[4857]: I1128 13:47:43.176780 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e893541-5110-452a-9b42-c37bff12861d" containerName="pull" Nov 28 13:47:43 crc kubenswrapper[4857]: E1128 13:47:43.176787 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e893541-5110-452a-9b42-c37bff12861d" containerName="util" Nov 28 13:47:43 crc kubenswrapper[4857]: I1128 13:47:43.176792 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e893541-5110-452a-9b42-c37bff12861d" containerName="util" Nov 28 13:47:43 crc kubenswrapper[4857]: E1128 13:47:43.176800 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e893541-5110-452a-9b42-c37bff12861d" containerName="extract" Nov 28 13:47:43 crc kubenswrapper[4857]: I1128 13:47:43.176805 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e893541-5110-452a-9b42-c37bff12861d" containerName="extract" Nov 28 13:47:43 crc kubenswrapper[4857]: I1128 13:47:43.176914 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e893541-5110-452a-9b42-c37bff12861d" containerName="extract" Nov 28 13:47:43 crc kubenswrapper[4857]: I1128 13:47:43.177314 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-78449c6ff-gkhhg" Nov 28 13:47:43 crc kubenswrapper[4857]: I1128 13:47:43.180250 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-dp57p" Nov 28 13:47:43 crc kubenswrapper[4857]: I1128 13:47:43.199398 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-78449c6ff-gkhhg"] Nov 28 13:47:43 crc kubenswrapper[4857]: I1128 13:47:43.289592 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p8nvp\" (UniqueName: \"kubernetes.io/projected/958cdbc4-1aa1-48b1-b84e-04f36c643455-kube-api-access-p8nvp\") pod \"openstack-operator-controller-operator-78449c6ff-gkhhg\" (UID: \"958cdbc4-1aa1-48b1-b84e-04f36c643455\") " pod="openstack-operators/openstack-operator-controller-operator-78449c6ff-gkhhg" Nov 28 13:47:43 crc kubenswrapper[4857]: I1128 13:47:43.390741 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p8nvp\" (UniqueName: \"kubernetes.io/projected/958cdbc4-1aa1-48b1-b84e-04f36c643455-kube-api-access-p8nvp\") pod \"openstack-operator-controller-operator-78449c6ff-gkhhg\" (UID: \"958cdbc4-1aa1-48b1-b84e-04f36c643455\") " pod="openstack-operators/openstack-operator-controller-operator-78449c6ff-gkhhg" Nov 28 13:47:43 crc kubenswrapper[4857]: I1128 13:47:43.408994 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p8nvp\" (UniqueName: \"kubernetes.io/projected/958cdbc4-1aa1-48b1-b84e-04f36c643455-kube-api-access-p8nvp\") pod \"openstack-operator-controller-operator-78449c6ff-gkhhg\" (UID: \"958cdbc4-1aa1-48b1-b84e-04f36c643455\") " pod="openstack-operators/openstack-operator-controller-operator-78449c6ff-gkhhg" Nov 28 13:47:43 crc kubenswrapper[4857]: I1128 13:47:43.495156 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-78449c6ff-gkhhg" Nov 28 13:47:43 crc kubenswrapper[4857]: I1128 13:47:43.920914 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-78449c6ff-gkhhg"] Nov 28 13:47:44 crc kubenswrapper[4857]: I1128 13:47:44.546617 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-78449c6ff-gkhhg" event={"ID":"958cdbc4-1aa1-48b1-b84e-04f36c643455","Type":"ContainerStarted","Data":"29a50a4f775b6c636d505bf16c151424f55fac82aedf8148748e3acbd876f243"} Nov 28 13:47:48 crc kubenswrapper[4857]: I1128 13:47:48.573016 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-78449c6ff-gkhhg" event={"ID":"958cdbc4-1aa1-48b1-b84e-04f36c643455","Type":"ContainerStarted","Data":"45554d35f08a27a491b30c442473a86b20cb52422bb9f0572448f51af670c122"} Nov 28 13:47:48 crc kubenswrapper[4857]: I1128 13:47:48.573542 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-78449c6ff-gkhhg" Nov 28 13:47:48 crc kubenswrapper[4857]: I1128 13:47:48.598149 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-78449c6ff-gkhhg" podStartSLOduration=2.086012309 podStartE2EDuration="5.598133269s" podCreationTimestamp="2025-11-28 13:47:43 +0000 UTC" firstStartedPulling="2025-11-28 13:47:43.92470275 +0000 UTC m=+1114.048644187" lastFinishedPulling="2025-11-28 13:47:47.43682371 +0000 UTC m=+1117.560765147" observedRunningTime="2025-11-28 13:47:48.595155874 +0000 UTC m=+1118.719097311" watchObservedRunningTime="2025-11-28 13:47:48.598133269 +0000 UTC m=+1118.722074706" Nov 28 13:47:53 crc kubenswrapper[4857]: I1128 13:47:53.498291 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-78449c6ff-gkhhg" Nov 28 13:48:11 crc kubenswrapper[4857]: I1128 13:48:11.862012 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-w2tjv"] Nov 28 13:48:11 crc kubenswrapper[4857]: I1128 13:48:11.863901 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-w2tjv" Nov 28 13:48:11 crc kubenswrapper[4857]: I1128 13:48:11.866072 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-h8rpp" Nov 28 13:48:11 crc kubenswrapper[4857]: I1128 13:48:11.870018 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-l44sf"] Nov 28 13:48:11 crc kubenswrapper[4857]: I1128 13:48:11.872614 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-l44sf" Nov 28 13:48:11 crc kubenswrapper[4857]: I1128 13:48:11.875409 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-4jpjh" Nov 28 13:48:11 crc kubenswrapper[4857]: I1128 13:48:11.878794 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-w2tjv"] Nov 28 13:48:11 crc kubenswrapper[4857]: I1128 13:48:11.908932 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-gq64h"] Nov 28 13:48:11 crc kubenswrapper[4857]: I1128 13:48:11.910172 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-955677c94-gq64h" Nov 28 13:48:11 crc kubenswrapper[4857]: I1128 13:48:11.915339 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-vvkl9" Nov 28 13:48:11 crc kubenswrapper[4857]: I1128 13:48:11.915877 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-l44sf"] Nov 28 13:48:11 crc kubenswrapper[4857]: I1128 13:48:11.920589 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-589cbd6b5b-bjf4d"] Nov 28 13:48:11 crc kubenswrapper[4857]: I1128 13:48:11.921896 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-bjf4d" Nov 28 13:48:11 crc kubenswrapper[4857]: I1128 13:48:11.924221 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-qkvdl" Nov 28 13:48:11 crc kubenswrapper[4857]: I1128 13:48:11.932740 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-gq64h"] Nov 28 13:48:11 crc kubenswrapper[4857]: I1128 13:48:11.950215 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-589cbd6b5b-bjf4d"] Nov 28 13:48:11 crc kubenswrapper[4857]: I1128 13:48:11.965751 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-2hbkq"] Nov 28 13:48:11 crc kubenswrapper[4857]: I1128 13:48:11.966693 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-2hbkq" Nov 28 13:48:11 crc kubenswrapper[4857]: I1128 13:48:11.974000 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-2hbkq"] Nov 28 13:48:11 crc kubenswrapper[4857]: I1128 13:48:11.979361 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-f5zhv"] Nov 28 13:48:11 crc kubenswrapper[4857]: I1128 13:48:11.979492 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-7wn5r" Nov 28 13:48:11 crc kubenswrapper[4857]: I1128 13:48:11.981653 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-f5zhv" Nov 28 13:48:11 crc kubenswrapper[4857]: I1128 13:48:11.984635 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-jzkh8" Nov 28 13:48:11 crc kubenswrapper[4857]: I1128 13:48:11.989100 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvggh\" (UniqueName: \"kubernetes.io/projected/77d1fd9b-46fd-4df8-bfd4-2c735e2d7504-kube-api-access-zvggh\") pod \"cinder-operator-controller-manager-6b7f75547b-l44sf\" (UID: \"77d1fd9b-46fd-4df8-bfd4-2c735e2d7504\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-l44sf" Nov 28 13:48:11 crc kubenswrapper[4857]: I1128 13:48:11.989159 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d72k5\" (UniqueName: \"kubernetes.io/projected/5dc1083e-b6ad-4424-982a-e85aeac54c1f-kube-api-access-d72k5\") pod \"barbican-operator-controller-manager-7b64f4fb85-w2tjv\" (UID: \"5dc1083e-b6ad-4424-982a-e85aeac54c1f\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-w2tjv" Nov 28 13:48:11 crc kubenswrapper[4857]: I1128 13:48:11.990757 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-8rl4w"] Nov 28 13:48:11 crc kubenswrapper[4857]: I1128 13:48:11.991727 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-8rl4w" Nov 28 13:48:11 crc kubenswrapper[4857]: I1128 13:48:11.996331 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-ljsfs" Nov 28 13:48:11 crc kubenswrapper[4857]: I1128 13:48:11.997437 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.012804 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-f5zhv"] Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.019086 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-fstp8"] Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.020048 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-fstp8" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.025520 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-2kzkg" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.025685 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-8rl4w"] Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.036245 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-fstp8"] Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.056054 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-ntrps"] Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.060338 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-ntrps" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.064334 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-hfjbp" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.066025 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-ntrps"] Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.075883 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-5d499bf58b-bff9n"] Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.076844 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-bff9n" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.087167 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-xbth6" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.090559 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvggh\" (UniqueName: \"kubernetes.io/projected/77d1fd9b-46fd-4df8-bfd4-2c735e2d7504-kube-api-access-zvggh\") pod \"cinder-operator-controller-manager-6b7f75547b-l44sf\" (UID: \"77d1fd9b-46fd-4df8-bfd4-2c735e2d7504\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-l44sf" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.090611 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xffdq\" (UniqueName: \"kubernetes.io/projected/5293402f-6ffd-4df9-853f-1c73a8d8b887-kube-api-access-xffdq\") pod \"infra-operator-controller-manager-57548d458d-8rl4w\" (UID: \"5293402f-6ffd-4df9-853f-1c73a8d8b887\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-8rl4w" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.090639 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n55h6\" (UniqueName: \"kubernetes.io/projected/057e15a1-c387-4614-b809-003fcbc1053d-kube-api-access-n55h6\") pod \"glance-operator-controller-manager-589cbd6b5b-bjf4d\" (UID: \"057e15a1-c387-4614-b809-003fcbc1053d\") " pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-bjf4d" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.090671 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d72k5\" (UniqueName: \"kubernetes.io/projected/5dc1083e-b6ad-4424-982a-e85aeac54c1f-kube-api-access-d72k5\") pod \"barbican-operator-controller-manager-7b64f4fb85-w2tjv\" (UID: \"5dc1083e-b6ad-4424-982a-e85aeac54c1f\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-w2tjv" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.090722 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7wz5p\" (UniqueName: \"kubernetes.io/projected/c226ccd3-f651-4ad7-91c4-4fa0f194c415-kube-api-access-7wz5p\") pod \"designate-operator-controller-manager-955677c94-gq64h\" (UID: \"c226ccd3-f651-4ad7-91c4-4fa0f194c415\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-gq64h" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.090741 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cgrcm\" (UniqueName: \"kubernetes.io/projected/0fb31297-9385-4426-a232-463e60388e72-kube-api-access-cgrcm\") pod \"horizon-operator-controller-manager-5d494799bf-f5zhv\" (UID: \"0fb31297-9385-4426-a232-463e60388e72\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-f5zhv" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.090779 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xf7zw\" (UniqueName: \"kubernetes.io/projected/47d1bb5d-1b83-4ff2-b760-a08e56cce245-kube-api-access-xf7zw\") pod \"heat-operator-controller-manager-5b77f656f-2hbkq\" (UID: \"47d1bb5d-1b83-4ff2-b760-a08e56cce245\") " 
pod="openstack-operators/heat-operator-controller-manager-5b77f656f-2hbkq" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.090799 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5293402f-6ffd-4df9-853f-1c73a8d8b887-cert\") pod \"infra-operator-controller-manager-57548d458d-8rl4w\" (UID: \"5293402f-6ffd-4df9-853f-1c73a8d8b887\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-8rl4w" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.101053 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5d499bf58b-bff9n"] Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.135925 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zvggh\" (UniqueName: \"kubernetes.io/projected/77d1fd9b-46fd-4df8-bfd4-2c735e2d7504-kube-api-access-zvggh\") pod \"cinder-operator-controller-manager-6b7f75547b-l44sf\" (UID: \"77d1fd9b-46fd-4df8-bfd4-2c735e2d7504\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-l44sf" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.148053 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d72k5\" (UniqueName: \"kubernetes.io/projected/5dc1083e-b6ad-4424-982a-e85aeac54c1f-kube-api-access-d72k5\") pod \"barbican-operator-controller-manager-7b64f4fb85-w2tjv\" (UID: \"5dc1083e-b6ad-4424-982a-e85aeac54c1f\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-w2tjv" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.149994 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-lk8hf"] Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.150962 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-lk8hf" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.158726 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-4xqph" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.175387 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-lk8hf"] Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.187935 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-w2tjv" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.193611 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6rj8d\" (UniqueName: \"kubernetes.io/projected/c62fc075-98ca-4cda-b5a6-be5da222c5c3-kube-api-access-6rj8d\") pod \"ironic-operator-controller-manager-67cb4dc6d4-fstp8\" (UID: \"c62fc075-98ca-4cda-b5a6-be5da222c5c3\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-fstp8" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.193663 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xffdq\" (UniqueName: \"kubernetes.io/projected/5293402f-6ffd-4df9-853f-1c73a8d8b887-kube-api-access-xffdq\") pod \"infra-operator-controller-manager-57548d458d-8rl4w\" (UID: \"5293402f-6ffd-4df9-853f-1c73a8d8b887\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-8rl4w" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.193689 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xbhfp\" (UniqueName: \"kubernetes.io/projected/b51da38a-adbe-4c14-86c8-7294f399b971-kube-api-access-xbhfp\") pod \"keystone-operator-controller-manager-7b4567c7cf-ntrps\" (UID: \"b51da38a-adbe-4c14-86c8-7294f399b971\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-ntrps" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.193712 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n55h6\" (UniqueName: \"kubernetes.io/projected/057e15a1-c387-4614-b809-003fcbc1053d-kube-api-access-n55h6\") pod \"glance-operator-controller-manager-589cbd6b5b-bjf4d\" (UID: \"057e15a1-c387-4614-b809-003fcbc1053d\") " pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-bjf4d" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.193745 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7wz5p\" (UniqueName: \"kubernetes.io/projected/c226ccd3-f651-4ad7-91c4-4fa0f194c415-kube-api-access-7wz5p\") pod \"designate-operator-controller-manager-955677c94-gq64h\" (UID: \"c226ccd3-f651-4ad7-91c4-4fa0f194c415\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-gq64h" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.193765 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cgrcm\" (UniqueName: \"kubernetes.io/projected/0fb31297-9385-4426-a232-463e60388e72-kube-api-access-cgrcm\") pod \"horizon-operator-controller-manager-5d494799bf-f5zhv\" (UID: \"0fb31297-9385-4426-a232-463e60388e72\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-f5zhv" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.193786 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-st74j\" (UniqueName: \"kubernetes.io/projected/d523105a-fbc2-47fc-ba6d-1738679751bc-kube-api-access-st74j\") pod \"manila-operator-controller-manager-5d499bf58b-bff9n\" (UID: \"d523105a-fbc2-47fc-ba6d-1738679751bc\") " pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-bff9n" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.193822 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xf7zw\" 
(UniqueName: \"kubernetes.io/projected/47d1bb5d-1b83-4ff2-b760-a08e56cce245-kube-api-access-xf7zw\") pod \"heat-operator-controller-manager-5b77f656f-2hbkq\" (UID: \"47d1bb5d-1b83-4ff2-b760-a08e56cce245\") " pod="openstack-operators/heat-operator-controller-manager-5b77f656f-2hbkq" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.193842 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5293402f-6ffd-4df9-853f-1c73a8d8b887-cert\") pod \"infra-operator-controller-manager-57548d458d-8rl4w\" (UID: \"5293402f-6ffd-4df9-853f-1c73a8d8b887\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-8rl4w" Nov 28 13:48:12 crc kubenswrapper[4857]: E1128 13:48:12.193971 4857 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 28 13:48:12 crc kubenswrapper[4857]: E1128 13:48:12.194019 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5293402f-6ffd-4df9-853f-1c73a8d8b887-cert podName:5293402f-6ffd-4df9-853f-1c73a8d8b887 nodeName:}" failed. No retries permitted until 2025-11-28 13:48:12.694000694 +0000 UTC m=+1142.817942131 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/5293402f-6ffd-4df9-853f-1c73a8d8b887-cert") pod "infra-operator-controller-manager-57548d458d-8rl4w" (UID: "5293402f-6ffd-4df9-853f-1c73a8d8b887") : secret "infra-operator-webhook-server-cert" not found Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.202386 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-l44sf" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.213088 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-brvlk"] Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.214194 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-brvlk" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.263714 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-w75gg" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.266798 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xffdq\" (UniqueName: \"kubernetes.io/projected/5293402f-6ffd-4df9-853f-1c73a8d8b887-kube-api-access-xffdq\") pod \"infra-operator-controller-manager-57548d458d-8rl4w\" (UID: \"5293402f-6ffd-4df9-853f-1c73a8d8b887\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-8rl4w" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.284410 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7wz5p\" (UniqueName: \"kubernetes.io/projected/c226ccd3-f651-4ad7-91c4-4fa0f194c415-kube-api-access-7wz5p\") pod \"designate-operator-controller-manager-955677c94-gq64h\" (UID: \"c226ccd3-f651-4ad7-91c4-4fa0f194c415\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-gq64h" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.290302 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xf7zw\" (UniqueName: \"kubernetes.io/projected/47d1bb5d-1b83-4ff2-b760-a08e56cce245-kube-api-access-xf7zw\") pod \"heat-operator-controller-manager-5b77f656f-2hbkq\" (UID: \"47d1bb5d-1b83-4ff2-b760-a08e56cce245\") " pod="openstack-operators/heat-operator-controller-manager-5b77f656f-2hbkq" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.296733 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n55h6\" (UniqueName: \"kubernetes.io/projected/057e15a1-c387-4614-b809-003fcbc1053d-kube-api-access-n55h6\") pod \"glance-operator-controller-manager-589cbd6b5b-bjf4d\" (UID: \"057e15a1-c387-4614-b809-003fcbc1053d\") " pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-bjf4d" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.305001 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cgrcm\" (UniqueName: \"kubernetes.io/projected/0fb31297-9385-4426-a232-463e60388e72-kube-api-access-cgrcm\") pod \"horizon-operator-controller-manager-5d494799bf-f5zhv\" (UID: \"0fb31297-9385-4426-a232-463e60388e72\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-f5zhv" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.305885 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6rj8d\" (UniqueName: \"kubernetes.io/projected/c62fc075-98ca-4cda-b5a6-be5da222c5c3-kube-api-access-6rj8d\") pod \"ironic-operator-controller-manager-67cb4dc6d4-fstp8\" (UID: \"c62fc075-98ca-4cda-b5a6-be5da222c5c3\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-fstp8" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.305926 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xbhfp\" (UniqueName: \"kubernetes.io/projected/b51da38a-adbe-4c14-86c8-7294f399b971-kube-api-access-xbhfp\") pod \"keystone-operator-controller-manager-7b4567c7cf-ntrps\" (UID: \"b51da38a-adbe-4c14-86c8-7294f399b971\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-ntrps" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.306004 4857 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-st74j\" (UniqueName: \"kubernetes.io/projected/d523105a-fbc2-47fc-ba6d-1738679751bc-kube-api-access-st74j\") pod \"manila-operator-controller-manager-5d499bf58b-bff9n\" (UID: \"d523105a-fbc2-47fc-ba6d-1738679751bc\") " pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-bff9n" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.306257 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-46qt7\" (UniqueName: \"kubernetes.io/projected/124ac5de-0ba4-4863-a225-750a2fb5570f-kube-api-access-46qt7\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-lk8hf\" (UID: \"124ac5de-0ba4-4863-a225-750a2fb5570f\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-lk8hf" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.309319 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-b8hn9"] Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.310849 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-b8hn9" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.318540 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-f5zhv" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.319377 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-krd8x" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.328160 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-4l8jv"] Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.329255 4857 util.go:30] "No sandbox for pod can be found. 
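The MountVolume.SetUp failure for the infra-operator "cert" volume above is parked by nestedpendingoperations with durationBeforeRetry 500ms; when the same mount fails again at 13:48:12.719958 further down, the wait doubles to 1s. That is consistent with an exponential backoff on repeated failures of the same operation. An illustrative Go sketch of such a doubling backoff follows; this is not the kubelet's actual nestedpendingoperations code, and the two-minute cap is an assumption:

package main

import (
	"fmt"
	"time"
)

// expBackoff doubles the wait after each failed attempt, matching the
// 500ms-then-1s waits visible in this log, up to an assumed cap.
type expBackoff struct {
	next, max time.Duration
}

func (b *expBackoff) wait() time.Duration {
	d := b.next
	if b.next *= 2; b.next > b.max {
		b.next = b.max
	}
	return d
}

func main() {
	b := &expBackoff{next: 500 * time.Millisecond, max: 2 * time.Minute}
	for i := 0; i < 3; i++ {
		fmt.Println(b.wait()) // 500ms, 1s, 2s
	}
}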
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-4l8jv" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.335310 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-xscpj" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.340890 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xbhfp\" (UniqueName: \"kubernetes.io/projected/b51da38a-adbe-4c14-86c8-7294f399b971-kube-api-access-xbhfp\") pod \"keystone-operator-controller-manager-7b4567c7cf-ntrps\" (UID: \"b51da38a-adbe-4c14-86c8-7294f399b971\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-ntrps" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.352219 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-brvlk"] Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.362198 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-st74j\" (UniqueName: \"kubernetes.io/projected/d523105a-fbc2-47fc-ba6d-1738679751bc-kube-api-access-st74j\") pod \"manila-operator-controller-manager-5d499bf58b-bff9n\" (UID: \"d523105a-fbc2-47fc-ba6d-1738679751bc\") " pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-bff9n" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.362275 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6rj8d\" (UniqueName: \"kubernetes.io/projected/c62fc075-98ca-4cda-b5a6-be5da222c5c3-kube-api-access-6rj8d\") pod \"ironic-operator-controller-manager-67cb4dc6d4-fstp8\" (UID: \"c62fc075-98ca-4cda-b5a6-be5da222c5c3\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-fstp8" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.364773 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-b8hn9"] Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.378047 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-4l8jv"] Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.388892 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-qcr9m"] Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.389914 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-qcr9m" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.393582 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-hkxv8" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.400813 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-ntrps" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.401993 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt"] Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.403046 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.407467 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-46qt7\" (UniqueName: \"kubernetes.io/projected/124ac5de-0ba4-4863-a225-750a2fb5570f-kube-api-access-46qt7\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-lk8hf\" (UID: \"124ac5de-0ba4-4863-a225-750a2fb5570f\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-lk8hf" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.407888 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4hh7v\" (UniqueName: \"kubernetes.io/projected/ec049f1f-9c56-4593-9953-3f18a7c90887-kube-api-access-4hh7v\") pod \"octavia-operator-controller-manager-64cdc6ff96-brvlk\" (UID: \"ec049f1f-9c56-4593-9953-3f18a7c90887\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-brvlk" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.410037 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-qcr9m"] Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.412322 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-9g62d" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.413648 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-bff9n" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.414365 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.420008 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-vbr94"] Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.421469 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vbr94" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.425728 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-5kzx8" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.426076 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt"] Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.440634 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-vbr94"] Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.450870 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-46qt7\" (UniqueName: \"kubernetes.io/projected/124ac5de-0ba4-4863-a225-750a2fb5570f-kube-api-access-46qt7\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-lk8hf\" (UID: \"124ac5de-0ba4-4863-a225-750a2fb5570f\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-lk8hf" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.458010 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-8f8nb"] Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.459387 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d77b94747-8f8nb" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.465344 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-fmbn7" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.489763 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-8f8nb"] Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.501747 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-xndsj"] Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.504928 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-xndsj" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.510005 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pdpzf\" (UniqueName: \"kubernetes.io/projected/cf39f47e-7284-4fae-949d-1b4de9e97751-kube-api-access-pdpzf\") pod \"ovn-operator-controller-manager-56897c768d-qcr9m\" (UID: \"cf39f47e-7284-4fae-949d-1b4de9e97751\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-qcr9m" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.510085 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4hh7v\" (UniqueName: \"kubernetes.io/projected/ec049f1f-9c56-4593-9953-3f18a7c90887-kube-api-access-4hh7v\") pod \"octavia-operator-controller-manager-64cdc6ff96-brvlk\" (UID: \"ec049f1f-9c56-4593-9953-3f18a7c90887\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-brvlk" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.510156 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bwz87\" (UniqueName: \"kubernetes.io/projected/85e2c248-e4f5-4192-bef2-e14c956e16f7-kube-api-access-bwz87\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt\" (UID: \"85e2c248-e4f5-4192-bef2-e14c956e16f7\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.510186 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pftc6\" (UniqueName: \"kubernetes.io/projected/7187e0f3-1feb-401a-bda3-900798b760c1-kube-api-access-pftc6\") pod \"nova-operator-controller-manager-79556f57fc-b8hn9\" (UID: \"7187e0f3-1feb-401a-bda3-900798b760c1\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-b8hn9" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.510236 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/85e2c248-e4f5-4192-bef2-e14c956e16f7-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt\" (UID: \"85e2c248-e4f5-4192-bef2-e14c956e16f7\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.510291 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-49z5w\" (UniqueName: \"kubernetes.io/projected/1a693631-7842-449a-98ab-9b3668d8bbf6-kube-api-access-49z5w\") pod \"neutron-operator-controller-manager-6fdcddb789-4l8jv\" (UID: \"1a693631-7842-449a-98ab-9b3668d8bbf6\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-4l8jv" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.511189 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-xndsj"] Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.520454 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-p8rgt" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.521229 4857 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-gcvrp"] Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.522434 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-gcvrp" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.533263 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-p8xdm" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.534006 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-955677c94-gq64h" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.536530 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-gcvrp"] Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.543692 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4hh7v\" (UniqueName: \"kubernetes.io/projected/ec049f1f-9c56-4593-9953-3f18a7c90887-kube-api-access-4hh7v\") pod \"octavia-operator-controller-manager-64cdc6ff96-brvlk\" (UID: \"ec049f1f-9c56-4593-9953-3f18a7c90887\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-brvlk" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.553983 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-bjf4d" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.582212 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-vlcd8"] Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.583661 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-vlcd8" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.584929 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-2hbkq" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.585832 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-nmnkt" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.595480 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-vlcd8"] Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.612899 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bwz87\" (UniqueName: \"kubernetes.io/projected/85e2c248-e4f5-4192-bef2-e14c956e16f7-kube-api-access-bwz87\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt\" (UID: \"85e2c248-e4f5-4192-bef2-e14c956e16f7\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.614462 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pftc6\" (UniqueName: \"kubernetes.io/projected/7187e0f3-1feb-401a-bda3-900798b760c1-kube-api-access-pftc6\") pod \"nova-operator-controller-manager-79556f57fc-b8hn9\" (UID: \"7187e0f3-1feb-401a-bda3-900798b760c1\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-b8hn9" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.614534 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qnvn5\" (UniqueName: \"kubernetes.io/projected/ea510000-70d9-4371-b791-6872e8d6905c-kube-api-access-qnvn5\") pod \"swift-operator-controller-manager-d77b94747-8f8nb\" (UID: \"ea510000-70d9-4371-b791-6872e8d6905c\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-8f8nb" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.614577 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w4mbx\" (UniqueName: \"kubernetes.io/projected/d7be9e32-53ca-40c8-8f12-8ee0ed5e924c-kube-api-access-w4mbx\") pod \"placement-operator-controller-manager-57988cc5b5-vbr94\" (UID: \"d7be9e32-53ca-40c8-8f12-8ee0ed5e924c\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vbr94" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.614602 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z92vb\" (UniqueName: \"kubernetes.io/projected/9579cba1-740c-4675-beb4-858ee406b22b-kube-api-access-z92vb\") pod \"telemetry-operator-controller-manager-76cc84c6bb-xndsj\" (UID: \"9579cba1-740c-4675-beb4-858ee406b22b\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-xndsj" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.614655 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/85e2c248-e4f5-4192-bef2-e14c956e16f7-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt\" (UID: \"85e2c248-e4f5-4192-bef2-e14c956e16f7\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.614749 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-49z5w\" (UniqueName: 
\"kubernetes.io/projected/1a693631-7842-449a-98ab-9b3668d8bbf6-kube-api-access-49z5w\") pod \"neutron-operator-controller-manager-6fdcddb789-4l8jv\" (UID: \"1a693631-7842-449a-98ab-9b3668d8bbf6\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-4l8jv" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.614791 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pdpzf\" (UniqueName: \"kubernetes.io/projected/cf39f47e-7284-4fae-949d-1b4de9e97751-kube-api-access-pdpzf\") pod \"ovn-operator-controller-manager-56897c768d-qcr9m\" (UID: \"cf39f47e-7284-4fae-949d-1b4de9e97751\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-qcr9m" Nov 28 13:48:12 crc kubenswrapper[4857]: E1128 13:48:12.615568 4857 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 13:48:12 crc kubenswrapper[4857]: E1128 13:48:12.615616 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/85e2c248-e4f5-4192-bef2-e14c956e16f7-cert podName:85e2c248-e4f5-4192-bef2-e14c956e16f7 nodeName:}" failed. No retries permitted until 2025-11-28 13:48:13.115598015 +0000 UTC m=+1143.239539452 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/85e2c248-e4f5-4192-bef2-e14c956e16f7-cert") pod "openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt" (UID: "85e2c248-e4f5-4192-bef2-e14c956e16f7") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.639325 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pdpzf\" (UniqueName: \"kubernetes.io/projected/cf39f47e-7284-4fae-949d-1b4de9e97751-kube-api-access-pdpzf\") pod \"ovn-operator-controller-manager-56897c768d-qcr9m\" (UID: \"cf39f47e-7284-4fae-949d-1b4de9e97751\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-qcr9m" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.644607 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-49z5w\" (UniqueName: \"kubernetes.io/projected/1a693631-7842-449a-98ab-9b3668d8bbf6-kube-api-access-49z5w\") pod \"neutron-operator-controller-manager-6fdcddb789-4l8jv\" (UID: \"1a693631-7842-449a-98ab-9b3668d8bbf6\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-4l8jv" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.649716 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pftc6\" (UniqueName: \"kubernetes.io/projected/7187e0f3-1feb-401a-bda3-900798b760c1-kube-api-access-pftc6\") pod \"nova-operator-controller-manager-79556f57fc-b8hn9\" (UID: \"7187e0f3-1feb-401a-bda3-900798b760c1\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-b8hn9" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.660254 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-fstp8" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.671873 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bwz87\" (UniqueName: \"kubernetes.io/projected/85e2c248-e4f5-4192-bef2-e14c956e16f7-kube-api-access-bwz87\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt\" (UID: \"85e2c248-e4f5-4192-bef2-e14c956e16f7\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.674549 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-lk8hf" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.703646 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-brvlk" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.718482 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5293402f-6ffd-4df9-853f-1c73a8d8b887-cert\") pod \"infra-operator-controller-manager-57548d458d-8rl4w\" (UID: \"5293402f-6ffd-4df9-853f-1c73a8d8b887\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-8rl4w" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.718560 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hg9zv\" (UniqueName: \"kubernetes.io/projected/8102fecd-5745-44b9-aa1c-37bd0662a28d-kube-api-access-hg9zv\") pod \"watcher-operator-controller-manager-656dcb59d4-vlcd8\" (UID: \"8102fecd-5745-44b9-aa1c-37bd0662a28d\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-vlcd8" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.718634 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qnvn5\" (UniqueName: \"kubernetes.io/projected/ea510000-70d9-4371-b791-6872e8d6905c-kube-api-access-qnvn5\") pod \"swift-operator-controller-manager-d77b94747-8f8nb\" (UID: \"ea510000-70d9-4371-b791-6872e8d6905c\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-8f8nb" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.718668 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w4mbx\" (UniqueName: \"kubernetes.io/projected/d7be9e32-53ca-40c8-8f12-8ee0ed5e924c-kube-api-access-w4mbx\") pod \"placement-operator-controller-manager-57988cc5b5-vbr94\" (UID: \"d7be9e32-53ca-40c8-8f12-8ee0ed5e924c\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vbr94" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.718689 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z92vb\" (UniqueName: \"kubernetes.io/projected/9579cba1-740c-4675-beb4-858ee406b22b-kube-api-access-z92vb\") pod \"telemetry-operator-controller-manager-76cc84c6bb-xndsj\" (UID: \"9579cba1-740c-4675-beb4-858ee406b22b\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-xndsj" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.718804 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nzgxj\" (UniqueName: 
\"kubernetes.io/projected/dcfce002-7195-42ef-932f-c5a8eebeb87f-kube-api-access-nzgxj\") pod \"test-operator-controller-manager-5cd6c7f4c8-gcvrp\" (UID: \"dcfce002-7195-42ef-932f-c5a8eebeb87f\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-gcvrp" Nov 28 13:48:12 crc kubenswrapper[4857]: E1128 13:48:12.719857 4857 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 28 13:48:12 crc kubenswrapper[4857]: E1128 13:48:12.719958 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5293402f-6ffd-4df9-853f-1c73a8d8b887-cert podName:5293402f-6ffd-4df9-853f-1c73a8d8b887 nodeName:}" failed. No retries permitted until 2025-11-28 13:48:13.71991048 +0000 UTC m=+1143.843851907 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/5293402f-6ffd-4df9-853f-1c73a8d8b887-cert") pod "infra-operator-controller-manager-57548d458d-8rl4w" (UID: "5293402f-6ffd-4df9-853f-1c73a8d8b887") : secret "infra-operator-webhook-server-cert" not found Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.723606 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7894b8b8d-fxkwb"] Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.727087 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-7894b8b8d-fxkwb" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.728774 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-b8hn9" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.731275 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.731458 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.731630 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-zgzb7" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.753762 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-4l8jv" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.756703 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w4mbx\" (UniqueName: \"kubernetes.io/projected/d7be9e32-53ca-40c8-8f12-8ee0ed5e924c-kube-api-access-w4mbx\") pod \"placement-operator-controller-manager-57988cc5b5-vbr94\" (UID: \"d7be9e32-53ca-40c8-8f12-8ee0ed5e924c\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vbr94" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.771454 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qnvn5\" (UniqueName: \"kubernetes.io/projected/ea510000-70d9-4371-b791-6872e8d6905c-kube-api-access-qnvn5\") pod \"swift-operator-controller-manager-d77b94747-8f8nb\" (UID: \"ea510000-70d9-4371-b791-6872e8d6905c\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-8f8nb" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.784156 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-qcr9m" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.803013 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z92vb\" (UniqueName: \"kubernetes.io/projected/9579cba1-740c-4675-beb4-858ee406b22b-kube-api-access-z92vb\") pod \"telemetry-operator-controller-manager-76cc84c6bb-xndsj\" (UID: \"9579cba1-740c-4675-beb4-858ee406b22b\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-xndsj" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.803128 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7894b8b8d-fxkwb"] Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.823405 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nzgxj\" (UniqueName: \"kubernetes.io/projected/dcfce002-7195-42ef-932f-c5a8eebeb87f-kube-api-access-nzgxj\") pod \"test-operator-controller-manager-5cd6c7f4c8-gcvrp\" (UID: \"dcfce002-7195-42ef-932f-c5a8eebeb87f\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-gcvrp" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.823467 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hg9zv\" (UniqueName: \"kubernetes.io/projected/8102fecd-5745-44b9-aa1c-37bd0662a28d-kube-api-access-hg9zv\") pod \"watcher-operator-controller-manager-656dcb59d4-vlcd8\" (UID: \"8102fecd-5745-44b9-aa1c-37bd0662a28d\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-vlcd8" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.824356 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-l44sf"] Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.836077 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-9mbjq"] Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.837018 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-9mbjq" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.841706 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vbr94" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.842966 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-9mbjq"] Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.843723 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-68t9x" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.847553 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hg9zv\" (UniqueName: \"kubernetes.io/projected/8102fecd-5745-44b9-aa1c-37bd0662a28d-kube-api-access-hg9zv\") pod \"watcher-operator-controller-manager-656dcb59d4-vlcd8\" (UID: \"8102fecd-5745-44b9-aa1c-37bd0662a28d\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-vlcd8" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.848202 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nzgxj\" (UniqueName: \"kubernetes.io/projected/dcfce002-7195-42ef-932f-c5a8eebeb87f-kube-api-access-nzgxj\") pod \"test-operator-controller-manager-5cd6c7f4c8-gcvrp\" (UID: \"dcfce002-7195-42ef-932f-c5a8eebeb87f\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-gcvrp" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.859019 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-w2tjv"] Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.872200 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-f5zhv"] Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.878231 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d77b94747-8f8nb" Nov 28 13:48:12 crc kubenswrapper[4857]: W1128 13:48:12.882057 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5dc1083e_b6ad_4424_982a_e85aeac54c1f.slice/crio-34fdcd0a4c19998af415d4fb8b1e8963ec28259fae0a71172b0b2b54f56cbc60 WatchSource:0}: Error finding container 34fdcd0a4c19998af415d4fb8b1e8963ec28259fae0a71172b0b2b54f56cbc60: Status 404 returned error can't find the container with id 34fdcd0a4c19998af415d4fb8b1e8963ec28259fae0a71172b0b2b54f56cbc60 Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.901829 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-xndsj" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.924602 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hx89s\" (UniqueName: \"kubernetes.io/projected/2b9b746d-698b-4ac1-b8e7-2b431b230985-kube-api-access-hx89s\") pod \"openstack-operator-controller-manager-7894b8b8d-fxkwb\" (UID: \"2b9b746d-698b-4ac1-b8e7-2b431b230985\") " pod="openstack-operators/openstack-operator-controller-manager-7894b8b8d-fxkwb" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.924729 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2b9b746d-698b-4ac1-b8e7-2b431b230985-metrics-certs\") pod \"openstack-operator-controller-manager-7894b8b8d-fxkwb\" (UID: \"2b9b746d-698b-4ac1-b8e7-2b431b230985\") " pod="openstack-operators/openstack-operator-controller-manager-7894b8b8d-fxkwb" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.924900 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/2b9b746d-698b-4ac1-b8e7-2b431b230985-webhook-certs\") pod \"openstack-operator-controller-manager-7894b8b8d-fxkwb\" (UID: \"2b9b746d-698b-4ac1-b8e7-2b431b230985\") " pod="openstack-operators/openstack-operator-controller-manager-7894b8b8d-fxkwb" Nov 28 13:48:12 crc kubenswrapper[4857]: I1128 13:48:12.961608 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-gcvrp" Nov 28 13:48:13 crc kubenswrapper[4857]: I1128 13:48:13.026196 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hx89s\" (UniqueName: \"kubernetes.io/projected/2b9b746d-698b-4ac1-b8e7-2b431b230985-kube-api-access-hx89s\") pod \"openstack-operator-controller-manager-7894b8b8d-fxkwb\" (UID: \"2b9b746d-698b-4ac1-b8e7-2b431b230985\") " pod="openstack-operators/openstack-operator-controller-manager-7894b8b8d-fxkwb" Nov 28 13:48:13 crc kubenswrapper[4857]: I1128 13:48:13.026379 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2b9b746d-698b-4ac1-b8e7-2b431b230985-metrics-certs\") pod \"openstack-operator-controller-manager-7894b8b8d-fxkwb\" (UID: \"2b9b746d-698b-4ac1-b8e7-2b431b230985\") " pod="openstack-operators/openstack-operator-controller-manager-7894b8b8d-fxkwb" Nov 28 13:48:13 crc kubenswrapper[4857]: I1128 13:48:13.026413 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4cn7s\" (UniqueName: \"kubernetes.io/projected/56d61304-2a03-427c-884e-cb000f6d24ea-kube-api-access-4cn7s\") pod \"rabbitmq-cluster-operator-manager-668c99d594-9mbjq\" (UID: \"56d61304-2a03-427c-884e-cb000f6d24ea\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-9mbjq" Nov 28 13:48:13 crc kubenswrapper[4857]: I1128 13:48:13.026478 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/2b9b746d-698b-4ac1-b8e7-2b431b230985-webhook-certs\") pod \"openstack-operator-controller-manager-7894b8b8d-fxkwb\" (UID: \"2b9b746d-698b-4ac1-b8e7-2b431b230985\") " 
pod="openstack-operators/openstack-operator-controller-manager-7894b8b8d-fxkwb" Nov 28 13:48:13 crc kubenswrapper[4857]: E1128 13:48:13.026613 4857 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 28 13:48:13 crc kubenswrapper[4857]: E1128 13:48:13.026665 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2b9b746d-698b-4ac1-b8e7-2b431b230985-webhook-certs podName:2b9b746d-698b-4ac1-b8e7-2b431b230985 nodeName:}" failed. No retries permitted until 2025-11-28 13:48:13.526651039 +0000 UTC m=+1143.650592476 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/2b9b746d-698b-4ac1-b8e7-2b431b230985-webhook-certs") pod "openstack-operator-controller-manager-7894b8b8d-fxkwb" (UID: "2b9b746d-698b-4ac1-b8e7-2b431b230985") : secret "webhook-server-cert" not found Nov 28 13:48:13 crc kubenswrapper[4857]: E1128 13:48:13.027043 4857 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 28 13:48:13 crc kubenswrapper[4857]: E1128 13:48:13.027074 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2b9b746d-698b-4ac1-b8e7-2b431b230985-metrics-certs podName:2b9b746d-698b-4ac1-b8e7-2b431b230985 nodeName:}" failed. No retries permitted until 2025-11-28 13:48:13.52706637 +0000 UTC m=+1143.651007807 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2b9b746d-698b-4ac1-b8e7-2b431b230985-metrics-certs") pod "openstack-operator-controller-manager-7894b8b8d-fxkwb" (UID: "2b9b746d-698b-4ac1-b8e7-2b431b230985") : secret "metrics-server-cert" not found Nov 28 13:48:13 crc kubenswrapper[4857]: I1128 13:48:13.052663 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-vlcd8" Nov 28 13:48:13 crc kubenswrapper[4857]: I1128 13:48:13.055058 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hx89s\" (UniqueName: \"kubernetes.io/projected/2b9b746d-698b-4ac1-b8e7-2b431b230985-kube-api-access-hx89s\") pod \"openstack-operator-controller-manager-7894b8b8d-fxkwb\" (UID: \"2b9b746d-698b-4ac1-b8e7-2b431b230985\") " pod="openstack-operators/openstack-operator-controller-manager-7894b8b8d-fxkwb" Nov 28 13:48:13 crc kubenswrapper[4857]: I1128 13:48:13.131691 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/85e2c248-e4f5-4192-bef2-e14c956e16f7-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt\" (UID: \"85e2c248-e4f5-4192-bef2-e14c956e16f7\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt" Nov 28 13:48:13 crc kubenswrapper[4857]: I1128 13:48:13.131777 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4cn7s\" (UniqueName: \"kubernetes.io/projected/56d61304-2a03-427c-884e-cb000f6d24ea-kube-api-access-4cn7s\") pod \"rabbitmq-cluster-operator-manager-668c99d594-9mbjq\" (UID: \"56d61304-2a03-427c-884e-cb000f6d24ea\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-9mbjq" Nov 28 13:48:13 crc kubenswrapper[4857]: E1128 13:48:13.132218 4857 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 13:48:13 crc kubenswrapper[4857]: E1128 13:48:13.132263 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/85e2c248-e4f5-4192-bef2-e14c956e16f7-cert podName:85e2c248-e4f5-4192-bef2-e14c956e16f7 nodeName:}" failed. No retries permitted until 2025-11-28 13:48:14.132249567 +0000 UTC m=+1144.256191004 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/85e2c248-e4f5-4192-bef2-e14c956e16f7-cert") pod "openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt" (UID: "85e2c248-e4f5-4192-bef2-e14c956e16f7") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 13:48:13 crc kubenswrapper[4857]: I1128 13:48:13.185454 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4cn7s\" (UniqueName: \"kubernetes.io/projected/56d61304-2a03-427c-884e-cb000f6d24ea-kube-api-access-4cn7s\") pod \"rabbitmq-cluster-operator-manager-668c99d594-9mbjq\" (UID: \"56d61304-2a03-427c-884e-cb000f6d24ea\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-9mbjq" Nov 28 13:48:13 crc kubenswrapper[4857]: I1128 13:48:13.238490 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-ntrps"] Nov 28 13:48:13 crc kubenswrapper[4857]: I1128 13:48:13.258673 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5d499bf58b-bff9n"] Nov 28 13:48:13 crc kubenswrapper[4857]: W1128 13:48:13.312082 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd523105a_fbc2_47fc_ba6d_1738679751bc.slice/crio-27ec046e4fd99c76572dcbd31f08dd5713111c527e9691080e9408472c437928 WatchSource:0}: Error finding container 27ec046e4fd99c76572dcbd31f08dd5713111c527e9691080e9408472c437928: Status 404 returned error can't find the container with id 27ec046e4fd99c76572dcbd31f08dd5713111c527e9691080e9408472c437928 Nov 28 13:48:13 crc kubenswrapper[4857]: I1128 13:48:13.464486 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-9mbjq" Nov 28 13:48:13 crc kubenswrapper[4857]: I1128 13:48:13.538489 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2b9b746d-698b-4ac1-b8e7-2b431b230985-metrics-certs\") pod \"openstack-operator-controller-manager-7894b8b8d-fxkwb\" (UID: \"2b9b746d-698b-4ac1-b8e7-2b431b230985\") " pod="openstack-operators/openstack-operator-controller-manager-7894b8b8d-fxkwb" Nov 28 13:48:13 crc kubenswrapper[4857]: I1128 13:48:13.538544 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/2b9b746d-698b-4ac1-b8e7-2b431b230985-webhook-certs\") pod \"openstack-operator-controller-manager-7894b8b8d-fxkwb\" (UID: \"2b9b746d-698b-4ac1-b8e7-2b431b230985\") " pod="openstack-operators/openstack-operator-controller-manager-7894b8b8d-fxkwb" Nov 28 13:48:13 crc kubenswrapper[4857]: E1128 13:48:13.538676 4857 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 28 13:48:13 crc kubenswrapper[4857]: E1128 13:48:13.538694 4857 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 28 13:48:13 crc kubenswrapper[4857]: E1128 13:48:13.538727 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2b9b746d-698b-4ac1-b8e7-2b431b230985-webhook-certs podName:2b9b746d-698b-4ac1-b8e7-2b431b230985 nodeName:}" failed. No retries permitted until 2025-11-28 13:48:14.538713076 +0000 UTC m=+1144.662654513 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/2b9b746d-698b-4ac1-b8e7-2b431b230985-webhook-certs") pod "openstack-operator-controller-manager-7894b8b8d-fxkwb" (UID: "2b9b746d-698b-4ac1-b8e7-2b431b230985") : secret "webhook-server-cert" not found Nov 28 13:48:13 crc kubenswrapper[4857]: E1128 13:48:13.538741 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2b9b746d-698b-4ac1-b8e7-2b431b230985-metrics-certs podName:2b9b746d-698b-4ac1-b8e7-2b431b230985 nodeName:}" failed. No retries permitted until 2025-11-28 13:48:14.538734897 +0000 UTC m=+1144.662676334 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2b9b746d-698b-4ac1-b8e7-2b431b230985-metrics-certs") pod "openstack-operator-controller-manager-7894b8b8d-fxkwb" (UID: "2b9b746d-698b-4ac1-b8e7-2b431b230985") : secret "metrics-server-cert" not found Nov 28 13:48:13 crc kubenswrapper[4857]: I1128 13:48:13.595739 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-2hbkq"] Nov 28 13:48:13 crc kubenswrapper[4857]: I1128 13:48:13.625207 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-gq64h"] Nov 28 13:48:13 crc kubenswrapper[4857]: I1128 13:48:13.740956 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5293402f-6ffd-4df9-853f-1c73a8d8b887-cert\") pod \"infra-operator-controller-manager-57548d458d-8rl4w\" (UID: \"5293402f-6ffd-4df9-853f-1c73a8d8b887\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-8rl4w" Nov 28 13:48:13 crc kubenswrapper[4857]: E1128 13:48:13.741189 4857 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 28 13:48:13 crc kubenswrapper[4857]: E1128 13:48:13.741247 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5293402f-6ffd-4df9-853f-1c73a8d8b887-cert podName:5293402f-6ffd-4df9-853f-1c73a8d8b887 nodeName:}" failed. No retries permitted until 2025-11-28 13:48:15.741233163 +0000 UTC m=+1145.865174590 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/5293402f-6ffd-4df9-853f-1c73a8d8b887-cert") pod "infra-operator-controller-manager-57548d458d-8rl4w" (UID: "5293402f-6ffd-4df9-853f-1c73a8d8b887") : secret "infra-operator-webhook-server-cert" not found Nov 28 13:48:13 crc kubenswrapper[4857]: I1128 13:48:13.795155 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-gq64h" event={"ID":"c226ccd3-f651-4ad7-91c4-4fa0f194c415","Type":"ContainerStarted","Data":"f51620226564d35218da775f4161ae270aa97170640bd4da53b1a9502dbc8fc1"} Nov 28 13:48:13 crc kubenswrapper[4857]: I1128 13:48:13.796091 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-l44sf" event={"ID":"77d1fd9b-46fd-4df8-bfd4-2c735e2d7504","Type":"ContainerStarted","Data":"c53fdd8ca967af247f8af67e3fdd3105f1fc41cfe8df898f382ab037121e9caf"} Nov 28 13:48:13 crc kubenswrapper[4857]: I1128 13:48:13.797424 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-w2tjv" event={"ID":"5dc1083e-b6ad-4424-982a-e85aeac54c1f","Type":"ContainerStarted","Data":"34fdcd0a4c19998af415d4fb8b1e8963ec28259fae0a71172b0b2b54f56cbc60"} Nov 28 13:48:13 crc kubenswrapper[4857]: I1128 13:48:13.798875 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-2hbkq" event={"ID":"47d1bb5d-1b83-4ff2-b760-a08e56cce245","Type":"ContainerStarted","Data":"5b358ee80ed7c32eca95901e3401c4a67b8f488e122583e6ba25221653c1e323"} Nov 28 13:48:13 crc kubenswrapper[4857]: I1128 13:48:13.799906 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-f5zhv" event={"ID":"0fb31297-9385-4426-a232-463e60388e72","Type":"ContainerStarted","Data":"9dbcaf345aa498e9c3321cf35e64fdbd70a159030a4262bc02fe2e4ad1efc896"} Nov 28 13:48:13 crc kubenswrapper[4857]: I1128 13:48:13.803035 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-bff9n" event={"ID":"d523105a-fbc2-47fc-ba6d-1738679751bc","Type":"ContainerStarted","Data":"27ec046e4fd99c76572dcbd31f08dd5713111c527e9691080e9408472c437928"} Nov 28 13:48:13 crc kubenswrapper[4857]: I1128 13:48:13.804044 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-ntrps" event={"ID":"b51da38a-adbe-4c14-86c8-7294f399b971","Type":"ContainerStarted","Data":"44e2b5bdec7ea6c6d593f296e1f3aa61cf171bb1cdf60c44165c2e6c28e76eec"} Nov 28 13:48:13 crc kubenswrapper[4857]: I1128 13:48:13.865061 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-589cbd6b5b-bjf4d"] Nov 28 13:48:13 crc kubenswrapper[4857]: I1128 13:48:13.880056 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-lk8hf"] Nov 28 13:48:13 crc kubenswrapper[4857]: I1128 13:48:13.884989 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-gcvrp"] Nov 28 13:48:13 crc kubenswrapper[4857]: I1128 13:48:13.897314 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-vbr94"] Nov 28 13:48:13 crc 
kubenswrapper[4857]: W1128 13:48:13.899260 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod057e15a1_c387_4614_b809_003fcbc1053d.slice/crio-5a4fb00f8bfaef44c5617ee6d66608c08a6c1ef5ad52308b6fccd8bb45addadd WatchSource:0}: Error finding container 5a4fb00f8bfaef44c5617ee6d66608c08a6c1ef5ad52308b6fccd8bb45addadd: Status 404 returned error can't find the container with id 5a4fb00f8bfaef44c5617ee6d66608c08a6c1ef5ad52308b6fccd8bb45addadd Nov 28 13:48:13 crc kubenswrapper[4857]: I1128 13:48:13.899403 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-fstp8"] Nov 28 13:48:13 crc kubenswrapper[4857]: I1128 13:48:13.918929 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-brvlk"] Nov 28 13:48:13 crc kubenswrapper[4857]: I1128 13:48:13.944144 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-b8hn9"] Nov 28 13:48:13 crc kubenswrapper[4857]: I1128 13:48:13.951210 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-xndsj"] Nov 28 13:48:14 crc kubenswrapper[4857]: I1128 13:48:14.049294 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-8f8nb"] Nov 28 13:48:14 crc kubenswrapper[4857]: I1128 13:48:14.063724 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-4l8jv"] Nov 28 13:48:14 crc kubenswrapper[4857]: I1128 13:48:14.073606 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-qcr9m"] Nov 28 13:48:14 crc kubenswrapper[4857]: I1128 13:48:14.093638 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-vlcd8"] Nov 28 13:48:14 crc kubenswrapper[4857]: I1128 13:48:14.110355 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-9mbjq"] Nov 28 13:48:14 crc kubenswrapper[4857]: I1128 13:48:14.146572 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/85e2c248-e4f5-4192-bef2-e14c956e16f7-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt\" (UID: \"85e2c248-e4f5-4192-bef2-e14c956e16f7\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt" Nov 28 13:48:14 crc kubenswrapper[4857]: E1128 13:48:14.146715 4857 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 13:48:14 crc kubenswrapper[4857]: E1128 13:48:14.146787 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/85e2c248-e4f5-4192-bef2-e14c956e16f7-cert podName:85e2c248-e4f5-4192-bef2-e14c956e16f7 nodeName:}" failed. No retries permitted until 2025-11-28 13:48:16.146766458 +0000 UTC m=+1146.270707975 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/85e2c248-e4f5-4192-bef2-e14c956e16f7-cert") pod "openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt" (UID: "85e2c248-e4f5-4192-bef2-e14c956e16f7") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 13:48:14 crc kubenswrapper[4857]: W1128 13:48:14.338271 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9579cba1_740c_4675_beb4_858ee406b22b.slice/crio-5864aecdd96404ccff31a16ac8cfc643f8c273a27a1799f44c4ef52736f05a20 WatchSource:0}: Error finding container 5864aecdd96404ccff31a16ac8cfc643f8c273a27a1799f44c4ef52736f05a20: Status 404 returned error can't find the container with id 5864aecdd96404ccff31a16ac8cfc643f8c273a27a1799f44c4ef52736f05a20 Nov 28 13:48:14 crc kubenswrapper[4857]: W1128 13:48:14.344777 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcf39f47e_7284_4fae_949d_1b4de9e97751.slice/crio-82b871e1cc42457b4aaec7ccdaf19832d9a07da3179c5818eba86ee0beb55a03 WatchSource:0}: Error finding container 82b871e1cc42457b4aaec7ccdaf19832d9a07da3179c5818eba86ee0beb55a03: Status 404 returned error can't find the container with id 82b871e1cc42457b4aaec7ccdaf19832d9a07da3179c5818eba86ee0beb55a03 Nov 28 13:48:14 crc kubenswrapper[4857]: W1128 13:48:14.350936 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8102fecd_5745_44b9_aa1c_37bd0662a28d.slice/crio-1fd2592004dfce05ca3970da1520009786a4ffc3415f9288de87175c4029770c WatchSource:0}: Error finding container 1fd2592004dfce05ca3970da1520009786a4ffc3415f9288de87175c4029770c: Status 404 returned error can't find the container with id 1fd2592004dfce05ca3970da1520009786a4ffc3415f9288de87175c4029770c Nov 28 13:48:14 crc kubenswrapper[4857]: E1128 13:48:14.360624 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/neutron-operator@sha256:e00a9ed0ab26c5b745bd804ab1fe6b22428d026f17ea05a05f045e060342f46c,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-49z5w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-6fdcddb789-4l8jv_openstack-operators(1a693631-7842-449a-98ab-9b3668d8bbf6): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 13:48:14 crc kubenswrapper[4857]: E1128 13:48:14.361612 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:6bed55b172b9ee8ccc3952cbfc543d8bd44e2690f6db94348a754152fd78f4cf,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-hg9zv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-656dcb59d4-vlcd8_openstack-operators(8102fecd-5745-44b9-aa1c-37bd0662a28d): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 13:48:14 crc 
kubenswrapper[4857]: E1128 13:48:14.376521 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-4cn7s,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-9mbjq_openstack-operators(56d61304-2a03-427c-884e-cb000f6d24ea): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 13:48:14 crc kubenswrapper[4857]: E1128 13:48:14.376698 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-49z5w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-6fdcddb789-4l8jv_openstack-operators(1a693631-7842-449a-98ab-9b3668d8bbf6): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 13:48:14 crc kubenswrapper[4857]: E1128 13:48:14.376799 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-hg9zv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-656dcb59d4-vlcd8_openstack-operators(8102fecd-5745-44b9-aa1c-37bd0662a28d): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 28 13:48:14 crc kubenswrapper[4857]: E1128 13:48:14.378139 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-vlcd8" podUID="8102fecd-5745-44b9-aa1c-37bd0662a28d" Nov 28 13:48:14 crc kubenswrapper[4857]: E1128 13:48:14.378242 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-9mbjq" podUID="56d61304-2a03-427c-884e-cb000f6d24ea" Nov 28 
13:48:14 crc kubenswrapper[4857]: E1128 13:48:14.378277 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-4l8jv" podUID="1a693631-7842-449a-98ab-9b3668d8bbf6" Nov 28 13:48:14 crc kubenswrapper[4857]: I1128 13:48:14.552674 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2b9b746d-698b-4ac1-b8e7-2b431b230985-metrics-certs\") pod \"openstack-operator-controller-manager-7894b8b8d-fxkwb\" (UID: \"2b9b746d-698b-4ac1-b8e7-2b431b230985\") " pod="openstack-operators/openstack-operator-controller-manager-7894b8b8d-fxkwb" Nov 28 13:48:14 crc kubenswrapper[4857]: I1128 13:48:14.552764 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/2b9b746d-698b-4ac1-b8e7-2b431b230985-webhook-certs\") pod \"openstack-operator-controller-manager-7894b8b8d-fxkwb\" (UID: \"2b9b746d-698b-4ac1-b8e7-2b431b230985\") " pod="openstack-operators/openstack-operator-controller-manager-7894b8b8d-fxkwb" Nov 28 13:48:14 crc kubenswrapper[4857]: E1128 13:48:14.552962 4857 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 28 13:48:14 crc kubenswrapper[4857]: E1128 13:48:14.553034 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2b9b746d-698b-4ac1-b8e7-2b431b230985-webhook-certs podName:2b9b746d-698b-4ac1-b8e7-2b431b230985 nodeName:}" failed. No retries permitted until 2025-11-28 13:48:16.553012092 +0000 UTC m=+1146.676953529 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/2b9b746d-698b-4ac1-b8e7-2b431b230985-webhook-certs") pod "openstack-operator-controller-manager-7894b8b8d-fxkwb" (UID: "2b9b746d-698b-4ac1-b8e7-2b431b230985") : secret "webhook-server-cert" not found Nov 28 13:48:14 crc kubenswrapper[4857]: E1128 13:48:14.553155 4857 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 28 13:48:14 crc kubenswrapper[4857]: E1128 13:48:14.553251 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2b9b746d-698b-4ac1-b8e7-2b431b230985-metrics-certs podName:2b9b746d-698b-4ac1-b8e7-2b431b230985 nodeName:}" failed. No retries permitted until 2025-11-28 13:48:16.553228447 +0000 UTC m=+1146.677169884 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2b9b746d-698b-4ac1-b8e7-2b431b230985-metrics-certs") pod "openstack-operator-controller-manager-7894b8b8d-fxkwb" (UID: "2b9b746d-698b-4ac1-b8e7-2b431b230985") : secret "metrics-server-cert" not found Nov 28 13:48:14 crc kubenswrapper[4857]: I1128 13:48:14.831537 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-bjf4d" event={"ID":"057e15a1-c387-4614-b809-003fcbc1053d","Type":"ContainerStarted","Data":"5a4fb00f8bfaef44c5617ee6d66608c08a6c1ef5ad52308b6fccd8bb45addadd"} Nov 28 13:48:14 crc kubenswrapper[4857]: I1128 13:48:14.832756 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-fstp8" event={"ID":"c62fc075-98ca-4cda-b5a6-be5da222c5c3","Type":"ContainerStarted","Data":"938d6e64245e5102d8fb89d457082317d52e37c399d1b3c68778a2a67e45f28c"} Nov 28 13:48:14 crc kubenswrapper[4857]: I1128 13:48:14.850140 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-gcvrp" event={"ID":"dcfce002-7195-42ef-932f-c5a8eebeb87f","Type":"ContainerStarted","Data":"e3c0fd2c9a3239a9e5579fcaaa2823acf887642f7ad36c790e3cd0d37ae5cf83"} Nov 28 13:48:14 crc kubenswrapper[4857]: I1128 13:48:14.851987 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-b8hn9" event={"ID":"7187e0f3-1feb-401a-bda3-900798b760c1","Type":"ContainerStarted","Data":"efe870032340315cd52359a7241a3f9f4026e8167331e4f0ff228880a3a161ad"} Nov 28 13:48:14 crc kubenswrapper[4857]: I1128 13:48:14.855200 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vbr94" event={"ID":"d7be9e32-53ca-40c8-8f12-8ee0ed5e924c","Type":"ContainerStarted","Data":"46523900abc6933dd58d7bc1effbf44c77076000e12e271e105a054f6da27656"} Nov 28 13:48:14 crc kubenswrapper[4857]: I1128 13:48:14.857068 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-4l8jv" event={"ID":"1a693631-7842-449a-98ab-9b3668d8bbf6","Type":"ContainerStarted","Data":"9cd340c725b3aed631ce26ad88dd2d02fd77cd76dddf044235f7bf54ea087d80"} Nov 28 13:48:14 crc kubenswrapper[4857]: I1128 13:48:14.859306 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-xndsj" event={"ID":"9579cba1-740c-4675-beb4-858ee406b22b","Type":"ContainerStarted","Data":"5864aecdd96404ccff31a16ac8cfc643f8c273a27a1799f44c4ef52736f05a20"} Nov 28 13:48:14 crc kubenswrapper[4857]: E1128 13:48:14.859762 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/neutron-operator@sha256:e00a9ed0ab26c5b745bd804ab1fe6b22428d026f17ea05a05f045e060342f46c\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-4l8jv" podUID="1a693631-7842-449a-98ab-9b3668d8bbf6" Nov 28 13:48:14 crc kubenswrapper[4857]: I1128 13:48:14.861592 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/ovn-operator-controller-manager-56897c768d-qcr9m" event={"ID":"cf39f47e-7284-4fae-949d-1b4de9e97751","Type":"ContainerStarted","Data":"82b871e1cc42457b4aaec7ccdaf19832d9a07da3179c5818eba86ee0beb55a03"} Nov 28 13:48:14 crc kubenswrapper[4857]: I1128 13:48:14.863712 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-8f8nb" event={"ID":"ea510000-70d9-4371-b791-6872e8d6905c","Type":"ContainerStarted","Data":"fb7569cbffa0ac594403ed5b9988e91cddab176d7b12e2e8e789d507853da7e8"} Nov 28 13:48:14 crc kubenswrapper[4857]: I1128 13:48:14.865144 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-9mbjq" event={"ID":"56d61304-2a03-427c-884e-cb000f6d24ea","Type":"ContainerStarted","Data":"a9d709d2f15a848eb9ea0ec97cf05351ce8200cf4ba28de357c12e89f37b1f5d"} Nov 28 13:48:14 crc kubenswrapper[4857]: E1128 13:48:14.866631 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-9mbjq" podUID="56d61304-2a03-427c-884e-cb000f6d24ea" Nov 28 13:48:14 crc kubenswrapper[4857]: I1128 13:48:14.867541 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-lk8hf" event={"ID":"124ac5de-0ba4-4863-a225-750a2fb5570f","Type":"ContainerStarted","Data":"303f1b3e6cc4a8e60f0dec42f092611bcf42a109bf93b20ce16c51b982cd7ac0"} Nov 28 13:48:14 crc kubenswrapper[4857]: I1128 13:48:14.869800 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-vlcd8" event={"ID":"8102fecd-5745-44b9-aa1c-37bd0662a28d","Type":"ContainerStarted","Data":"1fd2592004dfce05ca3970da1520009786a4ffc3415f9288de87175c4029770c"} Nov 28 13:48:14 crc kubenswrapper[4857]: E1128 13:48:14.880446 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:6bed55b172b9ee8ccc3952cbfc543d8bd44e2690f6db94348a754152fd78f4cf\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-vlcd8" podUID="8102fecd-5745-44b9-aa1c-37bd0662a28d" Nov 28 13:48:14 crc kubenswrapper[4857]: I1128 13:48:14.881112 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-brvlk" event={"ID":"ec049f1f-9c56-4593-9953-3f18a7c90887","Type":"ContainerStarted","Data":"3ec9d47abc2972dd4f7da09a86feb25536c89a8e94e368f42aca00c860f53ed9"} Nov 28 13:48:15 crc kubenswrapper[4857]: I1128 13:48:15.786742 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5293402f-6ffd-4df9-853f-1c73a8d8b887-cert\") pod \"infra-operator-controller-manager-57548d458d-8rl4w\" (UID: \"5293402f-6ffd-4df9-853f-1c73a8d8b887\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-8rl4w" Nov 28 13:48:15 crc kubenswrapper[4857]: 
E1128 13:48:15.786931 4857 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 28 13:48:15 crc kubenswrapper[4857]: E1128 13:48:15.787022 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5293402f-6ffd-4df9-853f-1c73a8d8b887-cert podName:5293402f-6ffd-4df9-853f-1c73a8d8b887 nodeName:}" failed. No retries permitted until 2025-11-28 13:48:19.787001827 +0000 UTC m=+1149.910943274 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/5293402f-6ffd-4df9-853f-1c73a8d8b887-cert") pod "infra-operator-controller-manager-57548d458d-8rl4w" (UID: "5293402f-6ffd-4df9-853f-1c73a8d8b887") : secret "infra-operator-webhook-server-cert" not found Nov 28 13:48:15 crc kubenswrapper[4857]: E1128 13:48:15.898638 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-9mbjq" podUID="56d61304-2a03-427c-884e-cb000f6d24ea" Nov 28 13:48:15 crc kubenswrapper[4857]: E1128 13:48:15.900092 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:6bed55b172b9ee8ccc3952cbfc543d8bd44e2690f6db94348a754152fd78f4cf\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-vlcd8" podUID="8102fecd-5745-44b9-aa1c-37bd0662a28d" Nov 28 13:48:15 crc kubenswrapper[4857]: E1128 13:48:15.908075 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/neutron-operator@sha256:e00a9ed0ab26c5b745bd804ab1fe6b22428d026f17ea05a05f045e060342f46c\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-4l8jv" podUID="1a693631-7842-449a-98ab-9b3668d8bbf6" Nov 28 13:48:16 crc kubenswrapper[4857]: I1128 13:48:16.202231 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/85e2c248-e4f5-4192-bef2-e14c956e16f7-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt\" (UID: \"85e2c248-e4f5-4192-bef2-e14c956e16f7\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt" Nov 28 13:48:16 crc kubenswrapper[4857]: E1128 13:48:16.202457 4857 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 13:48:16 crc kubenswrapper[4857]: E1128 13:48:16.202510 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/85e2c248-e4f5-4192-bef2-e14c956e16f7-cert podName:85e2c248-e4f5-4192-bef2-e14c956e16f7 nodeName:}" failed. 
No retries permitted until 2025-11-28 13:48:20.202493124 +0000 UTC m=+1150.326434571 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/85e2c248-e4f5-4192-bef2-e14c956e16f7-cert") pod "openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt" (UID: "85e2c248-e4f5-4192-bef2-e14c956e16f7") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 13:48:16 crc kubenswrapper[4857]: I1128 13:48:16.607567 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2b9b746d-698b-4ac1-b8e7-2b431b230985-metrics-certs\") pod \"openstack-operator-controller-manager-7894b8b8d-fxkwb\" (UID: \"2b9b746d-698b-4ac1-b8e7-2b431b230985\") " pod="openstack-operators/openstack-operator-controller-manager-7894b8b8d-fxkwb" Nov 28 13:48:16 crc kubenswrapper[4857]: I1128 13:48:16.607626 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/2b9b746d-698b-4ac1-b8e7-2b431b230985-webhook-certs\") pod \"openstack-operator-controller-manager-7894b8b8d-fxkwb\" (UID: \"2b9b746d-698b-4ac1-b8e7-2b431b230985\") " pod="openstack-operators/openstack-operator-controller-manager-7894b8b8d-fxkwb" Nov 28 13:48:16 crc kubenswrapper[4857]: E1128 13:48:16.607768 4857 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 28 13:48:16 crc kubenswrapper[4857]: E1128 13:48:16.607813 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2b9b746d-698b-4ac1-b8e7-2b431b230985-webhook-certs podName:2b9b746d-698b-4ac1-b8e7-2b431b230985 nodeName:}" failed. No retries permitted until 2025-11-28 13:48:20.607800413 +0000 UTC m=+1150.731741850 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/2b9b746d-698b-4ac1-b8e7-2b431b230985-webhook-certs") pod "openstack-operator-controller-manager-7894b8b8d-fxkwb" (UID: "2b9b746d-698b-4ac1-b8e7-2b431b230985") : secret "webhook-server-cert" not found Nov 28 13:48:16 crc kubenswrapper[4857]: E1128 13:48:16.607933 4857 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 28 13:48:16 crc kubenswrapper[4857]: E1128 13:48:16.608064 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2b9b746d-698b-4ac1-b8e7-2b431b230985-metrics-certs podName:2b9b746d-698b-4ac1-b8e7-2b431b230985 nodeName:}" failed. No retries permitted until 2025-11-28 13:48:20.608042919 +0000 UTC m=+1150.731984426 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2b9b746d-698b-4ac1-b8e7-2b431b230985-metrics-certs") pod "openstack-operator-controller-manager-7894b8b8d-fxkwb" (UID: "2b9b746d-698b-4ac1-b8e7-2b431b230985") : secret "metrics-server-cert" not found Nov 28 13:48:19 crc kubenswrapper[4857]: I1128 13:48:19.855420 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5293402f-6ffd-4df9-853f-1c73a8d8b887-cert\") pod \"infra-operator-controller-manager-57548d458d-8rl4w\" (UID: \"5293402f-6ffd-4df9-853f-1c73a8d8b887\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-8rl4w" Nov 28 13:48:19 crc kubenswrapper[4857]: E1128 13:48:19.855621 4857 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 28 13:48:19 crc kubenswrapper[4857]: E1128 13:48:19.856032 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5293402f-6ffd-4df9-853f-1c73a8d8b887-cert podName:5293402f-6ffd-4df9-853f-1c73a8d8b887 nodeName:}" failed. No retries permitted until 2025-11-28 13:48:27.856007965 +0000 UTC m=+1157.979949492 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/5293402f-6ffd-4df9-853f-1c73a8d8b887-cert") pod "infra-operator-controller-manager-57548d458d-8rl4w" (UID: "5293402f-6ffd-4df9-853f-1c73a8d8b887") : secret "infra-operator-webhook-server-cert" not found Nov 28 13:48:20 crc kubenswrapper[4857]: I1128 13:48:20.260217 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/85e2c248-e4f5-4192-bef2-e14c956e16f7-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt\" (UID: \"85e2c248-e4f5-4192-bef2-e14c956e16f7\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt" Nov 28 13:48:20 crc kubenswrapper[4857]: E1128 13:48:20.260480 4857 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 13:48:20 crc kubenswrapper[4857]: E1128 13:48:20.260589 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/85e2c248-e4f5-4192-bef2-e14c956e16f7-cert podName:85e2c248-e4f5-4192-bef2-e14c956e16f7 nodeName:}" failed. No retries permitted until 2025-11-28 13:48:28.260558986 +0000 UTC m=+1158.384500463 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/85e2c248-e4f5-4192-bef2-e14c956e16f7-cert") pod "openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt" (UID: "85e2c248-e4f5-4192-bef2-e14c956e16f7") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 28 13:48:20 crc kubenswrapper[4857]: I1128 13:48:20.665106 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2b9b746d-698b-4ac1-b8e7-2b431b230985-metrics-certs\") pod \"openstack-operator-controller-manager-7894b8b8d-fxkwb\" (UID: \"2b9b746d-698b-4ac1-b8e7-2b431b230985\") " pod="openstack-operators/openstack-operator-controller-manager-7894b8b8d-fxkwb" Nov 28 13:48:20 crc kubenswrapper[4857]: I1128 13:48:20.665228 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/2b9b746d-698b-4ac1-b8e7-2b431b230985-webhook-certs\") pod \"openstack-operator-controller-manager-7894b8b8d-fxkwb\" (UID: \"2b9b746d-698b-4ac1-b8e7-2b431b230985\") " pod="openstack-operators/openstack-operator-controller-manager-7894b8b8d-fxkwb" Nov 28 13:48:20 crc kubenswrapper[4857]: E1128 13:48:20.665295 4857 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 28 13:48:20 crc kubenswrapper[4857]: E1128 13:48:20.665392 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2b9b746d-698b-4ac1-b8e7-2b431b230985-metrics-certs podName:2b9b746d-698b-4ac1-b8e7-2b431b230985 nodeName:}" failed. No retries permitted until 2025-11-28 13:48:28.665374913 +0000 UTC m=+1158.789316350 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/2b9b746d-698b-4ac1-b8e7-2b431b230985-metrics-certs") pod "openstack-operator-controller-manager-7894b8b8d-fxkwb" (UID: "2b9b746d-698b-4ac1-b8e7-2b431b230985") : secret "metrics-server-cert" not found Nov 28 13:48:20 crc kubenswrapper[4857]: E1128 13:48:20.665456 4857 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 28 13:48:20 crc kubenswrapper[4857]: E1128 13:48:20.665550 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/2b9b746d-698b-4ac1-b8e7-2b431b230985-webhook-certs podName:2b9b746d-698b-4ac1-b8e7-2b431b230985 nodeName:}" failed. No retries permitted until 2025-11-28 13:48:28.665528437 +0000 UTC m=+1158.789469894 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/2b9b746d-698b-4ac1-b8e7-2b431b230985-webhook-certs") pod "openstack-operator-controller-manager-7894b8b8d-fxkwb" (UID: "2b9b746d-698b-4ac1-b8e7-2b431b230985") : secret "webhook-server-cert" not found Nov 28 13:48:27 crc kubenswrapper[4857]: I1128 13:48:27.230963 4857 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 13:48:27 crc kubenswrapper[4857]: I1128 13:48:27.905594 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5293402f-6ffd-4df9-853f-1c73a8d8b887-cert\") pod \"infra-operator-controller-manager-57548d458d-8rl4w\" (UID: \"5293402f-6ffd-4df9-853f-1c73a8d8b887\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-8rl4w" Nov 28 13:48:27 crc kubenswrapper[4857]: I1128 13:48:27.923631 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5293402f-6ffd-4df9-853f-1c73a8d8b887-cert\") pod \"infra-operator-controller-manager-57548d458d-8rl4w\" (UID: \"5293402f-6ffd-4df9-853f-1c73a8d8b887\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-8rl4w" Nov 28 13:48:27 crc kubenswrapper[4857]: I1128 13:48:27.934413 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-8rl4w" Nov 28 13:48:28 crc kubenswrapper[4857]: I1128 13:48:28.311689 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/85e2c248-e4f5-4192-bef2-e14c956e16f7-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt\" (UID: \"85e2c248-e4f5-4192-bef2-e14c956e16f7\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt" Nov 28 13:48:28 crc kubenswrapper[4857]: I1128 13:48:28.316411 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/85e2c248-e4f5-4192-bef2-e14c956e16f7-cert\") pod \"openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt\" (UID: \"85e2c248-e4f5-4192-bef2-e14c956e16f7\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt" Nov 28 13:48:28 crc kubenswrapper[4857]: E1128 13:48:28.391670 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/swift-operator@sha256:72236301580ff9080f7e311b832d7ba66666a9afeda51f969745229624ff26e4" Nov 28 13:48:28 crc kubenswrapper[4857]: E1128 13:48:28.392041 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:72236301580ff9080f7e311b832d7ba66666a9afeda51f969745229624ff26e4,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} 
BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qnvn5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-d77b94747-8f8nb_openstack-operators(ea510000-70d9-4371-b791-6872e8d6905c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 13:48:28 crc kubenswrapper[4857]: I1128 13:48:28.422117 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt" Nov 28 13:48:28 crc kubenswrapper[4857]: I1128 13:48:28.716208 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2b9b746d-698b-4ac1-b8e7-2b431b230985-metrics-certs\") pod \"openstack-operator-controller-manager-7894b8b8d-fxkwb\" (UID: \"2b9b746d-698b-4ac1-b8e7-2b431b230985\") " pod="openstack-operators/openstack-operator-controller-manager-7894b8b8d-fxkwb" Nov 28 13:48:28 crc kubenswrapper[4857]: I1128 13:48:28.716293 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/2b9b746d-698b-4ac1-b8e7-2b431b230985-webhook-certs\") pod \"openstack-operator-controller-manager-7894b8b8d-fxkwb\" (UID: \"2b9b746d-698b-4ac1-b8e7-2b431b230985\") " pod="openstack-operators/openstack-operator-controller-manager-7894b8b8d-fxkwb" Nov 28 13:48:28 crc kubenswrapper[4857]: I1128 13:48:28.728413 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2b9b746d-698b-4ac1-b8e7-2b431b230985-metrics-certs\") pod \"openstack-operator-controller-manager-7894b8b8d-fxkwb\" (UID: \"2b9b746d-698b-4ac1-b8e7-2b431b230985\") " pod="openstack-operators/openstack-operator-controller-manager-7894b8b8d-fxkwb" Nov 28 13:48:28 crc kubenswrapper[4857]: I1128 13:48:28.728718 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/2b9b746d-698b-4ac1-b8e7-2b431b230985-webhook-certs\") pod \"openstack-operator-controller-manager-7894b8b8d-fxkwb\" (UID: \"2b9b746d-698b-4ac1-b8e7-2b431b230985\") " pod="openstack-operators/openstack-operator-controller-manager-7894b8b8d-fxkwb" Nov 28 13:48:28 crc kubenswrapper[4857]: I1128 13:48:28.950956 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-7894b8b8d-fxkwb" Nov 28 13:48:29 crc kubenswrapper[4857]: E1128 13:48:29.022678 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/heat-operator@sha256:2ee37ff474bee3203447df4f326a9279a515e770573153338296dd074722c677" Nov 28 13:48:29 crc kubenswrapper[4857]: E1128 13:48:29.023336 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/heat-operator@sha256:2ee37ff474bee3203447df4f326a9279a515e770573153338296dd074722c677,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xf7zw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-operator-controller-manager-5b77f656f-2hbkq_openstack-operators(47d1bb5d-1b83-4ff2-b760-a08e56cce245): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 13:48:29 crc kubenswrapper[4857]: E1128 13:48:29.551976 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/mariadb-operator@sha256:888edf6f432e52eaa5fc3caeae616fe38a3302b006bbba0e38885b2beba9f0f2" Nov 28 13:48:29 crc kubenswrapper[4857]: E1128 13:48:29.552220 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:quay.io/openstack-k8s-operators/mariadb-operator@sha256:888edf6f432e52eaa5fc3caeae616fe38a3302b006bbba0e38885b2beba9f0f2,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-46qt7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-66f4dd4bc7-lk8hf_openstack-operators(124ac5de-0ba4-4863-a225-750a2fb5570f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 13:48:30 crc kubenswrapper[4857]: E1128 13:48:30.183536 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/barbican-operator@sha256:3dbf9fd9dce75f1fb250ee4c4097ad77d2f34110b61d85e37abd9c472e022e6c" Nov 28 13:48:30 crc kubenswrapper[4857]: E1128 13:48:30.183974 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/barbican-operator@sha256:3dbf9fd9dce75f1fb250ee4c4097ad77d2f34110b61d85e37abd9c472e022e6c,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: 
{{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-d72k5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-operator-controller-manager-7b64f4fb85-w2tjv_openstack-operators(5dc1083e-b6ad-4424-982a-e85aeac54c1f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 13:48:30 crc kubenswrapper[4857]: E1128 13:48:30.610235 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7" Nov 28 13:48:30 crc kubenswrapper[4857]: E1128 13:48:30.610448 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:c053e34316044f14929e16e4f0d97f9f1b24cb68b5e22b925ca74c66aaaed0a7,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-pftc6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-79556f57fc-b8hn9_openstack-operators(7187e0f3-1feb-401a-bda3-900798b760c1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 13:48:31 crc kubenswrapper[4857]: E1128 13:48:31.120753 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ovn-operator@sha256:bbb543d2d67c73e5df5d6357c3251363eb34a99575c5bf10416edd45dbdae2f6" Nov 28 13:48:31 crc kubenswrapper[4857]: E1128 13:48:31.120971 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:bbb543d2d67c73e5df5d6357c3251363eb34a99575c5bf10416edd45dbdae2f6,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-pdpzf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-56897c768d-qcr9m_openstack-operators(cf39f47e-7284-4fae-949d-1b4de9e97751): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 13:48:31 crc kubenswrapper[4857]: E1128 13:48:31.818188 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:25faa5b0e4801d4d3b01a28b877ed3188eee71f33ad66f3c2e86b7921758e711" Nov 28 13:48:31 crc kubenswrapper[4857]: E1128 13:48:31.818830 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:25faa5b0e4801d4d3b01a28b877ed3188eee71f33ad66f3c2e86b7921758e711,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xbhfp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-7b4567c7cf-ntrps_openstack-operators(b51da38a-adbe-4c14-86c8-7294f399b971): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 13:48:34 crc kubenswrapper[4857]: I1128 13:48:34.091647 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7894b8b8d-fxkwb"] Nov 28 13:48:34 crc kubenswrapper[4857]: I1128 13:48:34.674887 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-8rl4w"] Nov 28 13:48:34 crc kubenswrapper[4857]: I1128 13:48:34.732696 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt"] Nov 28 13:48:34 crc kubenswrapper[4857]: W1128 13:48:34.825068 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5293402f_6ffd_4df9_853f_1c73a8d8b887.slice/crio-185449ba2da7b767f5f74530ad44caa1a9082f918f2546c91ddbc0043c4e4f6e WatchSource:0}: Error finding container 185449ba2da7b767f5f74530ad44caa1a9082f918f2546c91ddbc0043c4e4f6e: Status 404 returned error can't find the container with id 185449ba2da7b767f5f74530ad44caa1a9082f918f2546c91ddbc0043c4e4f6e Nov 28 13:48:35 crc kubenswrapper[4857]: I1128 13:48:35.067122 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-xndsj" event={"ID":"9579cba1-740c-4675-beb4-858ee406b22b","Type":"ContainerStarted","Data":"aced84cbc5574a81cd942f3addc95cc8fff25d150dac1598f2b7e4c486b2aec8"} Nov 28 13:48:35 crc kubenswrapper[4857]: I1128 13:48:35.070359 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-fstp8" event={"ID":"c62fc075-98ca-4cda-b5a6-be5da222c5c3","Type":"ContainerStarted","Data":"d253c01bfc4ec61ecc8af21267dc5b589f3e8d246c2407c0e1d6730bcbbc22a9"} Nov 28 13:48:35 crc kubenswrapper[4857]: I1128 13:48:35.080426 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-bff9n" event={"ID":"d523105a-fbc2-47fc-ba6d-1738679751bc","Type":"ContainerStarted","Data":"83b800e0776335748bca9034646eb190d709352fcb437ba908493e78a7ccf2bf"} Nov 28 13:48:35 crc kubenswrapper[4857]: I1128 13:48:35.091544 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt" 
event={"ID":"85e2c248-e4f5-4192-bef2-e14c956e16f7","Type":"ContainerStarted","Data":"3401998455e05dbcaa4365f4884c4faa853a5e7f3f06c3c4c265cce7f3ccfbd6"} Nov 28 13:48:35 crc kubenswrapper[4857]: I1128 13:48:35.094929 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-f5zhv" event={"ID":"0fb31297-9385-4426-a232-463e60388e72","Type":"ContainerStarted","Data":"0b9fd824f5d62b293ea31269c5d6b5ec512026140cb9d76b2962f5fa10c1520b"} Nov 28 13:48:35 crc kubenswrapper[4857]: I1128 13:48:35.101122 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-8rl4w" event={"ID":"5293402f-6ffd-4df9-853f-1c73a8d8b887","Type":"ContainerStarted","Data":"185449ba2da7b767f5f74530ad44caa1a9082f918f2546c91ddbc0043c4e4f6e"} Nov 28 13:48:35 crc kubenswrapper[4857]: I1128 13:48:35.102304 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7894b8b8d-fxkwb" event={"ID":"2b9b746d-698b-4ac1-b8e7-2b431b230985","Type":"ContainerStarted","Data":"60ee7f0e100c95e61b5f3d5aba9db83bc720e2263e4480eeeab5f021d7a7f71b"} Nov 28 13:48:35 crc kubenswrapper[4857]: I1128 13:48:35.105790 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-brvlk" event={"ID":"ec049f1f-9c56-4593-9953-3f18a7c90887","Type":"ContainerStarted","Data":"6221ca4dd652da97cab1f601c854274242c76d7f905e30d94eac43c1da7683ca"} Nov 28 13:48:35 crc kubenswrapper[4857]: I1128 13:48:35.108655 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-bjf4d" event={"ID":"057e15a1-c387-4614-b809-003fcbc1053d","Type":"ContainerStarted","Data":"25ce3759a270427ee7a28d1abb3aaee5a6639aee0868e2a5e23031d63f520312"} Nov 28 13:48:35 crc kubenswrapper[4857]: I1128 13:48:35.111449 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-gcvrp" event={"ID":"dcfce002-7195-42ef-932f-c5a8eebeb87f","Type":"ContainerStarted","Data":"26052df9fa190db4cac68a7cdc4ce15e7cea845b6fa25de0f4f67153fe9d78c0"} Nov 28 13:48:35 crc kubenswrapper[4857]: I1128 13:48:35.113372 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-gq64h" event={"ID":"c226ccd3-f651-4ad7-91c4-4fa0f194c415","Type":"ContainerStarted","Data":"ef5ffd7af197eff9fe263bdca089762afd22805ed4bda0cdf5a0ce38c692ea2d"} Nov 28 13:48:35 crc kubenswrapper[4857]: I1128 13:48:35.115014 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-l44sf" event={"ID":"77d1fd9b-46fd-4df8-bfd4-2c735e2d7504","Type":"ContainerStarted","Data":"4bef7e4ccad3eae9c45bd6f9530c439d9641b18ef18d963dd6b1b89e9908112b"} Nov 28 13:48:35 crc kubenswrapper[4857]: I1128 13:48:35.132474 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vbr94" event={"ID":"d7be9e32-53ca-40c8-8f12-8ee0ed5e924c","Type":"ContainerStarted","Data":"43f856aa6f5df26586cf57fa67669bdeb71c3f8627702b0adee2f028d9cf0c15"} Nov 28 13:48:36 crc kubenswrapper[4857]: I1128 13:48:36.162798 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-vlcd8" 
event={"ID":"8102fecd-5745-44b9-aa1c-37bd0662a28d","Type":"ContainerStarted","Data":"7af2d07fa952788413fac79d1e89d89432a1c041a0810310127c031266775473"} Nov 28 13:48:36 crc kubenswrapper[4857]: I1128 13:48:36.168666 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7894b8b8d-fxkwb" event={"ID":"2b9b746d-698b-4ac1-b8e7-2b431b230985","Type":"ContainerStarted","Data":"99a02e9fdcd5642de72253b1737b582a2dfcb450f54190902f110453cb3c311a"} Nov 28 13:48:36 crc kubenswrapper[4857]: I1128 13:48:36.169528 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-7894b8b8d-fxkwb" Nov 28 13:48:36 crc kubenswrapper[4857]: I1128 13:48:36.238643 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-7894b8b8d-fxkwb" podStartSLOduration=24.238619724 podStartE2EDuration="24.238619724s" podCreationTimestamp="2025-11-28 13:48:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:48:36.227600426 +0000 UTC m=+1166.351541863" watchObservedRunningTime="2025-11-28 13:48:36.238619724 +0000 UTC m=+1166.362561161" Nov 28 13:48:37 crc kubenswrapper[4857]: I1128 13:48:37.188349 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-4l8jv" event={"ID":"1a693631-7842-449a-98ab-9b3668d8bbf6","Type":"ContainerStarted","Data":"0a483bbb36a54779aba0b76cfe419e178a70d76e50f0651d2b4e81ee7ee5edb7"} Nov 28 13:48:37 crc kubenswrapper[4857]: I1128 13:48:37.190650 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-9mbjq" event={"ID":"56d61304-2a03-427c-884e-cb000f6d24ea","Type":"ContainerStarted","Data":"de5b3fbf3c10d78eb3f5b9e28e5e6e4cebeaccc085e1017ee5ec54cc85f7c9fc"} Nov 28 13:48:37 crc kubenswrapper[4857]: I1128 13:48:37.206519 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-9mbjq" podStartSLOduration=5.163266057 podStartE2EDuration="25.206499167s" podCreationTimestamp="2025-11-28 13:48:12 +0000 UTC" firstStartedPulling="2025-11-28 13:48:14.376266016 +0000 UTC m=+1144.500207443" lastFinishedPulling="2025-11-28 13:48:34.419499116 +0000 UTC m=+1164.543440553" observedRunningTime="2025-11-28 13:48:37.205693777 +0000 UTC m=+1167.329635214" watchObservedRunningTime="2025-11-28 13:48:37.206499167 +0000 UTC m=+1167.330440604" Nov 28 13:48:41 crc kubenswrapper[4857]: I1128 13:48:41.309317 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:48:41 crc kubenswrapper[4857]: I1128 13:48:41.310123 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:48:42 crc kubenswrapper[4857]: E1128 13:48:42.720324 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed 
to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-lk8hf" podUID="124ac5de-0ba4-4863-a225-750a2fb5570f" Nov 28 13:48:42 crc kubenswrapper[4857]: E1128 13:48:42.821388 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-w2tjv" podUID="5dc1083e-b6ad-4424-982a-e85aeac54c1f" Nov 28 13:48:42 crc kubenswrapper[4857]: E1128 13:48:42.905718 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-qcr9m" podUID="cf39f47e-7284-4fae-949d-1b4de9e97751" Nov 28 13:48:43 crc kubenswrapper[4857]: E1128 13:48:43.052257 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/swift-operator-controller-manager-d77b94747-8f8nb" podUID="ea510000-70d9-4371-b791-6872e8d6905c" Nov 28 13:48:43 crc kubenswrapper[4857]: E1128 13:48:43.230817 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-b8hn9" podUID="7187e0f3-1feb-401a-bda3-900798b760c1" Nov 28 13:48:43 crc kubenswrapper[4857]: I1128 13:48:43.253058 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-vlcd8" event={"ID":"8102fecd-5745-44b9-aa1c-37bd0662a28d","Type":"ContainerStarted","Data":"6c0044719bdb0f11b1156050252b36a50bc1b88dcb7a3cff8e43b34c6301c6d2"} Nov 28 13:48:43 crc kubenswrapper[4857]: I1128 13:48:43.253226 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-vlcd8" Nov 28 13:48:43 crc kubenswrapper[4857]: I1128 13:48:43.255128 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-bff9n" event={"ID":"d523105a-fbc2-47fc-ba6d-1738679751bc","Type":"ContainerStarted","Data":"ceca9d0d98e290edc7f5f06f2de14f82628bbbf54ea3707d2646110899d89b06"} Nov 28 13:48:43 crc kubenswrapper[4857]: I1128 13:48:43.255310 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-bff9n" Nov 28 13:48:43 crc kubenswrapper[4857]: I1128 13:48:43.256806 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-bff9n" Nov 28 13:48:43 crc kubenswrapper[4857]: I1128 13:48:43.257096 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-l44sf" event={"ID":"77d1fd9b-46fd-4df8-bfd4-2c735e2d7504","Type":"ContainerStarted","Data":"df026ef0fe764c3b58de0f884a8e1a9c4693532a235420857d28ddababb176ba"} Nov 28 13:48:43 crc kubenswrapper[4857]: I1128 13:48:43.257313 4857 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-l44sf" Nov 28 13:48:43 crc kubenswrapper[4857]: I1128 13:48:43.258478 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-vlcd8" Nov 28 13:48:43 crc kubenswrapper[4857]: I1128 13:48:43.258838 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-l44sf" Nov 28 13:48:43 crc kubenswrapper[4857]: I1128 13:48:43.259469 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-b8hn9" event={"ID":"7187e0f3-1feb-401a-bda3-900798b760c1","Type":"ContainerStarted","Data":"fe0fd74d08a5241fe61b5edc4838a76b3c533adac5babc1503b79afe728ddba2"} Nov 28 13:48:43 crc kubenswrapper[4857]: I1128 13:48:43.260967 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-lk8hf" event={"ID":"124ac5de-0ba4-4863-a225-750a2fb5570f","Type":"ContainerStarted","Data":"3ac4663930f924afbe2cdf7d5ff99f4b361bb1608f6130fa25b57cc76893ea69"} Nov 28 13:48:43 crc kubenswrapper[4857]: I1128 13:48:43.262862 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vbr94" event={"ID":"d7be9e32-53ca-40c8-8f12-8ee0ed5e924c","Type":"ContainerStarted","Data":"953a5bdb2926f89081bb3230f2846642f59b4513c23ac6ea24a00f4ca94cfefd"} Nov 28 13:48:43 crc kubenswrapper[4857]: I1128 13:48:43.263248 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vbr94" Nov 28 13:48:43 crc kubenswrapper[4857]: I1128 13:48:43.267657 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vbr94" Nov 28 13:48:43 crc kubenswrapper[4857]: I1128 13:48:43.268398 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-8rl4w" event={"ID":"5293402f-6ffd-4df9-853f-1c73a8d8b887","Type":"ContainerStarted","Data":"12da5e93d8c3fff52d6564d1dff2f48fd5052fcdfae6dc10465df95e23811332"} Nov 28 13:48:43 crc kubenswrapper[4857]: I1128 13:48:43.270283 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-w2tjv" event={"ID":"5dc1083e-b6ad-4424-982a-e85aeac54c1f","Type":"ContainerStarted","Data":"e3aab1ca58cb0294fe864b6ba3c25d591ecbe34350bfa2fb72a1fb2d2c1cead1"} Nov 28 13:48:43 crc kubenswrapper[4857]: I1128 13:48:43.280297 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-qcr9m" event={"ID":"cf39f47e-7284-4fae-949d-1b4de9e97751","Type":"ContainerStarted","Data":"aae595b310fac29121e57497d9d40fa3913c28d6001d8fa0d2307decb2dc8e10"} Nov 28 13:48:43 crc kubenswrapper[4857]: I1128 13:48:43.292381 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-vlcd8" podStartSLOduration=3.396273915 podStartE2EDuration="31.292361539s" podCreationTimestamp="2025-11-28 13:48:12 +0000 UTC" firstStartedPulling="2025-11-28 13:48:14.360271132 +0000 UTC m=+1144.484212569" lastFinishedPulling="2025-11-28 13:48:42.256358756 +0000 UTC 
m=+1172.380300193" observedRunningTime="2025-11-28 13:48:43.280228533 +0000 UTC m=+1173.404169970" watchObservedRunningTime="2025-11-28 13:48:43.292361539 +0000 UTC m=+1173.416302976" Nov 28 13:48:43 crc kubenswrapper[4857]: I1128 13:48:43.294436 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-fstp8" event={"ID":"c62fc075-98ca-4cda-b5a6-be5da222c5c3","Type":"ContainerStarted","Data":"04218be84051e1cf7c04cf5754285f96aff1c36e146db7f5875acbe02e937029"} Nov 28 13:48:43 crc kubenswrapper[4857]: I1128 13:48:43.295624 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-fstp8" Nov 28 13:48:43 crc kubenswrapper[4857]: I1128 13:48:43.299993 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt" event={"ID":"85e2c248-e4f5-4192-bef2-e14c956e16f7","Type":"ContainerStarted","Data":"8c7714797ff282e239ccf803a24f3f7488283c4608036385f58284c7cf417c2e"} Nov 28 13:48:43 crc kubenswrapper[4857]: I1128 13:48:43.300089 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-fstp8" Nov 28 13:48:43 crc kubenswrapper[4857]: I1128 13:48:43.314406 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-8f8nb" event={"ID":"ea510000-70d9-4371-b791-6872e8d6905c","Type":"ContainerStarted","Data":"bf388b33f92800413199b7e4089f2e24c05ef00c8cae805dda9bb5344693b0f8"} Nov 28 13:48:43 crc kubenswrapper[4857]: I1128 13:48:43.345374 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-f5zhv" event={"ID":"0fb31297-9385-4426-a232-463e60388e72","Type":"ContainerStarted","Data":"7ce475c9e09c373bdd45ebde800400dff17df4f420b0e8511a33323f15aab617"} Nov 28 13:48:43 crc kubenswrapper[4857]: I1128 13:48:43.346999 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-f5zhv" Nov 28 13:48:43 crc kubenswrapper[4857]: I1128 13:48:43.348711 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-f5zhv" Nov 28 13:48:43 crc kubenswrapper[4857]: I1128 13:48:43.402896 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-l44sf" podStartSLOduration=2.906698482 podStartE2EDuration="32.402871231s" podCreationTimestamp="2025-11-28 13:48:11 +0000 UTC" firstStartedPulling="2025-11-28 13:48:12.812249822 +0000 UTC m=+1142.936191259" lastFinishedPulling="2025-11-28 13:48:42.308422571 +0000 UTC m=+1172.432364008" observedRunningTime="2025-11-28 13:48:43.348468767 +0000 UTC m=+1173.472410214" watchObservedRunningTime="2025-11-28 13:48:43.402871231 +0000 UTC m=+1173.526812678" Nov 28 13:48:43 crc kubenswrapper[4857]: I1128 13:48:43.411128 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-bff9n" podStartSLOduration=2.480104599 podStartE2EDuration="31.411107759s" podCreationTimestamp="2025-11-28 13:48:12 +0000 UTC" firstStartedPulling="2025-11-28 13:48:13.332141647 +0000 UTC m=+1143.456083084" lastFinishedPulling="2025-11-28 
13:48:42.263144797 +0000 UTC m=+1172.387086244" observedRunningTime="2025-11-28 13:48:43.383848201 +0000 UTC m=+1173.507789638" watchObservedRunningTime="2025-11-28 13:48:43.411107759 +0000 UTC m=+1173.535049196" Nov 28 13:48:43 crc kubenswrapper[4857]: E1128 13:48:43.429894 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-2hbkq" podUID="47d1bb5d-1b83-4ff2-b760-a08e56cce245" Nov 28 13:48:43 crc kubenswrapper[4857]: E1128 13:48:43.460966 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-ntrps" podUID="b51da38a-adbe-4c14-86c8-7294f399b971" Nov 28 13:48:43 crc kubenswrapper[4857]: I1128 13:48:43.526490 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-vbr94" podStartSLOduration=3.1878349 podStartE2EDuration="31.526473664s" podCreationTimestamp="2025-11-28 13:48:12 +0000 UTC" firstStartedPulling="2025-11-28 13:48:13.930813072 +0000 UTC m=+1144.054754509" lastFinishedPulling="2025-11-28 13:48:42.269451836 +0000 UTC m=+1172.393393273" observedRunningTime="2025-11-28 13:48:43.526049793 +0000 UTC m=+1173.649991230" watchObservedRunningTime="2025-11-28 13:48:43.526473664 +0000 UTC m=+1173.650415091" Nov 28 13:48:43 crc kubenswrapper[4857]: I1128 13:48:43.625464 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-fstp8" podStartSLOduration=4.294063313 podStartE2EDuration="32.625443844s" podCreationTimestamp="2025-11-28 13:48:11 +0000 UTC" firstStartedPulling="2025-11-28 13:48:13.938791064 +0000 UTC m=+1144.062732501" lastFinishedPulling="2025-11-28 13:48:42.270171555 +0000 UTC m=+1172.394113032" observedRunningTime="2025-11-28 13:48:43.614809136 +0000 UTC m=+1173.738750573" watchObservedRunningTime="2025-11-28 13:48:43.625443844 +0000 UTC m=+1173.749385281" Nov 28 13:48:43 crc kubenswrapper[4857]: I1128 13:48:43.635299 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-f5zhv" podStartSLOduration=3.312823184 podStartE2EDuration="32.635282483s" podCreationTimestamp="2025-11-28 13:48:11 +0000 UTC" firstStartedPulling="2025-11-28 13:48:12.941763585 +0000 UTC m=+1143.065705022" lastFinishedPulling="2025-11-28 13:48:42.264222884 +0000 UTC m=+1172.388164321" observedRunningTime="2025-11-28 13:48:43.632138984 +0000 UTC m=+1173.756080421" watchObservedRunningTime="2025-11-28 13:48:43.635282483 +0000 UTC m=+1173.759223920" Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.355165 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-brvlk" event={"ID":"ec049f1f-9c56-4593-9953-3f18a7c90887","Type":"ContainerStarted","Data":"097d60257bbb111fa4aa3302dcd80ebb200274a8b0779691b9f37a4f6bfb9026"} Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.356061 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-brvlk" Nov 28 13:48:44 crc 
kubenswrapper[4857]: I1128 13:48:44.358238 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt" event={"ID":"85e2c248-e4f5-4192-bef2-e14c956e16f7","Type":"ContainerStarted","Data":"ec52cd97a395c66576abe5da2829985b279a42f4537101ccd0bf69373f10d1b7"} Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.358478 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt" Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.359981 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-lk8hf" event={"ID":"124ac5de-0ba4-4863-a225-750a2fb5570f","Type":"ContainerStarted","Data":"315ce1a5380a5f7fcff89cfa6f41836fc195e9b58c32ac7a1c4e527273412fbd"} Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.360351 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-lk8hf" Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.362860 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-ntrps" event={"ID":"b51da38a-adbe-4c14-86c8-7294f399b971","Type":"ContainerStarted","Data":"296c7e34a3c151e50b2103f9265cc06b797abc802d7e800ca5cb0bdb3746b047"} Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.365663 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-brvlk" Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.366386 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-bjf4d" event={"ID":"057e15a1-c387-4614-b809-003fcbc1053d","Type":"ContainerStarted","Data":"b77337829c9816b895bb6b927579252e8510ab61b16caa50cfb84fb0fc13fd36"} Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.367750 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-bjf4d" Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.368937 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-bjf4d" Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.372268 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-qcr9m" event={"ID":"cf39f47e-7284-4fae-949d-1b4de9e97751","Type":"ContainerStarted","Data":"a80dc3d177d33cb108bd8bbf3fbf5834b00b4dca90ee6163f9148387898abc13"} Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.373080 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-qcr9m" Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.374350 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-brvlk" podStartSLOduration=4.041790894 podStartE2EDuration="32.374325214s" podCreationTimestamp="2025-11-28 13:48:12 +0000 UTC" firstStartedPulling="2025-11-28 13:48:13.937705876 +0000 UTC m=+1144.061647313" lastFinishedPulling="2025-11-28 13:48:42.270240196 +0000 UTC m=+1172.394181633" observedRunningTime="2025-11-28 
13:48:44.37375429 +0000 UTC m=+1174.497695727" watchObservedRunningTime="2025-11-28 13:48:44.374325214 +0000 UTC m=+1174.498266651"
Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.375039 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-b8hn9" event={"ID":"7187e0f3-1feb-401a-bda3-900798b760c1","Type":"ContainerStarted","Data":"a7553222ea1640eed8b957befe1f8449bf705ea3b6cff223be21c27eb760e3ab"}
Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.375183 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-b8hn9"
Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.376749 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-gq64h" event={"ID":"c226ccd3-f651-4ad7-91c4-4fa0f194c415","Type":"ContainerStarted","Data":"9aa273fe71299e0605ce10cfd9552cc356a11d66ef67e8b3c3cbafc424b45151"}
Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.378014 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-955677c94-gq64h"
Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.380310 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-2hbkq" event={"ID":"47d1bb5d-1b83-4ff2-b760-a08e56cce245","Type":"ContainerStarted","Data":"3b06bd2037aa091a6ca4d90f25ce53f316b95c45a193d13b0dc0d43aa313f060"}
Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.380463 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-955677c94-gq64h"
Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.390464 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-xndsj" event={"ID":"9579cba1-740c-4675-beb4-858ee406b22b","Type":"ContainerStarted","Data":"45dc0eed9713bc26c2b9d7c1fbc7aee495028045192e26046d7a2e1c3620ead4"}
Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.391154 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-xndsj"
Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.393048 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-gcvrp" event={"ID":"dcfce002-7195-42ef-932f-c5a8eebeb87f","Type":"ContainerStarted","Data":"21e10af3f2f90708875396eb539dc8dfd7c59d395b9beefe52d8d732ec92276d"}
Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.395017 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-gcvrp"
Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.395384 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-xndsj"
Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.396562 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-gcvrp"
Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.401626 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-8f8nb" event={"ID":"ea510000-70d9-4371-b791-6872e8d6905c","Type":"ContainerStarted","Data":"8900ba7061388a2fbbb899e053edee96701871601569ea0bed954777249d27b7"}
Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.402158 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-d77b94747-8f8nb"
Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.404609 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-4l8jv" event={"ID":"1a693631-7842-449a-98ab-9b3668d8bbf6","Type":"ContainerStarted","Data":"7464341b16b6aa4cfff577add1348a7248508da713d43b374939b8d381afd8ab"}
Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.405315 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-4l8jv"
Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.414779 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-4l8jv"
Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.416287 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-8rl4w" event={"ID":"5293402f-6ffd-4df9-853f-1c73a8d8b887","Type":"ContainerStarted","Data":"13b03ebe2ad5750962b27e4aab1a0076d64e14f87e2a0e4a2de8cf9bbe6cd311"}
Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.416924 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-57548d458d-8rl4w"
Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.420015 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-w2tjv" event={"ID":"5dc1083e-b6ad-4424-982a-e85aeac54c1f","Type":"ContainerStarted","Data":"b21d175f7b9610ee4324a9e998a7db893e50671739f5a3d0dc3c32a62eb9f97c"}
Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.420034 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-w2tjv"
Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.439629 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt" podStartSLOduration=25.133574265 podStartE2EDuration="32.439615964s" podCreationTimestamp="2025-11-28 13:48:12 +0000 UTC" firstStartedPulling="2025-11-28 13:48:34.85109498 +0000 UTC m=+1164.975036417" lastFinishedPulling="2025-11-28 13:48:42.157136679 +0000 UTC m=+1172.281078116" observedRunningTime="2025-11-28 13:48:44.433038088 +0000 UTC m=+1174.556979525" watchObservedRunningTime="2025-11-28 13:48:44.439615964 +0000 UTC m=+1174.563557401"
Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.512892 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-qcr9m" podStartSLOduration=2.9957180660000002 podStartE2EDuration="32.512875205s" podCreationTimestamp="2025-11-28 13:48:12 +0000 UTC" firstStartedPulling="2025-11-28 13:48:14.350106535 +0000 UTC m=+1144.474047972" lastFinishedPulling="2025-11-28 13:48:43.867263674 +0000 UTC m=+1173.991205111" observedRunningTime="2025-11-28 13:48:44.506005421 +0000 UTC m=+1174.629946858" watchObservedRunningTime="2025-11-28 13:48:44.512875205 +0000 UTC m=+1174.636816642"
Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.548385 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-bjf4d" podStartSLOduration=4.724435847 podStartE2EDuration="33.548371702s" podCreationTimestamp="2025-11-28 13:48:11 +0000 UTC" firstStartedPulling="2025-11-28 13:48:13.92242361 +0000 UTC m=+1144.046365057" lastFinishedPulling="2025-11-28 13:48:42.746359485 +0000 UTC m=+1172.870300912" observedRunningTime="2025-11-28 13:48:44.545811637 +0000 UTC m=+1174.669753074" watchObservedRunningTime="2025-11-28 13:48:44.548371702 +0000 UTC m=+1174.672313139"
Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.587247 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-lk8hf" podStartSLOduration=2.671079534 podStartE2EDuration="32.587231833s" podCreationTimestamp="2025-11-28 13:48:12 +0000 UTC" firstStartedPulling="2025-11-28 13:48:13.922068261 +0000 UTC m=+1144.046009688" lastFinishedPulling="2025-11-28 13:48:43.83822055 +0000 UTC m=+1173.962161987" observedRunningTime="2025-11-28 13:48:44.581648482 +0000 UTC m=+1174.705589919" watchObservedRunningTime="2025-11-28 13:48:44.587231833 +0000 UTC m=+1174.711173270"
Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.672422 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-4l8jv" podStartSLOduration=4.710725234 podStartE2EDuration="32.672401025s" podCreationTimestamp="2025-11-28 13:48:12 +0000 UTC" firstStartedPulling="2025-11-28 13:48:14.360472457 +0000 UTC m=+1144.484413894" lastFinishedPulling="2025-11-28 13:48:42.322148248 +0000 UTC m=+1172.446089685" observedRunningTime="2025-11-28 13:48:44.64489588 +0000 UTC m=+1174.768837317" watchObservedRunningTime="2025-11-28 13:48:44.672401025 +0000 UTC m=+1174.796342452"
Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.692337 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-gcvrp" podStartSLOduration=4.306170253 podStartE2EDuration="32.692316488s" podCreationTimestamp="2025-11-28 13:48:12 +0000 UTC" firstStartedPulling="2025-11-28 13:48:13.92203991 +0000 UTC m=+1144.045981347" lastFinishedPulling="2025-11-28 13:48:42.308186145 +0000 UTC m=+1172.432127582" observedRunningTime="2025-11-28 13:48:44.684612553 +0000 UTC m=+1174.808553990" watchObservedRunningTime="2025-11-28 13:48:44.692316488 +0000 UTC m=+1174.816257925"
Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.730492 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-b8hn9" podStartSLOduration=3.114157818 podStartE2EDuration="32.730475012s" podCreationTimestamp="2025-11-28 13:48:12 +0000 UTC" firstStartedPulling="2025-11-28 13:48:14.344807651 +0000 UTC m=+1144.468749098" lastFinishedPulling="2025-11-28 13:48:43.961124825 +0000 UTC m=+1174.085066292" observedRunningTime="2025-11-28 13:48:44.725812764 +0000 UTC m=+1174.849754211" watchObservedRunningTime="2025-11-28 13:48:44.730475012 +0000 UTC m=+1174.854416449"
Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.765208 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-w2tjv" podStartSLOduration=2.896036563 podStartE2EDuration="33.765188979s" podCreationTimestamp="2025-11-28 13:48:11 +0000 UTC" firstStartedPulling="2025-11-28 13:48:12.910522845 +0000 UTC m=+1143.034464282" lastFinishedPulling="2025-11-28 13:48:43.779675261 +0000 UTC m=+1173.903616698" observedRunningTime="2025-11-28 13:48:44.75253783 +0000 UTC m=+1174.876479267" watchObservedRunningTime="2025-11-28 13:48:44.765188979 +0000 UTC m=+1174.889130416"
Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.773558 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-d77b94747-8f8nb" podStartSLOduration=3.252416371 podStartE2EDuration="32.77354089s" podCreationTimestamp="2025-11-28 13:48:12 +0000 UTC" firstStartedPulling="2025-11-28 13:48:14.344925744 +0000 UTC m=+1144.468867181" lastFinishedPulling="2025-11-28 13:48:43.866050263 +0000 UTC m=+1173.989991700" observedRunningTime="2025-11-28 13:48:44.771283323 +0000 UTC m=+1174.895224760" watchObservedRunningTime="2025-11-28 13:48:44.77354089 +0000 UTC m=+1174.897482327"
Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.816360 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-57548d458d-8rl4w" podStartSLOduration=26.477772921 podStartE2EDuration="33.816342092s" podCreationTimestamp="2025-11-28 13:48:11 +0000 UTC" firstStartedPulling="2025-11-28 13:48:34.837822794 +0000 UTC m=+1164.961764231" lastFinishedPulling="2025-11-28 13:48:42.176391955 +0000 UTC m=+1172.300333402" observedRunningTime="2025-11-28 13:48:44.813269374 +0000 UTC m=+1174.937210821" watchObservedRunningTime="2025-11-28 13:48:44.816342092 +0000 UTC m=+1174.940283519"
Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.833875 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-xndsj" podStartSLOduration=4.519057442 podStartE2EDuration="32.833852404s" podCreationTimestamp="2025-11-28 13:48:12 +0000 UTC" firstStartedPulling="2025-11-28 13:48:14.349705755 +0000 UTC m=+1144.473647192" lastFinishedPulling="2025-11-28 13:48:42.664500707 +0000 UTC m=+1172.788442154" observedRunningTime="2025-11-28 13:48:44.831270709 +0000 UTC m=+1174.955212146" watchObservedRunningTime="2025-11-28 13:48:44.833852404 +0000 UTC m=+1174.957793841"
Nov 28 13:48:44 crc kubenswrapper[4857]: I1128 13:48:44.849683 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-955677c94-gq64h" podStartSLOduration=5.203621982 podStartE2EDuration="33.849666073s" podCreationTimestamp="2025-11-28 13:48:11 +0000 UTC" firstStartedPulling="2025-11-28 13:48:13.643580245 +0000 UTC m=+1143.767521672" lastFinishedPulling="2025-11-28 13:48:42.289624326 +0000 UTC m=+1172.413565763" observedRunningTime="2025-11-28 13:48:44.845366245 +0000 UTC m=+1174.969307702" watchObservedRunningTime="2025-11-28 13:48:44.849666073 +0000 UTC m=+1174.973607510"
Nov 28 13:48:47 crc kubenswrapper[4857]: I1128 13:48:47.942649 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-57548d458d-8rl4w"
Nov 28 13:48:48 crc kubenswrapper[4857]: I1128 13:48:48.429410 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt"
Nov 28 13:48:48 crc kubenswrapper[4857]: I1128 13:48:48.958306 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-7894b8b8d-fxkwb"
Nov 28 13:48:52 crc kubenswrapper[4857]: I1128 13:48:52.191190 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-w2tjv"
Nov 28 13:48:52 crc kubenswrapper[4857]: I1128 13:48:52.690000 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-lk8hf"
Nov 28 13:48:52 crc kubenswrapper[4857]: I1128 13:48:52.733050 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-b8hn9"
Nov 28 13:48:52 crc kubenswrapper[4857]: I1128 13:48:52.791342 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-qcr9m"
Nov 28 13:48:52 crc kubenswrapper[4857]: I1128 13:48:52.881448 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-d77b94747-8f8nb"
Nov 28 13:48:57 crc kubenswrapper[4857]: I1128 13:48:57.521612 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-2hbkq" event={"ID":"47d1bb5d-1b83-4ff2-b760-a08e56cce245","Type":"ContainerStarted","Data":"d647ba8edfaf6259c2797f0229e5c85fa62dd1ca2e28e53d907ae1a4cc7bf099"}
Nov 28 13:48:57 crc kubenswrapper[4857]: I1128 13:48:57.524617 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-2hbkq"
Nov 28 13:48:57 crc kubenswrapper[4857]: I1128 13:48:57.524691 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-ntrps" event={"ID":"b51da38a-adbe-4c14-86c8-7294f399b971","Type":"ContainerStarted","Data":"ef2d225606be21e7309710bad14d3ca5a736645b377e7d0762fc2675637acb4c"}
Nov 28 13:48:57 crc kubenswrapper[4857]: I1128 13:48:57.524724 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-ntrps"
Nov 28 13:48:57 crc kubenswrapper[4857]: I1128 13:48:57.535748 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-2hbkq" podStartSLOduration=3.080203536 podStartE2EDuration="46.535729733s" podCreationTimestamp="2025-11-28 13:48:11 +0000 UTC" firstStartedPulling="2025-11-28 13:48:13.635096291 +0000 UTC m=+1143.759037728" lastFinishedPulling="2025-11-28 13:48:57.090622478 +0000 UTC m=+1187.214563925" observedRunningTime="2025-11-28 13:48:57.534458751 +0000 UTC m=+1187.658400238" watchObservedRunningTime="2025-11-28 13:48:57.535729733 +0000 UTC m=+1187.659671170"
Nov 28 13:48:57 crc kubenswrapper[4857]: I1128 13:48:57.548486 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-ntrps" podStartSLOduration=1.790677012 podStartE2EDuration="45.548468495s" podCreationTimestamp="2025-11-28 13:48:12 +0000 UTC" firstStartedPulling="2025-11-28 13:48:13.332590729 +0000 UTC m=+1143.456532166" lastFinishedPulling="2025-11-28 13:48:57.090382202 +0000 UTC m=+1187.214323649" observedRunningTime="2025-11-28 13:48:57.548064865 +0000 UTC m=+1187.672006322" watchObservedRunningTime="2025-11-28 13:48:57.548468495 +0000 UTC m=+1187.672409932"
Nov 28 13:49:02 crc kubenswrapper[4857]: I1128 13:49:02.404696 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-ntrps"
Nov 28 13:49:02 crc kubenswrapper[4857]: I1128 13:49:02.588698 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-2hbkq"
Nov 28 13:49:11 crc kubenswrapper[4857]: I1128 13:49:11.308576 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 13:49:11 crc kubenswrapper[4857]: I1128 13:49:11.309132 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 13:49:19 crc kubenswrapper[4857]: I1128 13:49:19.279994 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-spcl4"]
Nov 28 13:49:19 crc kubenswrapper[4857]: I1128 13:49:19.281675 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-spcl4"
Nov 28 13:49:19 crc kubenswrapper[4857]: I1128 13:49:19.283459 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt"
Nov 28 13:49:19 crc kubenswrapper[4857]: I1128 13:49:19.283637 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt"
Nov 28 13:49:19 crc kubenswrapper[4857]: I1128 13:49:19.283851 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-cxnjf"
Nov 28 13:49:19 crc kubenswrapper[4857]: I1128 13:49:19.284889 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns"
Nov 28 13:49:19 crc kubenswrapper[4857]: I1128 13:49:19.301933 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-spcl4"]
Nov 28 13:49:19 crc kubenswrapper[4857]: I1128 13:49:19.348606 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-75vnk"]
Nov 28 13:49:19 crc kubenswrapper[4857]: I1128 13:49:19.349783 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-75vnk"
Nov 28 13:49:19 crc kubenswrapper[4857]: I1128 13:49:19.352667 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc"
Nov 28 13:49:19 crc kubenswrapper[4857]: I1128 13:49:19.362820 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-75vnk"]
Nov 28 13:49:19 crc kubenswrapper[4857]: I1128 13:49:19.434060 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2507b0ba-8cf5-4bdd-be01-f4c69188fd66-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-75vnk\" (UID: \"2507b0ba-8cf5-4bdd-be01-f4c69188fd66\") " pod="openstack/dnsmasq-dns-78dd6ddcc-75vnk"
Nov 28 13:49:19 crc kubenswrapper[4857]: I1128 13:49:19.434394 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pb7ld\" (UniqueName: \"kubernetes.io/projected/2507b0ba-8cf5-4bdd-be01-f4c69188fd66-kube-api-access-pb7ld\") pod \"dnsmasq-dns-78dd6ddcc-75vnk\" (UID: \"2507b0ba-8cf5-4bdd-be01-f4c69188fd66\") " pod="openstack/dnsmasq-dns-78dd6ddcc-75vnk"
Nov 28 13:49:19 crc kubenswrapper[4857]: I1128 13:49:19.434525 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2507b0ba-8cf5-4bdd-be01-f4c69188fd66-config\") pod \"dnsmasq-dns-78dd6ddcc-75vnk\" (UID: \"2507b0ba-8cf5-4bdd-be01-f4c69188fd66\") " pod="openstack/dnsmasq-dns-78dd6ddcc-75vnk"
Nov 28 13:49:19 crc kubenswrapper[4857]: I1128 13:49:19.434618 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da61181a-e0e6-4273-9070-9c3048727707-config\") pod \"dnsmasq-dns-675f4bcbfc-spcl4\" (UID: \"da61181a-e0e6-4273-9070-9c3048727707\") " pod="openstack/dnsmasq-dns-675f4bcbfc-spcl4"
Nov 28 13:49:19 crc kubenswrapper[4857]: I1128 13:49:19.434713 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vdmn\" (UniqueName: \"kubernetes.io/projected/da61181a-e0e6-4273-9070-9c3048727707-kube-api-access-4vdmn\") pod \"dnsmasq-dns-675f4bcbfc-spcl4\" (UID: \"da61181a-e0e6-4273-9070-9c3048727707\") " pod="openstack/dnsmasq-dns-675f4bcbfc-spcl4"
Nov 28 13:49:19 crc kubenswrapper[4857]: I1128 13:49:19.536026 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pb7ld\" (UniqueName: \"kubernetes.io/projected/2507b0ba-8cf5-4bdd-be01-f4c69188fd66-kube-api-access-pb7ld\") pod \"dnsmasq-dns-78dd6ddcc-75vnk\" (UID: \"2507b0ba-8cf5-4bdd-be01-f4c69188fd66\") " pod="openstack/dnsmasq-dns-78dd6ddcc-75vnk"
Nov 28 13:49:19 crc kubenswrapper[4857]: I1128 13:49:19.536093 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2507b0ba-8cf5-4bdd-be01-f4c69188fd66-config\") pod \"dnsmasq-dns-78dd6ddcc-75vnk\" (UID: \"2507b0ba-8cf5-4bdd-be01-f4c69188fd66\") " pod="openstack/dnsmasq-dns-78dd6ddcc-75vnk"
Nov 28 13:49:19 crc kubenswrapper[4857]: I1128 13:49:19.536111 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da61181a-e0e6-4273-9070-9c3048727707-config\") pod \"dnsmasq-dns-675f4bcbfc-spcl4\" (UID: \"da61181a-e0e6-4273-9070-9c3048727707\") " pod="openstack/dnsmasq-dns-675f4bcbfc-spcl4"
Nov 28 13:49:19 crc kubenswrapper[4857]: I1128 13:49:19.536126 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vdmn\" (UniqueName: \"kubernetes.io/projected/da61181a-e0e6-4273-9070-9c3048727707-kube-api-access-4vdmn\") pod \"dnsmasq-dns-675f4bcbfc-spcl4\" (UID: \"da61181a-e0e6-4273-9070-9c3048727707\") " pod="openstack/dnsmasq-dns-675f4bcbfc-spcl4"
Nov 28 13:49:19 crc kubenswrapper[4857]: I1128 13:49:19.536159 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2507b0ba-8cf5-4bdd-be01-f4c69188fd66-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-75vnk\" (UID: \"2507b0ba-8cf5-4bdd-be01-f4c69188fd66\") " pod="openstack/dnsmasq-dns-78dd6ddcc-75vnk"
Nov 28 13:49:19 crc kubenswrapper[4857]: I1128 13:49:19.537064 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2507b0ba-8cf5-4bdd-be01-f4c69188fd66-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-75vnk\" (UID: \"2507b0ba-8cf5-4bdd-be01-f4c69188fd66\") " pod="openstack/dnsmasq-dns-78dd6ddcc-75vnk"
Nov 28 13:49:19 crc kubenswrapper[4857]: I1128 13:49:19.538012 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2507b0ba-8cf5-4bdd-be01-f4c69188fd66-config\") pod \"dnsmasq-dns-78dd6ddcc-75vnk\" (UID: \"2507b0ba-8cf5-4bdd-be01-f4c69188fd66\") " pod="openstack/dnsmasq-dns-78dd6ddcc-75vnk"
Nov 28 13:49:19 crc kubenswrapper[4857]: I1128 13:49:19.538062 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da61181a-e0e6-4273-9070-9c3048727707-config\") pod \"dnsmasq-dns-675f4bcbfc-spcl4\" (UID: \"da61181a-e0e6-4273-9070-9c3048727707\") " pod="openstack/dnsmasq-dns-675f4bcbfc-spcl4"
Nov 28 13:49:19 crc kubenswrapper[4857]: I1128 13:49:19.559794 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4vdmn\" (UniqueName: \"kubernetes.io/projected/da61181a-e0e6-4273-9070-9c3048727707-kube-api-access-4vdmn\") pod \"dnsmasq-dns-675f4bcbfc-spcl4\" (UID: \"da61181a-e0e6-4273-9070-9c3048727707\") " pod="openstack/dnsmasq-dns-675f4bcbfc-spcl4"
Nov 28 13:49:19 crc kubenswrapper[4857]: I1128 13:49:19.560866 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pb7ld\" (UniqueName: \"kubernetes.io/projected/2507b0ba-8cf5-4bdd-be01-f4c69188fd66-kube-api-access-pb7ld\") pod \"dnsmasq-dns-78dd6ddcc-75vnk\" (UID: \"2507b0ba-8cf5-4bdd-be01-f4c69188fd66\") " pod="openstack/dnsmasq-dns-78dd6ddcc-75vnk"
Nov 28 13:49:19 crc kubenswrapper[4857]: I1128 13:49:19.627632 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-spcl4"
Nov 28 13:49:19 crc kubenswrapper[4857]: I1128 13:49:19.675298 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-75vnk"
Nov 28 13:49:20 crc kubenswrapper[4857]: I1128 13:49:20.075334 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-spcl4"]
Nov 28 13:49:20 crc kubenswrapper[4857]: W1128 13:49:20.080064 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podda61181a_e0e6_4273_9070_9c3048727707.slice/crio-aff75d332fb089851d01a7685429995d049d6c2493ab084ad4de67165f5a983f WatchSource:0}: Error finding container aff75d332fb089851d01a7685429995d049d6c2493ab084ad4de67165f5a983f: Status 404 returned error can't find the container with id aff75d332fb089851d01a7685429995d049d6c2493ab084ad4de67165f5a983f
Nov 28 13:49:20 crc kubenswrapper[4857]: I1128 13:49:20.143781 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-75vnk"]
Nov 28 13:49:20 crc kubenswrapper[4857]: W1128 13:49:20.149352 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2507b0ba_8cf5_4bdd_be01_f4c69188fd66.slice/crio-510d28903e360f25c722473f0ed05bca9e5af5f8ab012d4b051be956c0f1c1b8 WatchSource:0}: Error finding container 510d28903e360f25c722473f0ed05bca9e5af5f8ab012d4b051be956c0f1c1b8: Status 404 returned error can't find the container with id 510d28903e360f25c722473f0ed05bca9e5af5f8ab012d4b051be956c0f1c1b8
Nov 28 13:49:20 crc kubenswrapper[4857]: I1128 13:49:20.701833 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-spcl4" event={"ID":"da61181a-e0e6-4273-9070-9c3048727707","Type":"ContainerStarted","Data":"aff75d332fb089851d01a7685429995d049d6c2493ab084ad4de67165f5a983f"}
Nov 28 13:49:20 crc kubenswrapper[4857]: I1128 13:49:20.703015 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-75vnk" event={"ID":"2507b0ba-8cf5-4bdd-be01-f4c69188fd66","Type":"ContainerStarted","Data":"510d28903e360f25c722473f0ed05bca9e5af5f8ab012d4b051be956c0f1c1b8"}
Nov 28 13:49:22 crc kubenswrapper[4857]: I1128 13:49:22.356721 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-spcl4"]
Nov 28 13:49:22 crc kubenswrapper[4857]: I1128 13:49:22.373025 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-xg7zf"]
Nov 28 13:49:22 crc kubenswrapper[4857]: I1128 13:49:22.374193 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-xg7zf"
Nov 28 13:49:22 crc kubenswrapper[4857]: I1128 13:49:22.385793 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-xg7zf"]
Nov 28 13:49:22 crc kubenswrapper[4857]: I1128 13:49:22.481814 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8ac57da8-31cb-4a73-a503-b9764ecbc24e-dns-svc\") pod \"dnsmasq-dns-666b6646f7-xg7zf\" (UID: \"8ac57da8-31cb-4a73-a503-b9764ecbc24e\") " pod="openstack/dnsmasq-dns-666b6646f7-xg7zf"
Nov 28 13:49:22 crc kubenswrapper[4857]: I1128 13:49:22.481903 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-krsdv\" (UniqueName: \"kubernetes.io/projected/8ac57da8-31cb-4a73-a503-b9764ecbc24e-kube-api-access-krsdv\") pod \"dnsmasq-dns-666b6646f7-xg7zf\" (UID: \"8ac57da8-31cb-4a73-a503-b9764ecbc24e\") " pod="openstack/dnsmasq-dns-666b6646f7-xg7zf"
Nov 28 13:49:22 crc kubenswrapper[4857]: I1128 13:49:22.481985 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8ac57da8-31cb-4a73-a503-b9764ecbc24e-config\") pod \"dnsmasq-dns-666b6646f7-xg7zf\" (UID: \"8ac57da8-31cb-4a73-a503-b9764ecbc24e\") " pod="openstack/dnsmasq-dns-666b6646f7-xg7zf"
Nov 28 13:49:22 crc kubenswrapper[4857]: I1128 13:49:22.583465 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-krsdv\" (UniqueName: \"kubernetes.io/projected/8ac57da8-31cb-4a73-a503-b9764ecbc24e-kube-api-access-krsdv\") pod \"dnsmasq-dns-666b6646f7-xg7zf\" (UID: \"8ac57da8-31cb-4a73-a503-b9764ecbc24e\") " pod="openstack/dnsmasq-dns-666b6646f7-xg7zf"
Nov 28 13:49:22 crc kubenswrapper[4857]: I1128 13:49:22.583513 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8ac57da8-31cb-4a73-a503-b9764ecbc24e-config\") pod \"dnsmasq-dns-666b6646f7-xg7zf\" (UID: \"8ac57da8-31cb-4a73-a503-b9764ecbc24e\") " pod="openstack/dnsmasq-dns-666b6646f7-xg7zf"
Nov 28 13:49:22 crc kubenswrapper[4857]: I1128 13:49:22.583598 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8ac57da8-31cb-4a73-a503-b9764ecbc24e-dns-svc\") pod \"dnsmasq-dns-666b6646f7-xg7zf\" (UID: \"8ac57da8-31cb-4a73-a503-b9764ecbc24e\") " pod="openstack/dnsmasq-dns-666b6646f7-xg7zf"
Nov 28 13:49:22 crc kubenswrapper[4857]: I1128 13:49:22.584464 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8ac57da8-31cb-4a73-a503-b9764ecbc24e-dns-svc\") pod \"dnsmasq-dns-666b6646f7-xg7zf\" (UID: \"8ac57da8-31cb-4a73-a503-b9764ecbc24e\") " pod="openstack/dnsmasq-dns-666b6646f7-xg7zf"
Nov 28 13:49:22 crc kubenswrapper[4857]: I1128 13:49:22.587687 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8ac57da8-31cb-4a73-a503-b9764ecbc24e-config\") pod \"dnsmasq-dns-666b6646f7-xg7zf\" (UID: \"8ac57da8-31cb-4a73-a503-b9764ecbc24e\") " pod="openstack/dnsmasq-dns-666b6646f7-xg7zf"
Nov 28 13:49:22 crc kubenswrapper[4857]: I1128 13:49:22.604225 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-krsdv\" (UniqueName: \"kubernetes.io/projected/8ac57da8-31cb-4a73-a503-b9764ecbc24e-kube-api-access-krsdv\") pod \"dnsmasq-dns-666b6646f7-xg7zf\" (UID: \"8ac57da8-31cb-4a73-a503-b9764ecbc24e\") " pod="openstack/dnsmasq-dns-666b6646f7-xg7zf"
Nov 28 13:49:22 crc kubenswrapper[4857]: I1128 13:49:22.705070 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-75vnk"]
Nov 28 13:49:22 crc kubenswrapper[4857]: I1128 13:49:22.719300 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-xg7zf"
Nov 28 13:49:22 crc kubenswrapper[4857]: I1128 13:49:22.735825 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-r5hzx"]
Nov 28 13:49:22 crc kubenswrapper[4857]: I1128 13:49:22.737693 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-r5hzx"
Nov 28 13:49:22 crc kubenswrapper[4857]: I1128 13:49:22.763104 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-r5hzx"]
Nov 28 13:49:22 crc kubenswrapper[4857]: I1128 13:49:22.786624 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9cd2e351-6a59-49e5-89b3-d0d45981cfb3-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-r5hzx\" (UID: \"9cd2e351-6a59-49e5-89b3-d0d45981cfb3\") " pod="openstack/dnsmasq-dns-57d769cc4f-r5hzx"
Nov 28 13:49:22 crc kubenswrapper[4857]: I1128 13:49:22.786737 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m98fc\" (UniqueName: \"kubernetes.io/projected/9cd2e351-6a59-49e5-89b3-d0d45981cfb3-kube-api-access-m98fc\") pod \"dnsmasq-dns-57d769cc4f-r5hzx\" (UID: \"9cd2e351-6a59-49e5-89b3-d0d45981cfb3\") " pod="openstack/dnsmasq-dns-57d769cc4f-r5hzx"
Nov 28 13:49:22 crc kubenswrapper[4857]: I1128 13:49:22.786883 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9cd2e351-6a59-49e5-89b3-d0d45981cfb3-config\") pod \"dnsmasq-dns-57d769cc4f-r5hzx\" (UID: \"9cd2e351-6a59-49e5-89b3-d0d45981cfb3\") " pod="openstack/dnsmasq-dns-57d769cc4f-r5hzx"
Nov 28 13:49:22 crc kubenswrapper[4857]: I1128 13:49:22.887787 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9cd2e351-6a59-49e5-89b3-d0d45981cfb3-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-r5hzx\" (UID: \"9cd2e351-6a59-49e5-89b3-d0d45981cfb3\") " pod="openstack/dnsmasq-dns-57d769cc4f-r5hzx"
Nov 28 13:49:22 crc kubenswrapper[4857]: I1128 13:49:22.888181 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m98fc\" (UniqueName: \"kubernetes.io/projected/9cd2e351-6a59-49e5-89b3-d0d45981cfb3-kube-api-access-m98fc\") pod \"dnsmasq-dns-57d769cc4f-r5hzx\" (UID: \"9cd2e351-6a59-49e5-89b3-d0d45981cfb3\") " pod="openstack/dnsmasq-dns-57d769cc4f-r5hzx"
Nov 28 13:49:22 crc kubenswrapper[4857]: I1128 13:49:22.888269 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9cd2e351-6a59-49e5-89b3-d0d45981cfb3-config\") pod \"dnsmasq-dns-57d769cc4f-r5hzx\" (UID: \"9cd2e351-6a59-49e5-89b3-d0d45981cfb3\") " pod="openstack/dnsmasq-dns-57d769cc4f-r5hzx"
Nov 28 13:49:22 crc kubenswrapper[4857]: I1128 13:49:22.889228 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9cd2e351-6a59-49e5-89b3-d0d45981cfb3-config\") pod \"dnsmasq-dns-57d769cc4f-r5hzx\" (UID: \"9cd2e351-6a59-49e5-89b3-d0d45981cfb3\") " pod="openstack/dnsmasq-dns-57d769cc4f-r5hzx"
Nov 28 13:49:22 crc kubenswrapper[4857]: I1128 13:49:22.889784 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9cd2e351-6a59-49e5-89b3-d0d45981cfb3-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-r5hzx\" (UID: \"9cd2e351-6a59-49e5-89b3-d0d45981cfb3\") " pod="openstack/dnsmasq-dns-57d769cc4f-r5hzx"
Nov 28 13:49:22 crc kubenswrapper[4857]: I1128 13:49:22.916137 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m98fc\" (UniqueName: \"kubernetes.io/projected/9cd2e351-6a59-49e5-89b3-d0d45981cfb3-kube-api-access-m98fc\") pod \"dnsmasq-dns-57d769cc4f-r5hzx\" (UID: \"9cd2e351-6a59-49e5-89b3-d0d45981cfb3\") " pod="openstack/dnsmasq-dns-57d769cc4f-r5hzx"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.116457 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-r5hzx"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.286722 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-xg7zf"]
Nov 28 13:49:23 crc kubenswrapper[4857]: W1128 13:49:23.293490 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8ac57da8_31cb_4a73_a503_b9764ecbc24e.slice/crio-6766686b10af1a2bdcf06bc6270406677ab0f2bbec1c476ed4a33c7deba8323a WatchSource:0}: Error finding container 6766686b10af1a2bdcf06bc6270406677ab0f2bbec1c476ed4a33c7deba8323a: Status 404 returned error can't find the container with id 6766686b10af1a2bdcf06bc6270406677ab0f2bbec1c476ed4a33c7deba8323a
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.582189 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.584744 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.587897 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.587937 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.587973 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-s98x6"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.588158 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.588224 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.588909 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.590494 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.599388 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.647661 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-r5hzx"]
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.703451 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e5550a25-04ef-4dde-afd4-627f1df97a90-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " pod="openstack/rabbitmq-server-0"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.703500 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e5550a25-04ef-4dde-afd4-627f1df97a90-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " pod="openstack/rabbitmq-server-0"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.703518 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e5550a25-04ef-4dde-afd4-627f1df97a90-server-conf\") pod \"rabbitmq-server-0\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " pod="openstack/rabbitmq-server-0"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.703533 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e5550a25-04ef-4dde-afd4-627f1df97a90-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " pod="openstack/rabbitmq-server-0"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.703554 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e5550a25-04ef-4dde-afd4-627f1df97a90-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " pod="openstack/rabbitmq-server-0"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.703584 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e5550a25-04ef-4dde-afd4-627f1df97a90-config-data\") pod \"rabbitmq-server-0\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " pod="openstack/rabbitmq-server-0"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.703603 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e5550a25-04ef-4dde-afd4-627f1df97a90-pod-info\") pod \"rabbitmq-server-0\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " pod="openstack/rabbitmq-server-0"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.703628 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jg4g7\" (UniqueName: \"kubernetes.io/projected/e5550a25-04ef-4dde-afd4-627f1df97a90-kube-api-access-jg4g7\") pod \"rabbitmq-server-0\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " pod="openstack/rabbitmq-server-0"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.703681 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " pod="openstack/rabbitmq-server-0"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.703700 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e5550a25-04ef-4dde-afd4-627f1df97a90-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " pod="openstack/rabbitmq-server-0"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.703715 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e5550a25-04ef-4dde-afd4-627f1df97a90-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " pod="openstack/rabbitmq-server-0"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.740480 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-r5hzx" event={"ID":"9cd2e351-6a59-49e5-89b3-d0d45981cfb3","Type":"ContainerStarted","Data":"082cd85f18cc9acd90b32c340c7d83b4c19043c4424d8738a1966f257154b896"}
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.741625 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-xg7zf" event={"ID":"8ac57da8-31cb-4a73-a503-b9764ecbc24e","Type":"ContainerStarted","Data":"6766686b10af1a2bdcf06bc6270406677ab0f2bbec1c476ed4a33c7deba8323a"}
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.805170 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e5550a25-04ef-4dde-afd4-627f1df97a90-config-data\") pod \"rabbitmq-server-0\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " pod="openstack/rabbitmq-server-0"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.805227 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e5550a25-04ef-4dde-afd4-627f1df97a90-pod-info\") pod \"rabbitmq-server-0\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " pod="openstack/rabbitmq-server-0"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.805266 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jg4g7\" (UniqueName: \"kubernetes.io/projected/e5550a25-04ef-4dde-afd4-627f1df97a90-kube-api-access-jg4g7\") pod \"rabbitmq-server-0\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " pod="openstack/rabbitmq-server-0"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.805309 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " pod="openstack/rabbitmq-server-0"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.805333 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e5550a25-04ef-4dde-afd4-627f1df97a90-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " pod="openstack/rabbitmq-server-0"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.805356 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e5550a25-04ef-4dde-afd4-627f1df97a90-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " pod="openstack/rabbitmq-server-0"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.805392 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e5550a25-04ef-4dde-afd4-627f1df97a90-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " pod="openstack/rabbitmq-server-0"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.805425 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e5550a25-04ef-4dde-afd4-627f1df97a90-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " pod="openstack/rabbitmq-server-0"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.805446 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e5550a25-04ef-4dde-afd4-627f1df97a90-server-conf\") pod \"rabbitmq-server-0\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " pod="openstack/rabbitmq-server-0"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.805473 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e5550a25-04ef-4dde-afd4-627f1df97a90-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " pod="openstack/rabbitmq-server-0"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.805798 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/rabbitmq-server-0"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.805929 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e5550a25-04ef-4dde-afd4-627f1df97a90-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " pod="openstack/rabbitmq-server-0"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.806108 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e5550a25-04ef-4dde-afd4-627f1df97a90-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " pod="openstack/rabbitmq-server-0"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.806268 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e5550a25-04ef-4dde-afd4-627f1df97a90-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " pod="openstack/rabbitmq-server-0"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.806935 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e5550a25-04ef-4dde-afd4-627f1df97a90-server-conf\") pod \"rabbitmq-server-0\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " pod="openstack/rabbitmq-server-0"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.807544 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e5550a25-04ef-4dde-afd4-627f1df97a90-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " pod="openstack/rabbitmq-server-0"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.808331 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e5550a25-04ef-4dde-afd4-627f1df97a90-config-data\") pod \"rabbitmq-server-0\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " pod="openstack/rabbitmq-server-0"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.810303 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e5550a25-04ef-4dde-afd4-627f1df97a90-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " pod="openstack/rabbitmq-server-0"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.810319 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e5550a25-04ef-4dde-afd4-627f1df97a90-pod-info\") pod \"rabbitmq-server-0\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " pod="openstack/rabbitmq-server-0"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.810678 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e5550a25-04ef-4dde-afd4-627f1df97a90-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " pod="openstack/rabbitmq-server-0"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.818412 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e5550a25-04ef-4dde-afd4-627f1df97a90-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " pod="openstack/rabbitmq-server-0"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.829559 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"rabbitmq-server-0\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " pod="openstack/rabbitmq-server-0"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.832855 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jg4g7\" (UniqueName: \"kubernetes.io/projected/e5550a25-04ef-4dde-afd4-627f1df97a90-kube-api-access-jg4g7\") pod \"rabbitmq-server-0\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " pod="openstack/rabbitmq-server-0"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.888785 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.890041 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.892252 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.892706 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.892776 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.892870 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.892973 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.892997 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-2bhbr"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.893146 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data"
Nov 28 13:49:23 crc kubenswrapper[4857]: I1128 13:49:23.909953 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.009103 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e5ec18e7-6719-46dd-b580-303f3da41869-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.009159 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e5ec18e7-6719-46dd-b580-303f3da41869-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.009211 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e5ec18e7-6719-46dd-b580-303f3da41869-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.009239 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e5ec18e7-6719-46dd-b580-303f3da41869-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.009372 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e5ec18e7-6719-46dd-b580-303f3da41869-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.009409 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e5ec18e7-6719-46dd-b580-303f3da41869-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.009513 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.009545 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7zlqp\" (UniqueName: \"kubernetes.io/projected/e5ec18e7-6719-46dd-b580-303f3da41869-kube-api-access-7zlqp\") pod \"rabbitmq-cell1-server-0\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.009563 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e5ec18e7-6719-46dd-b580-303f3da41869-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.009582 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e5ec18e7-6719-46dd-b580-303f3da41869-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.009599 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e5ec18e7-6719-46dd-b580-303f3da41869-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.017692 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.111594 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e5ec18e7-6719-46dd-b580-303f3da41869-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.111652 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e5ec18e7-6719-46dd-b580-303f3da41869-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.111701 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e5ec18e7-6719-46dd-b580-303f3da41869-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.111728 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e5ec18e7-6719-46dd-b580-303f3da41869-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.111764 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e5ec18e7-6719-46dd-b580-303f3da41869-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.111784 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e5ec18e7-6719-46dd-b580-303f3da41869-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.111828 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.111854 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7zlqp\" (UniqueName: \"kubernetes.io/projected/e5ec18e7-6719-46dd-b580-303f3da41869-kube-api-access-7zlqp\") pod \"rabbitmq-cell1-server-0\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.111875 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e5ec18e7-6719-46dd-b580-303f3da41869-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.111898 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e5ec18e7-6719-46dd-b580-303f3da41869-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.111920 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e5ec18e7-6719-46dd-b580-303f3da41869-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.112994 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e5ec18e7-6719-46dd-b580-303f3da41869-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.113504 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e5ec18e7-6719-46dd-b580-303f3da41869-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.113660 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e5ec18e7-6719-46dd-b580-303f3da41869-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.113749 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/rabbitmq-cell1-server-0"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.114218 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e5ec18e7-6719-46dd-b580-303f3da41869-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.115243 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e5ec18e7-6719-46dd-b580-303f3da41869-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.116717 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e5ec18e7-6719-46dd-b580-303f3da41869-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.117320 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e5ec18e7-6719-46dd-b580-303f3da41869-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.117357 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e5ec18e7-6719-46dd-b580-303f3da41869-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.148218 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.148733 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7zlqp\" (UniqueName: \"kubernetes.io/projected/e5ec18e7-6719-46dd-b580-303f3da41869-kube-api-access-7zlqp\") pod \"rabbitmq-cell1-server-0\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.150775 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e5ec18e7-6719-46dd-b580-303f3da41869-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.212296 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.983974 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"]
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.985207 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.988869 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-g4gh7"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.989087 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.989313 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.990213 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc"
Nov 28 13:49:24 crc kubenswrapper[4857]: I1128 13:49:24.996325 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle"
Nov 28 13:49:25 crc kubenswrapper[4857]: I1128 13:49:24.999754 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"]
Nov 28 13:49:25 crc kubenswrapper[4857]: I1128 13:49:25.026424 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f6c9b673-669e-464a-b012-8b39314e1990-kolla-config\") pod \"openstack-galera-0\" (UID: \"f6c9b673-669e-464a-b012-8b39314e1990\") " pod="openstack/openstack-galera-0"
Nov 28 13:49:25 crc kubenswrapper[4857]: I1128 13:49:25.026530 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f6c9b673-669e-464a-b012-8b39314e1990-operator-scripts\") pod \"openstack-galera-0\" (UID: \"f6c9b673-669e-464a-b012-8b39314e1990\") " pod="openstack/openstack-galera-0"
Nov 28 13:49:25 crc kubenswrapper[4857]: I1128 13:49:25.026590 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/f6c9b673-669e-464a-b012-8b39314e1990-config-data-default\") pod \"openstack-galera-0\" (UID: \"f6c9b673-669e-464a-b012-8b39314e1990\") " pod="openstack/openstack-galera-0"
Nov 28 13:49:25 crc kubenswrapper[4857]: I1128 13:49:25.026687 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-galera-0\" (UID: \"f6c9b673-669e-464a-b012-8b39314e1990\") " pod="openstack/openstack-galera-0"
Nov 28 13:49:25 crc kubenswrapper[4857]: I1128 13:49:25.026870 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cmch9\" (UniqueName: \"kubernetes.io/projected/f6c9b673-669e-464a-b012-8b39314e1990-kube-api-access-cmch9\") pod \"openstack-galera-0\" (UID: \"f6c9b673-669e-464a-b012-8b39314e1990\") " pod="openstack/openstack-galera-0"
Nov 28 13:49:25 crc kubenswrapper[4857]: I1128 13:49:25.026931 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/f6c9b673-669e-464a-b012-8b39314e1990-config-data-generated\") pod \"openstack-galera-0\" (UID: \"f6c9b673-669e-464a-b012-8b39314e1990\") " pod="openstack/openstack-galera-0"
Nov 28 13:49:25 crc kubenswrapper[4857]: I1128 13:49:25.026973 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started
for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6c9b673-669e-464a-b012-8b39314e1990-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"f6c9b673-669e-464a-b012-8b39314e1990\") " pod="openstack/openstack-galera-0" Nov 28 13:49:25 crc kubenswrapper[4857]: I1128 13:49:25.027053 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6c9b673-669e-464a-b012-8b39314e1990-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"f6c9b673-669e-464a-b012-8b39314e1990\") " pod="openstack/openstack-galera-0" Nov 28 13:49:25 crc kubenswrapper[4857]: I1128 13:49:25.128858 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/f6c9b673-669e-464a-b012-8b39314e1990-config-data-default\") pod \"openstack-galera-0\" (UID: \"f6c9b673-669e-464a-b012-8b39314e1990\") " pod="openstack/openstack-galera-0" Nov 28 13:49:25 crc kubenswrapper[4857]: I1128 13:49:25.128916 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-galera-0\" (UID: \"f6c9b673-669e-464a-b012-8b39314e1990\") " pod="openstack/openstack-galera-0" Nov 28 13:49:25 crc kubenswrapper[4857]: I1128 13:49:25.129000 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cmch9\" (UniqueName: \"kubernetes.io/projected/f6c9b673-669e-464a-b012-8b39314e1990-kube-api-access-cmch9\") pod \"openstack-galera-0\" (UID: \"f6c9b673-669e-464a-b012-8b39314e1990\") " pod="openstack/openstack-galera-0" Nov 28 13:49:25 crc kubenswrapper[4857]: I1128 13:49:25.129025 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/f6c9b673-669e-464a-b012-8b39314e1990-config-data-generated\") pod \"openstack-galera-0\" (UID: \"f6c9b673-669e-464a-b012-8b39314e1990\") " pod="openstack/openstack-galera-0" Nov 28 13:49:25 crc kubenswrapper[4857]: I1128 13:49:25.129042 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6c9b673-669e-464a-b012-8b39314e1990-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"f6c9b673-669e-464a-b012-8b39314e1990\") " pod="openstack/openstack-galera-0" Nov 28 13:49:25 crc kubenswrapper[4857]: I1128 13:49:25.129063 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6c9b673-669e-464a-b012-8b39314e1990-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"f6c9b673-669e-464a-b012-8b39314e1990\") " pod="openstack/openstack-galera-0" Nov 28 13:49:25 crc kubenswrapper[4857]: I1128 13:49:25.129120 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f6c9b673-669e-464a-b012-8b39314e1990-kolla-config\") pod \"openstack-galera-0\" (UID: \"f6c9b673-669e-464a-b012-8b39314e1990\") " pod="openstack/openstack-galera-0" Nov 28 13:49:25 crc kubenswrapper[4857]: I1128 13:49:25.129137 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f6c9b673-669e-464a-b012-8b39314e1990-operator-scripts\") pod \"openstack-galera-0\" (UID: 
\"f6c9b673-669e-464a-b012-8b39314e1990\") " pod="openstack/openstack-galera-0" Nov 28 13:49:25 crc kubenswrapper[4857]: I1128 13:49:25.129528 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-galera-0\" (UID: \"f6c9b673-669e-464a-b012-8b39314e1990\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/openstack-galera-0" Nov 28 13:49:25 crc kubenswrapper[4857]: I1128 13:49:25.129908 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/f6c9b673-669e-464a-b012-8b39314e1990-config-data-generated\") pod \"openstack-galera-0\" (UID: \"f6c9b673-669e-464a-b012-8b39314e1990\") " pod="openstack/openstack-galera-0" Nov 28 13:49:25 crc kubenswrapper[4857]: I1128 13:49:25.130120 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f6c9b673-669e-464a-b012-8b39314e1990-kolla-config\") pod \"openstack-galera-0\" (UID: \"f6c9b673-669e-464a-b012-8b39314e1990\") " pod="openstack/openstack-galera-0" Nov 28 13:49:25 crc kubenswrapper[4857]: I1128 13:49:25.130647 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/f6c9b673-669e-464a-b012-8b39314e1990-config-data-default\") pod \"openstack-galera-0\" (UID: \"f6c9b673-669e-464a-b012-8b39314e1990\") " pod="openstack/openstack-galera-0" Nov 28 13:49:25 crc kubenswrapper[4857]: I1128 13:49:25.130806 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f6c9b673-669e-464a-b012-8b39314e1990-operator-scripts\") pod \"openstack-galera-0\" (UID: \"f6c9b673-669e-464a-b012-8b39314e1990\") " pod="openstack/openstack-galera-0" Nov 28 13:49:25 crc kubenswrapper[4857]: I1128 13:49:25.135701 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6c9b673-669e-464a-b012-8b39314e1990-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"f6c9b673-669e-464a-b012-8b39314e1990\") " pod="openstack/openstack-galera-0" Nov 28 13:49:25 crc kubenswrapper[4857]: I1128 13:49:25.137270 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6c9b673-669e-464a-b012-8b39314e1990-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"f6c9b673-669e-464a-b012-8b39314e1990\") " pod="openstack/openstack-galera-0" Nov 28 13:49:25 crc kubenswrapper[4857]: I1128 13:49:25.151901 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cmch9\" (UniqueName: \"kubernetes.io/projected/f6c9b673-669e-464a-b012-8b39314e1990-kube-api-access-cmch9\") pod \"openstack-galera-0\" (UID: \"f6c9b673-669e-464a-b012-8b39314e1990\") " pod="openstack/openstack-galera-0" Nov 28 13:49:25 crc kubenswrapper[4857]: I1128 13:49:25.153148 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-galera-0\" (UID: \"f6c9b673-669e-464a-b012-8b39314e1990\") " pod="openstack/openstack-galera-0" Nov 28 13:49:25 crc kubenswrapper[4857]: I1128 13:49:25.313903 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.476687 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.479638 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.495582 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.496126 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.496321 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-cmn88" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.497086 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.501673 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.554833 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/75c7c292-3658-4264-b86b-2a825aeb9ad4-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"75c7c292-3658-4264-b86b-2a825aeb9ad4\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.554911 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/75c7c292-3658-4264-b86b-2a825aeb9ad4-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"75c7c292-3658-4264-b86b-2a825aeb9ad4\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.554982 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"75c7c292-3658-4264-b86b-2a825aeb9ad4\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.555016 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/75c7c292-3658-4264-b86b-2a825aeb9ad4-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"75c7c292-3658-4264-b86b-2a825aeb9ad4\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.555041 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75c7c292-3658-4264-b86b-2a825aeb9ad4-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"75c7c292-3658-4264-b86b-2a825aeb9ad4\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.555087 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/75c7c292-3658-4264-b86b-2a825aeb9ad4-galera-tls-certs\") pod 
\"openstack-cell1-galera-0\" (UID: \"75c7c292-3658-4264-b86b-2a825aeb9ad4\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.555128 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t7zlx\" (UniqueName: \"kubernetes.io/projected/75c7c292-3658-4264-b86b-2a825aeb9ad4-kube-api-access-t7zlx\") pod \"openstack-cell1-galera-0\" (UID: \"75c7c292-3658-4264-b86b-2a825aeb9ad4\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.555154 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/75c7c292-3658-4264-b86b-2a825aeb9ad4-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"75c7c292-3658-4264-b86b-2a825aeb9ad4\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.656983 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/75c7c292-3658-4264-b86b-2a825aeb9ad4-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"75c7c292-3658-4264-b86b-2a825aeb9ad4\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.657038 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/75c7c292-3658-4264-b86b-2a825aeb9ad4-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"75c7c292-3658-4264-b86b-2a825aeb9ad4\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.657070 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"75c7c292-3658-4264-b86b-2a825aeb9ad4\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.657093 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/75c7c292-3658-4264-b86b-2a825aeb9ad4-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"75c7c292-3658-4264-b86b-2a825aeb9ad4\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.657112 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75c7c292-3658-4264-b86b-2a825aeb9ad4-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"75c7c292-3658-4264-b86b-2a825aeb9ad4\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.657148 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/75c7c292-3658-4264-b86b-2a825aeb9ad4-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"75c7c292-3658-4264-b86b-2a825aeb9ad4\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.657178 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t7zlx\" (UniqueName: \"kubernetes.io/projected/75c7c292-3658-4264-b86b-2a825aeb9ad4-kube-api-access-t7zlx\") pod \"openstack-cell1-galera-0\" (UID: 
\"75c7c292-3658-4264-b86b-2a825aeb9ad4\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.657198 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/75c7c292-3658-4264-b86b-2a825aeb9ad4-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"75c7c292-3658-4264-b86b-2a825aeb9ad4\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.657383 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"75c7c292-3658-4264-b86b-2a825aeb9ad4\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/openstack-cell1-galera-0" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.657565 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/75c7c292-3658-4264-b86b-2a825aeb9ad4-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"75c7c292-3658-4264-b86b-2a825aeb9ad4\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.658201 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/75c7c292-3658-4264-b86b-2a825aeb9ad4-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"75c7c292-3658-4264-b86b-2a825aeb9ad4\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.658651 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/75c7c292-3658-4264-b86b-2a825aeb9ad4-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"75c7c292-3658-4264-b86b-2a825aeb9ad4\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.659229 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/75c7c292-3658-4264-b86b-2a825aeb9ad4-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"75c7c292-3658-4264-b86b-2a825aeb9ad4\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.669026 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/75c7c292-3658-4264-b86b-2a825aeb9ad4-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"75c7c292-3658-4264-b86b-2a825aeb9ad4\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.674142 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75c7c292-3658-4264-b86b-2a825aeb9ad4-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"75c7c292-3658-4264-b86b-2a825aeb9ad4\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.679845 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t7zlx\" (UniqueName: \"kubernetes.io/projected/75c7c292-3658-4264-b86b-2a825aeb9ad4-kube-api-access-t7zlx\") pod \"openstack-cell1-galera-0\" (UID: \"75c7c292-3658-4264-b86b-2a825aeb9ad4\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:49:26 crc kubenswrapper[4857]: 
I1128 13:49:26.693177 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"openstack-cell1-galera-0\" (UID: \"75c7c292-3658-4264-b86b-2a825aeb9ad4\") " pod="openstack/openstack-cell1-galera-0" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.839824 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.903112 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.906091 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.908213 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-npp8w" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.908252 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.908220 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.922145 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.961718 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1f87bb5-7cc1-4533-b145-d855e45205ca-memcached-tls-certs\") pod \"memcached-0\" (UID: \"f1f87bb5-7cc1-4533-b145-d855e45205ca\") " pod="openstack/memcached-0" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.962014 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f1f87bb5-7cc1-4533-b145-d855e45205ca-kolla-config\") pod \"memcached-0\" (UID: \"f1f87bb5-7cc1-4533-b145-d855e45205ca\") " pod="openstack/memcached-0" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.962104 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1f87bb5-7cc1-4533-b145-d855e45205ca-combined-ca-bundle\") pod \"memcached-0\" (UID: \"f1f87bb5-7cc1-4533-b145-d855e45205ca\") " pod="openstack/memcached-0" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.962129 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-97dgx\" (UniqueName: \"kubernetes.io/projected/f1f87bb5-7cc1-4533-b145-d855e45205ca-kube-api-access-97dgx\") pod \"memcached-0\" (UID: \"f1f87bb5-7cc1-4533-b145-d855e45205ca\") " pod="openstack/memcached-0" Nov 28 13:49:26 crc kubenswrapper[4857]: I1128 13:49:26.962147 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f1f87bb5-7cc1-4533-b145-d855e45205ca-config-data\") pod \"memcached-0\" (UID: \"f1f87bb5-7cc1-4533-b145-d855e45205ca\") " pod="openstack/memcached-0" Nov 28 13:49:27 crc kubenswrapper[4857]: I1128 13:49:27.063419 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/f1f87bb5-7cc1-4533-b145-d855e45205ca-combined-ca-bundle\") pod \"memcached-0\" (UID: \"f1f87bb5-7cc1-4533-b145-d855e45205ca\") " pod="openstack/memcached-0" Nov 28 13:49:27 crc kubenswrapper[4857]: I1128 13:49:27.063742 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-97dgx\" (UniqueName: \"kubernetes.io/projected/f1f87bb5-7cc1-4533-b145-d855e45205ca-kube-api-access-97dgx\") pod \"memcached-0\" (UID: \"f1f87bb5-7cc1-4533-b145-d855e45205ca\") " pod="openstack/memcached-0" Nov 28 13:49:27 crc kubenswrapper[4857]: I1128 13:49:27.063763 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f1f87bb5-7cc1-4533-b145-d855e45205ca-config-data\") pod \"memcached-0\" (UID: \"f1f87bb5-7cc1-4533-b145-d855e45205ca\") " pod="openstack/memcached-0" Nov 28 13:49:27 crc kubenswrapper[4857]: I1128 13:49:27.063844 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1f87bb5-7cc1-4533-b145-d855e45205ca-memcached-tls-certs\") pod \"memcached-0\" (UID: \"f1f87bb5-7cc1-4533-b145-d855e45205ca\") " pod="openstack/memcached-0" Nov 28 13:49:27 crc kubenswrapper[4857]: I1128 13:49:27.063897 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f1f87bb5-7cc1-4533-b145-d855e45205ca-kolla-config\") pod \"memcached-0\" (UID: \"f1f87bb5-7cc1-4533-b145-d855e45205ca\") " pod="openstack/memcached-0" Nov 28 13:49:27 crc kubenswrapper[4857]: I1128 13:49:27.064766 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f1f87bb5-7cc1-4533-b145-d855e45205ca-kolla-config\") pod \"memcached-0\" (UID: \"f1f87bb5-7cc1-4533-b145-d855e45205ca\") " pod="openstack/memcached-0" Nov 28 13:49:27 crc kubenswrapper[4857]: I1128 13:49:27.064803 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f1f87bb5-7cc1-4533-b145-d855e45205ca-config-data\") pod \"memcached-0\" (UID: \"f1f87bb5-7cc1-4533-b145-d855e45205ca\") " pod="openstack/memcached-0" Nov 28 13:49:27 crc kubenswrapper[4857]: I1128 13:49:27.067039 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1f87bb5-7cc1-4533-b145-d855e45205ca-memcached-tls-certs\") pod \"memcached-0\" (UID: \"f1f87bb5-7cc1-4533-b145-d855e45205ca\") " pod="openstack/memcached-0" Nov 28 13:49:27 crc kubenswrapper[4857]: I1128 13:49:27.068756 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1f87bb5-7cc1-4533-b145-d855e45205ca-combined-ca-bundle\") pod \"memcached-0\" (UID: \"f1f87bb5-7cc1-4533-b145-d855e45205ca\") " pod="openstack/memcached-0" Nov 28 13:49:27 crc kubenswrapper[4857]: I1128 13:49:27.090677 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-97dgx\" (UniqueName: \"kubernetes.io/projected/f1f87bb5-7cc1-4533-b145-d855e45205ca-kube-api-access-97dgx\") pod \"memcached-0\" (UID: \"f1f87bb5-7cc1-4533-b145-d855e45205ca\") " pod="openstack/memcached-0" Nov 28 13:49:27 crc kubenswrapper[4857]: I1128 13:49:27.227054 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Nov 28 13:49:28 crc kubenswrapper[4857]: I1128 13:49:28.638896 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 13:49:28 crc kubenswrapper[4857]: I1128 13:49:28.639855 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 13:49:28 crc kubenswrapper[4857]: I1128 13:49:28.641628 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-r5z5g" Nov 28 13:49:28 crc kubenswrapper[4857]: I1128 13:49:28.646687 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 13:49:28 crc kubenswrapper[4857]: I1128 13:49:28.694848 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p5pjv\" (UniqueName: \"kubernetes.io/projected/818836f7-7e48-4477-9e79-41c71000929d-kube-api-access-p5pjv\") pod \"kube-state-metrics-0\" (UID: \"818836f7-7e48-4477-9e79-41c71000929d\") " pod="openstack/kube-state-metrics-0" Nov 28 13:49:28 crc kubenswrapper[4857]: I1128 13:49:28.796485 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p5pjv\" (UniqueName: \"kubernetes.io/projected/818836f7-7e48-4477-9e79-41c71000929d-kube-api-access-p5pjv\") pod \"kube-state-metrics-0\" (UID: \"818836f7-7e48-4477-9e79-41c71000929d\") " pod="openstack/kube-state-metrics-0" Nov 28 13:49:28 crc kubenswrapper[4857]: I1128 13:49:28.845816 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p5pjv\" (UniqueName: \"kubernetes.io/projected/818836f7-7e48-4477-9e79-41c71000929d-kube-api-access-p5pjv\") pod \"kube-state-metrics-0\" (UID: \"818836f7-7e48-4477-9e79-41c71000929d\") " pod="openstack/kube-state-metrics-0" Nov 28 13:49:28 crc kubenswrapper[4857]: I1128 13:49:28.955133 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.701756 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-zhh8w"] Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.703109 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-zhh8w" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.707117 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.707240 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.707531 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-hhwjq" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.717731 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-zhh8w"] Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.726230 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-hgm54"] Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.728377 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-hgm54" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.737801 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-hgm54"] Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.848615 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/21fe1399-7f40-43ec-bee8-868c937a6e19-etc-ovs\") pod \"ovn-controller-ovs-hgm54\" (UID: \"21fe1399-7f40-43ec-bee8-868c937a6e19\") " pod="openstack/ovn-controller-ovs-hgm54" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.848676 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/de8730d5-79df-4483-a263-1dd72a7ee079-var-run\") pod \"ovn-controller-zhh8w\" (UID: \"de8730d5-79df-4483-a263-1dd72a7ee079\") " pod="openstack/ovn-controller-zhh8w" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.848791 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/21fe1399-7f40-43ec-bee8-868c937a6e19-var-lib\") pod \"ovn-controller-ovs-hgm54\" (UID: \"21fe1399-7f40-43ec-bee8-868c937a6e19\") " pod="openstack/ovn-controller-ovs-hgm54" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.848836 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/de8730d5-79df-4483-a263-1dd72a7ee079-ovn-controller-tls-certs\") pod \"ovn-controller-zhh8w\" (UID: \"de8730d5-79df-4483-a263-1dd72a7ee079\") " pod="openstack/ovn-controller-zhh8w" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.848963 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/de8730d5-79df-4483-a263-1dd72a7ee079-scripts\") pod \"ovn-controller-zhh8w\" (UID: \"de8730d5-79df-4483-a263-1dd72a7ee079\") " pod="openstack/ovn-controller-zhh8w" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.848984 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/21fe1399-7f40-43ec-bee8-868c937a6e19-var-run\") pod \"ovn-controller-ovs-hgm54\" (UID: \"21fe1399-7f40-43ec-bee8-868c937a6e19\") " pod="openstack/ovn-controller-ovs-hgm54" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.848998 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/21fe1399-7f40-43ec-bee8-868c937a6e19-var-log\") pod \"ovn-controller-ovs-hgm54\" (UID: \"21fe1399-7f40-43ec-bee8-868c937a6e19\") " pod="openstack/ovn-controller-ovs-hgm54" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.849129 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-76ngp\" (UniqueName: \"kubernetes.io/projected/21fe1399-7f40-43ec-bee8-868c937a6e19-kube-api-access-76ngp\") pod \"ovn-controller-ovs-hgm54\" (UID: \"21fe1399-7f40-43ec-bee8-868c937a6e19\") " pod="openstack/ovn-controller-ovs-hgm54" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.849161 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9c2wc\" (UniqueName: 
\"kubernetes.io/projected/de8730d5-79df-4483-a263-1dd72a7ee079-kube-api-access-9c2wc\") pod \"ovn-controller-zhh8w\" (UID: \"de8730d5-79df-4483-a263-1dd72a7ee079\") " pod="openstack/ovn-controller-zhh8w" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.849192 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/21fe1399-7f40-43ec-bee8-868c937a6e19-scripts\") pod \"ovn-controller-ovs-hgm54\" (UID: \"21fe1399-7f40-43ec-bee8-868c937a6e19\") " pod="openstack/ovn-controller-ovs-hgm54" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.849218 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/de8730d5-79df-4483-a263-1dd72a7ee079-var-log-ovn\") pod \"ovn-controller-zhh8w\" (UID: \"de8730d5-79df-4483-a263-1dd72a7ee079\") " pod="openstack/ovn-controller-zhh8w" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.849262 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/de8730d5-79df-4483-a263-1dd72a7ee079-var-run-ovn\") pod \"ovn-controller-zhh8w\" (UID: \"de8730d5-79df-4483-a263-1dd72a7ee079\") " pod="openstack/ovn-controller-zhh8w" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.849318 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de8730d5-79df-4483-a263-1dd72a7ee079-combined-ca-bundle\") pod \"ovn-controller-zhh8w\" (UID: \"de8730d5-79df-4483-a263-1dd72a7ee079\") " pod="openstack/ovn-controller-zhh8w" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.950669 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-76ngp\" (UniqueName: \"kubernetes.io/projected/21fe1399-7f40-43ec-bee8-868c937a6e19-kube-api-access-76ngp\") pod \"ovn-controller-ovs-hgm54\" (UID: \"21fe1399-7f40-43ec-bee8-868c937a6e19\") " pod="openstack/ovn-controller-ovs-hgm54" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.950750 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9c2wc\" (UniqueName: \"kubernetes.io/projected/de8730d5-79df-4483-a263-1dd72a7ee079-kube-api-access-9c2wc\") pod \"ovn-controller-zhh8w\" (UID: \"de8730d5-79df-4483-a263-1dd72a7ee079\") " pod="openstack/ovn-controller-zhh8w" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.950778 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/21fe1399-7f40-43ec-bee8-868c937a6e19-scripts\") pod \"ovn-controller-ovs-hgm54\" (UID: \"21fe1399-7f40-43ec-bee8-868c937a6e19\") " pod="openstack/ovn-controller-ovs-hgm54" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.950804 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/de8730d5-79df-4483-a263-1dd72a7ee079-var-log-ovn\") pod \"ovn-controller-zhh8w\" (UID: \"de8730d5-79df-4483-a263-1dd72a7ee079\") " pod="openstack/ovn-controller-zhh8w" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.950844 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/de8730d5-79df-4483-a263-1dd72a7ee079-var-run-ovn\") pod \"ovn-controller-zhh8w\" 
(UID: \"de8730d5-79df-4483-a263-1dd72a7ee079\") " pod="openstack/ovn-controller-zhh8w" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.950881 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de8730d5-79df-4483-a263-1dd72a7ee079-combined-ca-bundle\") pod \"ovn-controller-zhh8w\" (UID: \"de8730d5-79df-4483-a263-1dd72a7ee079\") " pod="openstack/ovn-controller-zhh8w" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.951231 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/21fe1399-7f40-43ec-bee8-868c937a6e19-etc-ovs\") pod \"ovn-controller-ovs-hgm54\" (UID: \"21fe1399-7f40-43ec-bee8-868c937a6e19\") " pod="openstack/ovn-controller-ovs-hgm54" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.951631 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/de8730d5-79df-4483-a263-1dd72a7ee079-var-log-ovn\") pod \"ovn-controller-zhh8w\" (UID: \"de8730d5-79df-4483-a263-1dd72a7ee079\") " pod="openstack/ovn-controller-zhh8w" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.951658 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/21fe1399-7f40-43ec-bee8-868c937a6e19-etc-ovs\") pod \"ovn-controller-ovs-hgm54\" (UID: \"21fe1399-7f40-43ec-bee8-868c937a6e19\") " pod="openstack/ovn-controller-ovs-hgm54" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.951870 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/de8730d5-79df-4483-a263-1dd72a7ee079-var-run\") pod \"ovn-controller-zhh8w\" (UID: \"de8730d5-79df-4483-a263-1dd72a7ee079\") " pod="openstack/ovn-controller-zhh8w" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.953499 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/de8730d5-79df-4483-a263-1dd72a7ee079-var-run-ovn\") pod \"ovn-controller-zhh8w\" (UID: \"de8730d5-79df-4483-a263-1dd72a7ee079\") " pod="openstack/ovn-controller-zhh8w" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.953573 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/de8730d5-79df-4483-a263-1dd72a7ee079-var-run\") pod \"ovn-controller-zhh8w\" (UID: \"de8730d5-79df-4483-a263-1dd72a7ee079\") " pod="openstack/ovn-controller-zhh8w" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.953612 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/21fe1399-7f40-43ec-bee8-868c937a6e19-var-lib\") pod \"ovn-controller-ovs-hgm54\" (UID: \"21fe1399-7f40-43ec-bee8-868c937a6e19\") " pod="openstack/ovn-controller-ovs-hgm54" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.953639 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/de8730d5-79df-4483-a263-1dd72a7ee079-ovn-controller-tls-certs\") pod \"ovn-controller-zhh8w\" (UID: \"de8730d5-79df-4483-a263-1dd72a7ee079\") " pod="openstack/ovn-controller-zhh8w" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.953715 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/de8730d5-79df-4483-a263-1dd72a7ee079-scripts\") pod \"ovn-controller-zhh8w\" (UID: \"de8730d5-79df-4483-a263-1dd72a7ee079\") " pod="openstack/ovn-controller-zhh8w" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.953739 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/21fe1399-7f40-43ec-bee8-868c937a6e19-var-run\") pod \"ovn-controller-ovs-hgm54\" (UID: \"21fe1399-7f40-43ec-bee8-868c937a6e19\") " pod="openstack/ovn-controller-ovs-hgm54" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.953764 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/21fe1399-7f40-43ec-bee8-868c937a6e19-var-log\") pod \"ovn-controller-ovs-hgm54\" (UID: \"21fe1399-7f40-43ec-bee8-868c937a6e19\") " pod="openstack/ovn-controller-ovs-hgm54" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.953887 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/21fe1399-7f40-43ec-bee8-868c937a6e19-var-run\") pod \"ovn-controller-ovs-hgm54\" (UID: \"21fe1399-7f40-43ec-bee8-868c937a6e19\") " pod="openstack/ovn-controller-ovs-hgm54" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.955459 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/21fe1399-7f40-43ec-bee8-868c937a6e19-scripts\") pod \"ovn-controller-ovs-hgm54\" (UID: \"21fe1399-7f40-43ec-bee8-868c937a6e19\") " pod="openstack/ovn-controller-ovs-hgm54" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.956829 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/de8730d5-79df-4483-a263-1dd72a7ee079-scripts\") pod \"ovn-controller-zhh8w\" (UID: \"de8730d5-79df-4483-a263-1dd72a7ee079\") " pod="openstack/ovn-controller-zhh8w" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.964279 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/21fe1399-7f40-43ec-bee8-868c937a6e19-var-lib\") pod \"ovn-controller-ovs-hgm54\" (UID: \"21fe1399-7f40-43ec-bee8-868c937a6e19\") " pod="openstack/ovn-controller-ovs-hgm54" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.964541 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/21fe1399-7f40-43ec-bee8-868c937a6e19-var-log\") pod \"ovn-controller-ovs-hgm54\" (UID: \"21fe1399-7f40-43ec-bee8-868c937a6e19\") " pod="openstack/ovn-controller-ovs-hgm54" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.965378 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-76ngp\" (UniqueName: \"kubernetes.io/projected/21fe1399-7f40-43ec-bee8-868c937a6e19-kube-api-access-76ngp\") pod \"ovn-controller-ovs-hgm54\" (UID: \"21fe1399-7f40-43ec-bee8-868c937a6e19\") " pod="openstack/ovn-controller-ovs-hgm54" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.965746 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/de8730d5-79df-4483-a263-1dd72a7ee079-ovn-controller-tls-certs\") pod \"ovn-controller-zhh8w\" (UID: \"de8730d5-79df-4483-a263-1dd72a7ee079\") " pod="openstack/ovn-controller-zhh8w" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.967321 4857 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de8730d5-79df-4483-a263-1dd72a7ee079-combined-ca-bundle\") pod \"ovn-controller-zhh8w\" (UID: \"de8730d5-79df-4483-a263-1dd72a7ee079\") " pod="openstack/ovn-controller-zhh8w" Nov 28 13:49:31 crc kubenswrapper[4857]: I1128 13:49:31.969540 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9c2wc\" (UniqueName: \"kubernetes.io/projected/de8730d5-79df-4483-a263-1dd72a7ee079-kube-api-access-9c2wc\") pod \"ovn-controller-zhh8w\" (UID: \"de8730d5-79df-4483-a263-1dd72a7ee079\") " pod="openstack/ovn-controller-zhh8w" Nov 28 13:49:32 crc kubenswrapper[4857]: I1128 13:49:32.025816 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-zhh8w" Nov 28 13:49:32 crc kubenswrapper[4857]: I1128 13:49:32.043365 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-hgm54" Nov 28 13:49:33 crc kubenswrapper[4857]: I1128 13:49:33.369159 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 13:49:33 crc kubenswrapper[4857]: I1128 13:49:33.370888 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 28 13:49:33 crc kubenswrapper[4857]: I1128 13:49:33.377710 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 28 13:49:33 crc kubenswrapper[4857]: I1128 13:49:33.379410 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 28 13:49:33 crc kubenswrapper[4857]: I1128 13:49:33.379956 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 28 13:49:33 crc kubenswrapper[4857]: I1128 13:49:33.380692 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-rfkvd" Nov 28 13:49:33 crc kubenswrapper[4857]: I1128 13:49:33.380872 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 28 13:49:33 crc kubenswrapper[4857]: I1128 13:49:33.386700 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 13:49:33 crc kubenswrapper[4857]: I1128 13:49:33.487321 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dceb7667-07bc-486b-b65f-c87427949ffd-config\") pod \"ovsdbserver-nb-0\" (UID: \"dceb7667-07bc-486b-b65f-c87427949ffd\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:49:33 crc kubenswrapper[4857]: I1128 13:49:33.487391 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/dceb7667-07bc-486b-b65f-c87427949ffd-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"dceb7667-07bc-486b-b65f-c87427949ffd\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:49:33 crc kubenswrapper[4857]: I1128 13:49:33.487427 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-nb-0\" (UID: \"dceb7667-07bc-486b-b65f-c87427949ffd\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:49:33 crc kubenswrapper[4857]: I1128 13:49:33.487462 4857 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dceb7667-07bc-486b-b65f-c87427949ffd-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"dceb7667-07bc-486b-b65f-c87427949ffd\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:49:33 crc kubenswrapper[4857]: I1128 13:49:33.487513 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/dceb7667-07bc-486b-b65f-c87427949ffd-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"dceb7667-07bc-486b-b65f-c87427949ffd\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:49:33 crc kubenswrapper[4857]: I1128 13:49:33.487557 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dceb7667-07bc-486b-b65f-c87427949ffd-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"dceb7667-07bc-486b-b65f-c87427949ffd\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:49:33 crc kubenswrapper[4857]: I1128 13:49:33.487597 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lc4bv\" (UniqueName: \"kubernetes.io/projected/dceb7667-07bc-486b-b65f-c87427949ffd-kube-api-access-lc4bv\") pod \"ovsdbserver-nb-0\" (UID: \"dceb7667-07bc-486b-b65f-c87427949ffd\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:49:33 crc kubenswrapper[4857]: I1128 13:49:33.487621 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/dceb7667-07bc-486b-b65f-c87427949ffd-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"dceb7667-07bc-486b-b65f-c87427949ffd\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:49:33 crc kubenswrapper[4857]: I1128 13:49:33.589156 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dceb7667-07bc-486b-b65f-c87427949ffd-config\") pod \"ovsdbserver-nb-0\" (UID: \"dceb7667-07bc-486b-b65f-c87427949ffd\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:49:33 crc kubenswrapper[4857]: I1128 13:49:33.589214 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/dceb7667-07bc-486b-b65f-c87427949ffd-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"dceb7667-07bc-486b-b65f-c87427949ffd\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:49:33 crc kubenswrapper[4857]: I1128 13:49:33.589233 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-nb-0\" (UID: \"dceb7667-07bc-486b-b65f-c87427949ffd\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:49:33 crc kubenswrapper[4857]: I1128 13:49:33.589269 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dceb7667-07bc-486b-b65f-c87427949ffd-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"dceb7667-07bc-486b-b65f-c87427949ffd\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:49:33 crc kubenswrapper[4857]: I1128 13:49:33.589303 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/dceb7667-07bc-486b-b65f-c87427949ffd-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"dceb7667-07bc-486b-b65f-c87427949ffd\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:49:33 crc kubenswrapper[4857]: I1128 13:49:33.589346 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dceb7667-07bc-486b-b65f-c87427949ffd-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"dceb7667-07bc-486b-b65f-c87427949ffd\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:49:33 crc kubenswrapper[4857]: I1128 13:49:33.589373 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lc4bv\" (UniqueName: \"kubernetes.io/projected/dceb7667-07bc-486b-b65f-c87427949ffd-kube-api-access-lc4bv\") pod \"ovsdbserver-nb-0\" (UID: \"dceb7667-07bc-486b-b65f-c87427949ffd\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:49:33 crc kubenswrapper[4857]: I1128 13:49:33.589395 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/dceb7667-07bc-486b-b65f-c87427949ffd-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"dceb7667-07bc-486b-b65f-c87427949ffd\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:49:33 crc kubenswrapper[4857]: I1128 13:49:33.589621 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/dceb7667-07bc-486b-b65f-c87427949ffd-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"dceb7667-07bc-486b-b65f-c87427949ffd\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:49:33 crc kubenswrapper[4857]: I1128 13:49:33.589656 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-nb-0\" (UID: \"dceb7667-07bc-486b-b65f-c87427949ffd\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/ovsdbserver-nb-0" Nov 28 13:49:33 crc kubenswrapper[4857]: I1128 13:49:33.589985 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dceb7667-07bc-486b-b65f-c87427949ffd-config\") pod \"ovsdbserver-nb-0\" (UID: \"dceb7667-07bc-486b-b65f-c87427949ffd\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:49:33 crc kubenswrapper[4857]: I1128 13:49:33.590619 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dceb7667-07bc-486b-b65f-c87427949ffd-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"dceb7667-07bc-486b-b65f-c87427949ffd\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:49:33 crc kubenswrapper[4857]: I1128 13:49:33.598798 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/dceb7667-07bc-486b-b65f-c87427949ffd-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"dceb7667-07bc-486b-b65f-c87427949ffd\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:49:33 crc kubenswrapper[4857]: I1128 13:49:33.601487 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dceb7667-07bc-486b-b65f-c87427949ffd-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"dceb7667-07bc-486b-b65f-c87427949ffd\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:49:33 crc kubenswrapper[4857]: I1128 13:49:33.601606 4857 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/dceb7667-07bc-486b-b65f-c87427949ffd-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"dceb7667-07bc-486b-b65f-c87427949ffd\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:49:33 crc kubenswrapper[4857]: I1128 13:49:33.611104 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-nb-0\" (UID: \"dceb7667-07bc-486b-b65f-c87427949ffd\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:49:33 crc kubenswrapper[4857]: I1128 13:49:33.623976 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lc4bv\" (UniqueName: \"kubernetes.io/projected/dceb7667-07bc-486b-b65f-c87427949ffd-kube-api-access-lc4bv\") pod \"ovsdbserver-nb-0\" (UID: \"dceb7667-07bc-486b-b65f-c87427949ffd\") " pod="openstack/ovsdbserver-nb-0" Nov 28 13:49:33 crc kubenswrapper[4857]: I1128 13:49:33.702426 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 28 13:49:35 crc kubenswrapper[4857]: I1128 13:49:35.525134 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 13:49:35 crc kubenswrapper[4857]: I1128 13:49:35.526765 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 28 13:49:35 crc kubenswrapper[4857]: I1128 13:49:35.529409 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 28 13:49:35 crc kubenswrapper[4857]: I1128 13:49:35.530887 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 28 13:49:35 crc kubenswrapper[4857]: I1128 13:49:35.532675 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-5snqg" Nov 28 13:49:35 crc kubenswrapper[4857]: I1128 13:49:35.532773 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 13:49:35 crc kubenswrapper[4857]: I1128 13:49:35.534175 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 28 13:49:35 crc kubenswrapper[4857]: I1128 13:49:35.624939 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:49:35 crc kubenswrapper[4857]: I1128 13:49:35.625250 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:49:35 crc kubenswrapper[4857]: I1128 13:49:35.625279 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:49:35 crc kubenswrapper[4857]: I1128 13:49:35.625334 4857 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4msqh\" (UniqueName: \"kubernetes.io/projected/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-kube-api-access-4msqh\") pod \"ovsdbserver-sb-0\" (UID: \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:49:35 crc kubenswrapper[4857]: I1128 13:49:35.625441 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:49:35 crc kubenswrapper[4857]: I1128 13:49:35.625509 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-config\") pod \"ovsdbserver-sb-0\" (UID: \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:49:35 crc kubenswrapper[4857]: I1128 13:49:35.625541 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:49:35 crc kubenswrapper[4857]: I1128 13:49:35.625604 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:49:35 crc kubenswrapper[4857]: I1128 13:49:35.726584 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:49:35 crc kubenswrapper[4857]: I1128 13:49:35.726634 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-config\") pod \"ovsdbserver-sb-0\" (UID: \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:49:35 crc kubenswrapper[4857]: I1128 13:49:35.726653 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:49:35 crc kubenswrapper[4857]: I1128 13:49:35.726680 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:49:35 crc kubenswrapper[4857]: I1128 13:49:35.726724 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod 
\"ovsdbserver-sb-0\" (UID: \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:49:35 crc kubenswrapper[4857]: I1128 13:49:35.726749 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:49:35 crc kubenswrapper[4857]: I1128 13:49:35.726770 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:49:35 crc kubenswrapper[4857]: I1128 13:49:35.726824 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4msqh\" (UniqueName: \"kubernetes.io/projected/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-kube-api-access-4msqh\") pod \"ovsdbserver-sb-0\" (UID: \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:49:35 crc kubenswrapper[4857]: I1128 13:49:35.727082 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/ovsdbserver-sb-0" Nov 28 13:49:35 crc kubenswrapper[4857]: I1128 13:49:35.727406 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:49:35 crc kubenswrapper[4857]: I1128 13:49:35.727781 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-config\") pod \"ovsdbserver-sb-0\" (UID: \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:49:35 crc kubenswrapper[4857]: I1128 13:49:35.728673 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:49:35 crc kubenswrapper[4857]: I1128 13:49:35.731801 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:49:35 crc kubenswrapper[4857]: I1128 13:49:35.733083 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:49:35 crc kubenswrapper[4857]: I1128 13:49:35.743831 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4msqh\" (UniqueName: 
\"kubernetes.io/projected/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-kube-api-access-4msqh\") pod \"ovsdbserver-sb-0\" (UID: \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:49:35 crc kubenswrapper[4857]: I1128 13:49:35.750626 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:49:35 crc kubenswrapper[4857]: I1128 13:49:35.755190 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-sb-0\" (UID: \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\") " pod="openstack/ovsdbserver-sb-0" Nov 28 13:49:35 crc kubenswrapper[4857]: I1128 13:49:35.874325 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 28 13:49:38 crc kubenswrapper[4857]: E1128 13:49:38.921914 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 28 13:49:38 crc kubenswrapper[4857]: E1128 13:49:38.922450 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4vdmn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-spcl4_openstack(da61181a-e0e6-4273-9070-9c3048727707): ErrImagePull: rpc error: code = Canceled desc = 
copying config: context canceled" logger="UnhandledError" Nov 28 13:49:38 crc kubenswrapper[4857]: E1128 13:49:38.923803 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-spcl4" podUID="da61181a-e0e6-4273-9070-9c3048727707" Nov 28 13:49:38 crc kubenswrapper[4857]: E1128 13:49:38.931129 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 28 13:49:38 crc kubenswrapper[4857]: E1128 13:49:38.931299 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pb7ld,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-75vnk_openstack(2507b0ba-8cf5-4bdd-be01-f4c69188fd66): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 13:49:38 crc kubenswrapper[4857]: E1128 13:49:38.932462 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-75vnk" podUID="2507b0ba-8cf5-4bdd-be01-f4c69188fd66" Nov 28 13:49:39 crc kubenswrapper[4857]: I1128 13:49:39.667742 4857 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 13:49:39 crc kubenswrapper[4857]: W1128 13:49:39.678622 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod75c7c292_3658_4264_b86b_2a825aeb9ad4.slice/crio-28dba4911a2e12d05712cbcb19c532e932a4a8560b01e9701722b9a72b4a8715 WatchSource:0}: Error finding container 28dba4911a2e12d05712cbcb19c532e932a4a8560b01e9701722b9a72b4a8715: Status 404 returned error can't find the container with id 28dba4911a2e12d05712cbcb19c532e932a4a8560b01e9701722b9a72b4a8715 Nov 28 13:49:39 crc kubenswrapper[4857]: I1128 13:49:39.680843 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 28 13:49:39 crc kubenswrapper[4857]: W1128 13:49:39.684954 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf6c9b673_669e_464a_b012_8b39314e1990.slice/crio-0d7c47b9349fc16ac761956ae2eeb57a0d5d177db9e2522a53c6cc79d9ae34af WatchSource:0}: Error finding container 0d7c47b9349fc16ac761956ae2eeb57a0d5d177db9e2522a53c6cc79d9ae34af: Status 404 returned error can't find the container with id 0d7c47b9349fc16ac761956ae2eeb57a0d5d177db9e2522a53c6cc79d9ae34af Nov 28 13:49:39 crc kubenswrapper[4857]: I1128 13:49:39.692588 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-zhh8w"] Nov 28 13:49:39 crc kubenswrapper[4857]: I1128 13:49:39.701451 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 13:49:39 crc kubenswrapper[4857]: I1128 13:49:39.707104 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 28 13:49:39 crc kubenswrapper[4857]: I1128 13:49:39.712176 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 13:49:39 crc kubenswrapper[4857]: I1128 13:49:39.886830 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-zhh8w" event={"ID":"de8730d5-79df-4483-a263-1dd72a7ee079","Type":"ContainerStarted","Data":"401716f6caa800e1384b14d3166b9aefb675685799de23b75b75a4b68382fa14"} Nov 28 13:49:39 crc kubenswrapper[4857]: I1128 13:49:39.888424 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"e5550a25-04ef-4dde-afd4-627f1df97a90","Type":"ContainerStarted","Data":"aab682b0c44e6ca44a0cd72b01cb044855f1e65aa098404b93df5b556456d257"} Nov 28 13:49:39 crc kubenswrapper[4857]: I1128 13:49:39.890214 4857 generic.go:334] "Generic (PLEG): container finished" podID="8ac57da8-31cb-4a73-a503-b9764ecbc24e" containerID="7547b463a016863ab2b566da929f7328b97ae2da05ab80d8de9c75430b1ec736" exitCode=0 Nov 28 13:49:39 crc kubenswrapper[4857]: I1128 13:49:39.890301 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-xg7zf" event={"ID":"8ac57da8-31cb-4a73-a503-b9764ecbc24e","Type":"ContainerDied","Data":"7547b463a016863ab2b566da929f7328b97ae2da05ab80d8de9c75430b1ec736"} Nov 28 13:49:39 crc kubenswrapper[4857]: I1128 13:49:39.892412 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"f1f87bb5-7cc1-4533-b145-d855e45205ca","Type":"ContainerStarted","Data":"f676462d7229d8985e4b6ca0f9266e20580a12cf20d943bd71b4de086fff09c2"} Nov 28 13:49:39 crc kubenswrapper[4857]: I1128 13:49:39.894737 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" 
event={"ID":"f6c9b673-669e-464a-b012-8b39314e1990","Type":"ContainerStarted","Data":"0d7c47b9349fc16ac761956ae2eeb57a0d5d177db9e2522a53c6cc79d9ae34af"} Nov 28 13:49:39 crc kubenswrapper[4857]: I1128 13:49:39.898761 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"818836f7-7e48-4477-9e79-41c71000929d","Type":"ContainerStarted","Data":"0232edd093f05241b1f364f98e0c5082c0eb821cdaec53d2f097c0457534c9ff"} Nov 28 13:49:39 crc kubenswrapper[4857]: I1128 13:49:39.905445 4857 generic.go:334] "Generic (PLEG): container finished" podID="9cd2e351-6a59-49e5-89b3-d0d45981cfb3" containerID="2057821e4fce08d803a52452d7fda50857a2c18e351f32b68064a8453ca5eb95" exitCode=0 Nov 28 13:49:39 crc kubenswrapper[4857]: I1128 13:49:39.905779 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-r5hzx" event={"ID":"9cd2e351-6a59-49e5-89b3-d0d45981cfb3","Type":"ContainerDied","Data":"2057821e4fce08d803a52452d7fda50857a2c18e351f32b68064a8453ca5eb95"} Nov 28 13:49:40 crc kubenswrapper[4857]: I1128 13:49:39.911856 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"75c7c292-3658-4264-b86b-2a825aeb9ad4","Type":"ContainerStarted","Data":"28dba4911a2e12d05712cbcb19c532e932a4a8560b01e9701722b9a72b4a8715"} Nov 28 13:49:40 crc kubenswrapper[4857]: I1128 13:49:40.040476 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 13:49:40 crc kubenswrapper[4857]: W1128 13:49:40.059304 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode5ec18e7_6719_46dd_b580_303f3da41869.slice/crio-da71e64fb1ef0e05958001998905152cf192cd8cbccea76087ae208f9a1da63e WatchSource:0}: Error finding container da71e64fb1ef0e05958001998905152cf192cd8cbccea76087ae208f9a1da63e: Status 404 returned error can't find the container with id da71e64fb1ef0e05958001998905152cf192cd8cbccea76087ae208f9a1da63e Nov 28 13:49:40 crc kubenswrapper[4857]: I1128 13:49:40.160284 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 13:49:40 crc kubenswrapper[4857]: W1128 13:49:40.177231 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2dfd7b5e_9e1b_4f85_9933_2f3f55cee399.slice/crio-b27101efeb3ef6efeed65a8ad3adf3ce48550833cc4fee414436587568695007 WatchSource:0}: Error finding container b27101efeb3ef6efeed65a8ad3adf3ce48550833cc4fee414436587568695007: Status 404 returned error can't find the container with id b27101efeb3ef6efeed65a8ad3adf3ce48550833cc4fee414436587568695007 Nov 28 13:49:40 crc kubenswrapper[4857]: E1128 13:49:40.377030 4857 log.go:32] "CreateContainer in sandbox from runtime service failed" err=< Nov 28 13:49:40 crc kubenswrapper[4857]: rpc error: code = Unknown desc = container create failed: mount `/var/lib/kubelet/pods/8ac57da8-31cb-4a73-a503-b9764ecbc24e/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Nov 28 13:49:40 crc kubenswrapper[4857]: > podSandboxID="6766686b10af1a2bdcf06bc6270406677ab0f2bbec1c476ed4a33c7deba8323a" Nov 28 13:49:40 crc kubenswrapper[4857]: E1128 13:49:40.377599 4857 kuberuntime_manager.go:1274] "Unhandled Error" err=< Nov 28 13:49:40 crc kubenswrapper[4857]: container 
&Container{Name:dnsmasq-dns,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-krsdv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-666b6646f7-xg7zf_openstack(8ac57da8-31cb-4a73-a503-b9764ecbc24e): CreateContainerError: container create failed: mount `/var/lib/kubelet/pods/8ac57da8-31cb-4a73-a503-b9764ecbc24e/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Nov 28 13:49:40 crc kubenswrapper[4857]: > logger="UnhandledError" Nov 28 13:49:40 crc kubenswrapper[4857]: E1128 13:49:40.378756 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dnsmasq-dns\" with CreateContainerError: \"container create failed: mount `/var/lib/kubelet/pods/8ac57da8-31cb-4a73-a503-b9764ecbc24e/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory\\n\"" pod="openstack/dnsmasq-dns-666b6646f7-xg7zf" podUID="8ac57da8-31cb-4a73-a503-b9764ecbc24e" Nov 28 13:49:40 crc kubenswrapper[4857]: I1128 13:49:40.397128 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-75vnk" Nov 28 13:49:40 crc kubenswrapper[4857]: I1128 13:49:40.485777 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-spcl4" Nov 28 13:49:40 crc kubenswrapper[4857]: I1128 13:49:40.527702 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pb7ld\" (UniqueName: \"kubernetes.io/projected/2507b0ba-8cf5-4bdd-be01-f4c69188fd66-kube-api-access-pb7ld\") pod \"2507b0ba-8cf5-4bdd-be01-f4c69188fd66\" (UID: \"2507b0ba-8cf5-4bdd-be01-f4c69188fd66\") " Nov 28 13:49:40 crc kubenswrapper[4857]: I1128 13:49:40.527887 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2507b0ba-8cf5-4bdd-be01-f4c69188fd66-dns-svc\") pod \"2507b0ba-8cf5-4bdd-be01-f4c69188fd66\" (UID: \"2507b0ba-8cf5-4bdd-be01-f4c69188fd66\") " Nov 28 13:49:40 crc kubenswrapper[4857]: I1128 13:49:40.527989 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2507b0ba-8cf5-4bdd-be01-f4c69188fd66-config\") pod \"2507b0ba-8cf5-4bdd-be01-f4c69188fd66\" (UID: \"2507b0ba-8cf5-4bdd-be01-f4c69188fd66\") " Nov 28 13:49:40 crc kubenswrapper[4857]: I1128 13:49:40.528464 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2507b0ba-8cf5-4bdd-be01-f4c69188fd66-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2507b0ba-8cf5-4bdd-be01-f4c69188fd66" (UID: "2507b0ba-8cf5-4bdd-be01-f4c69188fd66"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:49:40 crc kubenswrapper[4857]: I1128 13:49:40.528594 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2507b0ba-8cf5-4bdd-be01-f4c69188fd66-config" (OuterVolumeSpecName: "config") pod "2507b0ba-8cf5-4bdd-be01-f4c69188fd66" (UID: "2507b0ba-8cf5-4bdd-be01-f4c69188fd66"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:49:40 crc kubenswrapper[4857]: I1128 13:49:40.532818 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2507b0ba-8cf5-4bdd-be01-f4c69188fd66-kube-api-access-pb7ld" (OuterVolumeSpecName: "kube-api-access-pb7ld") pod "2507b0ba-8cf5-4bdd-be01-f4c69188fd66" (UID: "2507b0ba-8cf5-4bdd-be01-f4c69188fd66"). InnerVolumeSpecName "kube-api-access-pb7ld". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:49:40 crc kubenswrapper[4857]: I1128 13:49:40.629420 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da61181a-e0e6-4273-9070-9c3048727707-config\") pod \"da61181a-e0e6-4273-9070-9c3048727707\" (UID: \"da61181a-e0e6-4273-9070-9c3048727707\") " Nov 28 13:49:40 crc kubenswrapper[4857]: I1128 13:49:40.629488 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4vdmn\" (UniqueName: \"kubernetes.io/projected/da61181a-e0e6-4273-9070-9c3048727707-kube-api-access-4vdmn\") pod \"da61181a-e0e6-4273-9070-9c3048727707\" (UID: \"da61181a-e0e6-4273-9070-9c3048727707\") " Nov 28 13:49:40 crc kubenswrapper[4857]: I1128 13:49:40.629929 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2507b0ba-8cf5-4bdd-be01-f4c69188fd66-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 13:49:40 crc kubenswrapper[4857]: I1128 13:49:40.630211 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2507b0ba-8cf5-4bdd-be01-f4c69188fd66-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:49:40 crc kubenswrapper[4857]: I1128 13:49:40.630227 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pb7ld\" (UniqueName: \"kubernetes.io/projected/2507b0ba-8cf5-4bdd-be01-f4c69188fd66-kube-api-access-pb7ld\") on node \"crc\" DevicePath \"\"" Nov 28 13:49:40 crc kubenswrapper[4857]: I1128 13:49:40.630049 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/da61181a-e0e6-4273-9070-9c3048727707-config" (OuterVolumeSpecName: "config") pod "da61181a-e0e6-4273-9070-9c3048727707" (UID: "da61181a-e0e6-4273-9070-9c3048727707"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:49:40 crc kubenswrapper[4857]: I1128 13:49:40.632563 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da61181a-e0e6-4273-9070-9c3048727707-kube-api-access-4vdmn" (OuterVolumeSpecName: "kube-api-access-4vdmn") pod "da61181a-e0e6-4273-9070-9c3048727707" (UID: "da61181a-e0e6-4273-9070-9c3048727707"). InnerVolumeSpecName "kube-api-access-4vdmn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:49:40 crc kubenswrapper[4857]: I1128 13:49:40.731662 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da61181a-e0e6-4273-9070-9c3048727707-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:49:40 crc kubenswrapper[4857]: I1128 13:49:40.731707 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4vdmn\" (UniqueName: \"kubernetes.io/projected/da61181a-e0e6-4273-9070-9c3048727707-kube-api-access-4vdmn\") on node \"crc\" DevicePath \"\"" Nov 28 13:49:40 crc kubenswrapper[4857]: I1128 13:49:40.747308 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-hgm54"] Nov 28 13:49:40 crc kubenswrapper[4857]: W1128 13:49:40.902430 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod21fe1399_7f40_43ec_bee8_868c937a6e19.slice/crio-80e5d3025c6d5fe4a28030191cceb93e94c859ede9bd97436b6a592eec5a348b WatchSource:0}: Error finding container 80e5d3025c6d5fe4a28030191cceb93e94c859ede9bd97436b6a592eec5a348b: Status 404 returned error can't find the container with id 80e5d3025c6d5fe4a28030191cceb93e94c859ede9bd97436b6a592eec5a348b Nov 28 13:49:40 crc kubenswrapper[4857]: I1128 13:49:40.922470 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"e5ec18e7-6719-46dd-b580-303f3da41869","Type":"ContainerStarted","Data":"da71e64fb1ef0e05958001998905152cf192cd8cbccea76087ae208f9a1da63e"} Nov 28 13:49:40 crc kubenswrapper[4857]: I1128 13:49:40.923432 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-75vnk" event={"ID":"2507b0ba-8cf5-4bdd-be01-f4c69188fd66","Type":"ContainerDied","Data":"510d28903e360f25c722473f0ed05bca9e5af5f8ab012d4b051be956c0f1c1b8"} Nov 28 13:49:40 crc kubenswrapper[4857]: I1128 13:49:40.923455 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-75vnk" Nov 28 13:49:40 crc kubenswrapper[4857]: I1128 13:49:40.924711 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-spcl4" event={"ID":"da61181a-e0e6-4273-9070-9c3048727707","Type":"ContainerDied","Data":"aff75d332fb089851d01a7685429995d049d6c2493ab084ad4de67165f5a983f"} Nov 28 13:49:40 crc kubenswrapper[4857]: I1128 13:49:40.924773 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-spcl4" Nov 28 13:49:40 crc kubenswrapper[4857]: I1128 13:49:40.928356 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399","Type":"ContainerStarted","Data":"b27101efeb3ef6efeed65a8ad3adf3ce48550833cc4fee414436587568695007"} Nov 28 13:49:40 crc kubenswrapper[4857]: I1128 13:49:40.944318 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-r5hzx" event={"ID":"9cd2e351-6a59-49e5-89b3-d0d45981cfb3","Type":"ContainerStarted","Data":"6dc53e260c595a9879f1e41321226dc8f2bca3cc8bc340c19e9ee55098c5b543"} Nov 28 13:49:40 crc kubenswrapper[4857]: I1128 13:49:40.944417 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57d769cc4f-r5hzx" Nov 28 13:49:40 crc kubenswrapper[4857]: I1128 13:49:40.948292 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-hgm54" event={"ID":"21fe1399-7f40-43ec-bee8-868c937a6e19","Type":"ContainerStarted","Data":"80e5d3025c6d5fe4a28030191cceb93e94c859ede9bd97436b6a592eec5a348b"} Nov 28 13:49:40 crc kubenswrapper[4857]: I1128 13:49:40.977505 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57d769cc4f-r5hzx" podStartSLOduration=3.581065862 podStartE2EDuration="18.97747961s" podCreationTimestamp="2025-11-28 13:49:22 +0000 UTC" firstStartedPulling="2025-11-28 13:49:23.653362596 +0000 UTC m=+1213.777304033" lastFinishedPulling="2025-11-28 13:49:39.049776324 +0000 UTC m=+1229.173717781" observedRunningTime="2025-11-28 13:49:40.960737219 +0000 UTC m=+1231.084678666" watchObservedRunningTime="2025-11-28 13:49:40.97747961 +0000 UTC m=+1231.101421067" Nov 28 13:49:41 crc kubenswrapper[4857]: I1128 13:49:41.049238 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-75vnk"] Nov 28 13:49:41 crc kubenswrapper[4857]: I1128 13:49:41.057231 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-75vnk"] Nov 28 13:49:41 crc kubenswrapper[4857]: I1128 13:49:41.072586 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-spcl4"] Nov 28 13:49:41 crc kubenswrapper[4857]: I1128 13:49:41.080549 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-spcl4"] Nov 28 13:49:41 crc kubenswrapper[4857]: I1128 13:49:41.095455 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 13:49:41 crc kubenswrapper[4857]: I1128 13:49:41.308615 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:49:41 crc kubenswrapper[4857]: I1128 13:49:41.308718 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:49:41 crc kubenswrapper[4857]: I1128 13:49:41.308758 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" 
pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 13:49:41 crc kubenswrapper[4857]: I1128 13:49:41.309327 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f8014f585ed82233daf2682d55748994b4ded11ee145a4ddfa59430be03e8701"} pod="openshift-machine-config-operator/machine-config-daemon-dshsf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 13:49:41 crc kubenswrapper[4857]: I1128 13:49:41.309377 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" containerID="cri-o://f8014f585ed82233daf2682d55748994b4ded11ee145a4ddfa59430be03e8701" gracePeriod=600 Nov 28 13:49:41 crc kubenswrapper[4857]: W1128 13:49:41.327938 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddceb7667_07bc_486b_b65f_c87427949ffd.slice/crio-d6ff6c51ddadb63010adbc67c900a1ae58dea0b41dfc83b6440ff9baa3003822 WatchSource:0}: Error finding container d6ff6c51ddadb63010adbc67c900a1ae58dea0b41dfc83b6440ff9baa3003822: Status 404 returned error can't find the container with id d6ff6c51ddadb63010adbc67c900a1ae58dea0b41dfc83b6440ff9baa3003822 Nov 28 13:49:41 crc kubenswrapper[4857]: I1128 13:49:41.968209 4857 generic.go:334] "Generic (PLEG): container finished" podID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerID="f8014f585ed82233daf2682d55748994b4ded11ee145a4ddfa59430be03e8701" exitCode=0 Nov 28 13:49:41 crc kubenswrapper[4857]: I1128 13:49:41.968268 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerDied","Data":"f8014f585ed82233daf2682d55748994b4ded11ee145a4ddfa59430be03e8701"} Nov 28 13:49:41 crc kubenswrapper[4857]: I1128 13:49:41.968309 4857 scope.go:117] "RemoveContainer" containerID="cb45ea7c38d2a9151e38696a74767baa9022920c5afdfffe7b22b74cc2cdddc2" Nov 28 13:49:41 crc kubenswrapper[4857]: I1128 13:49:41.970588 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"dceb7667-07bc-486b-b65f-c87427949ffd","Type":"ContainerStarted","Data":"d6ff6c51ddadb63010adbc67c900a1ae58dea0b41dfc83b6440ff9baa3003822"} Nov 28 13:49:42 crc kubenswrapper[4857]: I1128 13:49:42.242566 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2507b0ba-8cf5-4bdd-be01-f4c69188fd66" path="/var/lib/kubelet/pods/2507b0ba-8cf5-4bdd-be01-f4c69188fd66/volumes" Nov 28 13:49:42 crc kubenswrapper[4857]: I1128 13:49:42.243075 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da61181a-e0e6-4273-9070-9c3048727707" path="/var/lib/kubelet/pods/da61181a-e0e6-4273-9070-9c3048727707/volumes" Nov 28 13:49:48 crc kubenswrapper[4857]: I1128 13:49:48.118187 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-57d769cc4f-r5hzx" Nov 28 13:49:48 crc kubenswrapper[4857]: I1128 13:49:48.184391 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-xg7zf"] Nov 28 13:49:52 crc kubenswrapper[4857]: E1128 13:49:52.809967 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" 
image="quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server:current-podified" Nov 28 13:49:52 crc kubenswrapper[4857]: E1128 13:49:52.810553 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ovsdbserver-sb,Image:quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server:current-podified,Command:[/usr/bin/dumb-init],Args:[/usr/local/bin/container-scripts/setup.sh],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5c7h585h5c7h9chbch64ch96hf9h599h674h88h84hffh5dh97h5fbh5c6h55h67fh54bh58ch76hb5h5ch7ch644h5bbh8chc6h68ch57bhbfq,ValueFrom:nil,},EnvVar{Name:OVN_LOGDIR,Value:/tmp,ValueFrom:nil,},EnvVar{Name:OVN_RUNDIR,Value:/tmp,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovndbcluster-sb-etc-ovn,ReadOnly:false,MountPath:/etc/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdb-rundir,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-sb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndb.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-sb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovndb.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-sb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4msqh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof ovsdb-server],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof ovsdb-server],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/cleanup.sh],},HTTPGet:nil,TCPSocket:nil,Sleep:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof 
ovsdb-server],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:20,TerminationGracePeriodSeconds:nil,},ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovsdbserver-sb-0_openstack(2dfd7b5e-9e1b-4f85-9933-2f3f55cee399): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 13:49:53 crc kubenswrapper[4857]: E1128 13:49:53.181013 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified" Nov 28 13:49:53 crc kubenswrapper[4857]: E1128 13:49:53.181700 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ovn-controller,Image:quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified,Command:[ovn-controller --pidfile unix:/run/openvswitch/db.sock --certificate=/etc/pki/tls/certs/ovndb.crt --private-key=/etc/pki/tls/private/ovndb.key --ca-cert=/etc/pki/tls/certs/ovndbca.crt],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5b7h647h645h54hcdhch5dchddh5dh5bbh95h669h648h76h98h5b6h54ch5dfh5f5h647h54h5cdh66ch58dh74h5b6h684hfbhcdh58bh689hd5q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:var-run,ReadOnly:false,MountPath:/var/run/openvswitch,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-run-ovn,ReadOnly:false,MountPath:/var/run/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:var-log-ovn,ReadOnly:false,MountPath:/var/log/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-controller-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndb.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-controller-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovndb.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovn-controller-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9c2wc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/ovn_controller_liveness.sh],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:30,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/ovn_controller_readiness.sh],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:30,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Li
fecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/usr/share/ovn/scripts/ovn-ctl stop_controller],},HTTPGet:nil,TCPSocket:nil,Sleep:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[NET_ADMIN SYS_ADMIN SYS_NICE],Drop:[],},Privileged:*true,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-controller-zhh8w_openstack(de8730d5-79df-4483-a263-1dd72a7ee079): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 13:49:53 crc kubenswrapper[4857]: E1128 13:49:53.186592 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovn-controller\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovn-controller-zhh8w" podUID="de8730d5-79df-4483-a263-1dd72a7ee079" Nov 28 13:49:54 crc kubenswrapper[4857]: I1128 13:49:54.102606 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"75c7c292-3658-4264-b86b-2a825aeb9ad4","Type":"ContainerStarted","Data":"151e26b048646c370d13de720160a60c4f2f03b89357ec4d11d13d8e86d159a4"} Nov 28 13:49:54 crc kubenswrapper[4857]: I1128 13:49:54.110546 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-hgm54" event={"ID":"21fe1399-7f40-43ec-bee8-868c937a6e19","Type":"ContainerStarted","Data":"8dabeda087aaf27aa60292912e3ecf4bbf1cddb7f5732a95a3564f65f7d30482"} Nov 28 13:49:54 crc kubenswrapper[4857]: I1128 13:49:54.121067 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-xg7zf" event={"ID":"8ac57da8-31cb-4a73-a503-b9764ecbc24e","Type":"ContainerStarted","Data":"fa18d86f76b78da6ea5bb0756ecda609b991f58c656c7e7bf2e32b7bb1cadfcc"} Nov 28 13:49:54 crc kubenswrapper[4857]: I1128 13:49:54.121212 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-666b6646f7-xg7zf" podUID="8ac57da8-31cb-4a73-a503-b9764ecbc24e" containerName="dnsmasq-dns" containerID="cri-o://fa18d86f76b78da6ea5bb0756ecda609b991f58c656c7e7bf2e32b7bb1cadfcc" gracePeriod=10 Nov 28 13:49:54 crc kubenswrapper[4857]: I1128 13:49:54.121445 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-666b6646f7-xg7zf" Nov 28 13:49:54 crc kubenswrapper[4857]: I1128 13:49:54.136443 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"f1f87bb5-7cc1-4533-b145-d855e45205ca","Type":"ContainerStarted","Data":"27e0b4afa9ae671a1b222c98eb790a7935045df19723929282aaa365b28ec8a1"} Nov 28 13:49:54 crc kubenswrapper[4857]: I1128 13:49:54.136567 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 28 13:49:54 crc kubenswrapper[4857]: I1128 13:49:54.146233 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"f6c9b673-669e-464a-b012-8b39314e1990","Type":"ContainerStarted","Data":"94d359b86653eba30202b5b263492bd7c58d333129f67ab86a62151c212fd0cd"} 
Nov 28 13:49:54 crc kubenswrapper[4857]: I1128 13:49:54.174641 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerStarted","Data":"46b7cc46f703cadc54c5480b2eebc620053c8d6a8106cbc200c8eb138aca1d8a"}
Nov 28 13:49:54 crc kubenswrapper[4857]: I1128 13:49:54.177029 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-666b6646f7-xg7zf" podStartSLOduration=16.382019706 podStartE2EDuration="32.177020312s" podCreationTimestamp="2025-11-28 13:49:22 +0000 UTC" firstStartedPulling="2025-11-28 13:49:23.295857864 +0000 UTC m=+1213.419799301" lastFinishedPulling="2025-11-28 13:49:39.09085847 +0000 UTC m=+1229.214799907" observedRunningTime="2025-11-28 13:49:54.176292853 +0000 UTC m=+1244.300234290" watchObservedRunningTime="2025-11-28 13:49:54.177020312 +0000 UTC m=+1244.300961739"
Nov 28 13:49:54 crc kubenswrapper[4857]: I1128 13:49:54.179915 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"dceb7667-07bc-486b-b65f-c87427949ffd","Type":"ContainerStarted","Data":"a8600b616c8309585de43967afe2ae78b2ef2823ae669a2d23786e69e65b2a4a"}
Nov 28 13:49:54 crc kubenswrapper[4857]: I1128 13:49:54.182393 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"818836f7-7e48-4477-9e79-41c71000929d","Type":"ContainerStarted","Data":"774bdff296f17d9390d03ac7580f3885a931f00f101b0f6a27e6c541f98075d8"}
Nov 28 13:49:54 crc kubenswrapper[4857]: E1128 13:49:54.185465 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovn-controller\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified\\\"\"" pod="openstack/ovn-controller-zhh8w" podUID="de8730d5-79df-4483-a263-1dd72a7ee079"
Nov 28 13:49:54 crc kubenswrapper[4857]: I1128 13:49:54.199329 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=14.714401669 podStartE2EDuration="28.199313343s" podCreationTimestamp="2025-11-28 13:49:26 +0000 UTC" firstStartedPulling="2025-11-28 13:49:39.677703579 +0000 UTC m=+1229.801645016" lastFinishedPulling="2025-11-28 13:49:53.162615253 +0000 UTC m=+1243.286556690" observedRunningTime="2025-11-28 13:49:54.198335146 +0000 UTC m=+1244.322276593" watchObservedRunningTime="2025-11-28 13:49:54.199313343 +0000 UTC m=+1244.323254780"
Nov 28 13:49:54 crc kubenswrapper[4857]: I1128 13:49:54.285263 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=12.415564673 podStartE2EDuration="26.285247296s" podCreationTimestamp="2025-11-28 13:49:28 +0000 UTC" firstStartedPulling="2025-11-28 13:49:39.663463025 +0000 UTC m=+1229.787404502" lastFinishedPulling="2025-11-28 13:49:53.533145688 +0000 UTC m=+1243.657087125" observedRunningTime="2025-11-28 13:49:54.279546133 +0000 UTC m=+1244.403487570" watchObservedRunningTime="2025-11-28 13:49:54.285247296 +0000 UTC m=+1244.409188733"
Nov 28 13:49:54 crc kubenswrapper[4857]: I1128 13:49:54.817908 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-xg7zf"
Nov 28 13:49:54 crc kubenswrapper[4857]: I1128 13:49:54.889864 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-krsdv\" (UniqueName: \"kubernetes.io/projected/8ac57da8-31cb-4a73-a503-b9764ecbc24e-kube-api-access-krsdv\") pod \"8ac57da8-31cb-4a73-a503-b9764ecbc24e\" (UID: \"8ac57da8-31cb-4a73-a503-b9764ecbc24e\") "
Nov 28 13:49:54 crc kubenswrapper[4857]: I1128 13:49:54.889978 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8ac57da8-31cb-4a73-a503-b9764ecbc24e-dns-svc\") pod \"8ac57da8-31cb-4a73-a503-b9764ecbc24e\" (UID: \"8ac57da8-31cb-4a73-a503-b9764ecbc24e\") "
Nov 28 13:49:54 crc kubenswrapper[4857]: I1128 13:49:54.890031 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8ac57da8-31cb-4a73-a503-b9764ecbc24e-config\") pod \"8ac57da8-31cb-4a73-a503-b9764ecbc24e\" (UID: \"8ac57da8-31cb-4a73-a503-b9764ecbc24e\") "
Nov 28 13:49:54 crc kubenswrapper[4857]: I1128 13:49:54.921229 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ac57da8-31cb-4a73-a503-b9764ecbc24e-kube-api-access-krsdv" (OuterVolumeSpecName: "kube-api-access-krsdv") pod "8ac57da8-31cb-4a73-a503-b9764ecbc24e" (UID: "8ac57da8-31cb-4a73-a503-b9764ecbc24e"). InnerVolumeSpecName "kube-api-access-krsdv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:49:54 crc kubenswrapper[4857]: I1128 13:49:54.934243 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8ac57da8-31cb-4a73-a503-b9764ecbc24e-config" (OuterVolumeSpecName: "config") pod "8ac57da8-31cb-4a73-a503-b9764ecbc24e" (UID: "8ac57da8-31cb-4a73-a503-b9764ecbc24e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:49:54 crc kubenswrapper[4857]: I1128 13:49:54.934402 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8ac57da8-31cb-4a73-a503-b9764ecbc24e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8ac57da8-31cb-4a73-a503-b9764ecbc24e" (UID: "8ac57da8-31cb-4a73-a503-b9764ecbc24e"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:49:54 crc kubenswrapper[4857]: I1128 13:49:54.991523 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8ac57da8-31cb-4a73-a503-b9764ecbc24e-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 28 13:49:54 crc kubenswrapper[4857]: I1128 13:49:54.991547 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8ac57da8-31cb-4a73-a503-b9764ecbc24e-config\") on node \"crc\" DevicePath \"\""
Nov 28 13:49:54 crc kubenswrapper[4857]: I1128 13:49:54.991557 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-krsdv\" (UniqueName: \"kubernetes.io/projected/8ac57da8-31cb-4a73-a503-b9764ecbc24e-kube-api-access-krsdv\") on node \"crc\" DevicePath \"\""
Nov 28 13:49:55 crc kubenswrapper[4857]: I1128 13:49:55.190301 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"e5550a25-04ef-4dde-afd4-627f1df97a90","Type":"ContainerStarted","Data":"07476080bfc0082bdd51a54d167f1b8f0849c9dddec8d7f0531eba67b8803b47"}
Nov 28 13:49:55 crc kubenswrapper[4857]: I1128 13:49:55.192338 4857 generic.go:334] "Generic (PLEG): container finished" podID="21fe1399-7f40-43ec-bee8-868c937a6e19" containerID="8dabeda087aaf27aa60292912e3ecf4bbf1cddb7f5732a95a3564f65f7d30482" exitCode=0
Nov 28 13:49:55 crc kubenswrapper[4857]: I1128 13:49:55.192400 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-hgm54" event={"ID":"21fe1399-7f40-43ec-bee8-868c937a6e19","Type":"ContainerDied","Data":"8dabeda087aaf27aa60292912e3ecf4bbf1cddb7f5732a95a3564f65f7d30482"}
Nov 28 13:49:55 crc kubenswrapper[4857]: I1128 13:49:55.196553 4857 generic.go:334] "Generic (PLEG): container finished" podID="8ac57da8-31cb-4a73-a503-b9764ecbc24e" containerID="fa18d86f76b78da6ea5bb0756ecda609b991f58c656c7e7bf2e32b7bb1cadfcc" exitCode=0
Nov 28 13:49:55 crc kubenswrapper[4857]: I1128 13:49:55.196619 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-xg7zf" event={"ID":"8ac57da8-31cb-4a73-a503-b9764ecbc24e","Type":"ContainerDied","Data":"fa18d86f76b78da6ea5bb0756ecda609b991f58c656c7e7bf2e32b7bb1cadfcc"}
Nov 28 13:49:55 crc kubenswrapper[4857]: I1128 13:49:55.196645 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-xg7zf" event={"ID":"8ac57da8-31cb-4a73-a503-b9764ecbc24e","Type":"ContainerDied","Data":"6766686b10af1a2bdcf06bc6270406677ab0f2bbec1c476ed4a33c7deba8323a"}
Nov 28 13:49:55 crc kubenswrapper[4857]: I1128 13:49:55.196664 4857 scope.go:117] "RemoveContainer" containerID="fa18d86f76b78da6ea5bb0756ecda609b991f58c656c7e7bf2e32b7bb1cadfcc"
Nov 28 13:49:55 crc kubenswrapper[4857]: I1128 13:49:55.196814 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-xg7zf"
Nov 28 13:49:55 crc kubenswrapper[4857]: I1128 13:49:55.206520 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"e5ec18e7-6719-46dd-b580-303f3da41869","Type":"ContainerStarted","Data":"2ae1f1de2ee889c449e3ff3c827e646d41938be57fdf8267c513587b8dbd2ecb"}
Nov 28 13:49:55 crc kubenswrapper[4857]: I1128 13:49:55.206650 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0"
Nov 28 13:49:55 crc kubenswrapper[4857]: I1128 13:49:55.227899 4857 scope.go:117] "RemoveContainer" containerID="7547b463a016863ab2b566da929f7328b97ae2da05ab80d8de9c75430b1ec736"
Nov 28 13:49:55 crc kubenswrapper[4857]: I1128 13:49:55.247445 4857 scope.go:117] "RemoveContainer" containerID="fa18d86f76b78da6ea5bb0756ecda609b991f58c656c7e7bf2e32b7bb1cadfcc"
Nov 28 13:49:55 crc kubenswrapper[4857]: E1128 13:49:55.252165 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fa18d86f76b78da6ea5bb0756ecda609b991f58c656c7e7bf2e32b7bb1cadfcc\": container with ID starting with fa18d86f76b78da6ea5bb0756ecda609b991f58c656c7e7bf2e32b7bb1cadfcc not found: ID does not exist" containerID="fa18d86f76b78da6ea5bb0756ecda609b991f58c656c7e7bf2e32b7bb1cadfcc"
Nov 28 13:49:55 crc kubenswrapper[4857]: I1128 13:49:55.252656 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fa18d86f76b78da6ea5bb0756ecda609b991f58c656c7e7bf2e32b7bb1cadfcc"} err="failed to get container status \"fa18d86f76b78da6ea5bb0756ecda609b991f58c656c7e7bf2e32b7bb1cadfcc\": rpc error: code = NotFound desc = could not find container \"fa18d86f76b78da6ea5bb0756ecda609b991f58c656c7e7bf2e32b7bb1cadfcc\": container with ID starting with fa18d86f76b78da6ea5bb0756ecda609b991f58c656c7e7bf2e32b7bb1cadfcc not found: ID does not exist"
Nov 28 13:49:55 crc kubenswrapper[4857]: I1128 13:49:55.252688 4857 scope.go:117] "RemoveContainer" containerID="7547b463a016863ab2b566da929f7328b97ae2da05ab80d8de9c75430b1ec736"
Nov 28 13:49:55 crc kubenswrapper[4857]: E1128 13:49:55.254134 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7547b463a016863ab2b566da929f7328b97ae2da05ab80d8de9c75430b1ec736\": container with ID starting with 7547b463a016863ab2b566da929f7328b97ae2da05ab80d8de9c75430b1ec736 not found: ID does not exist" containerID="7547b463a016863ab2b566da929f7328b97ae2da05ab80d8de9c75430b1ec736"
Nov 28 13:49:55 crc kubenswrapper[4857]: I1128 13:49:55.254180 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7547b463a016863ab2b566da929f7328b97ae2da05ab80d8de9c75430b1ec736"} err="failed to get container status \"7547b463a016863ab2b566da929f7328b97ae2da05ab80d8de9c75430b1ec736\": rpc error: code = NotFound desc = could not find container \"7547b463a016863ab2b566da929f7328b97ae2da05ab80d8de9c75430b1ec736\": container with ID starting with 7547b463a016863ab2b566da929f7328b97ae2da05ab80d8de9c75430b1ec736 not found: ID does not exist"
Nov 28 13:49:55 crc kubenswrapper[4857]: I1128 13:49:55.285291 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-xg7zf"]
Nov 28 13:49:55 crc kubenswrapper[4857]: I1128 13:49:55.291410 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-xg7zf"]
Nov 28 13:49:56 crc kubenswrapper[4857]: I1128 13:49:56.215660 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-hgm54" event={"ID":"21fe1399-7f40-43ec-bee8-868c937a6e19","Type":"ContainerStarted","Data":"a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17"}
Nov 28 13:49:56 crc kubenswrapper[4857]: I1128 13:49:56.238727 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ac57da8-31cb-4a73-a503-b9764ecbc24e" path="/var/lib/kubelet/pods/8ac57da8-31cb-4a73-a503-b9764ecbc24e/volumes"
Nov 28 13:49:57 crc kubenswrapper[4857]: E1128 13:49:57.028236 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdbserver-sb\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovsdbserver-sb-0" podUID="2dfd7b5e-9e1b-4f85-9933-2f3f55cee399"
Nov 28 13:49:57 crc kubenswrapper[4857]: I1128 13:49:57.241919 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-hgm54" event={"ID":"21fe1399-7f40-43ec-bee8-868c937a6e19","Type":"ContainerStarted","Data":"b92ad2228277ac1c1d0e0ba05d4fc59c373322274d92f626373330dc03f6a207"}
Nov 28 13:49:57 crc kubenswrapper[4857]: I1128 13:49:57.242956 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-hgm54"
Nov 28 13:49:57 crc kubenswrapper[4857]: I1128 13:49:57.243062 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-hgm54"
Nov 28 13:49:57 crc kubenswrapper[4857]: I1128 13:49:57.244120 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"dceb7667-07bc-486b-b65f-c87427949ffd","Type":"ContainerStarted","Data":"97ea011d40feafd9e10daf1c4217debdca8ad06ea7b693c5eca7afa22e62ac1f"}
Nov 28 13:49:57 crc kubenswrapper[4857]: I1128 13:49:57.246306 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399","Type":"ContainerStarted","Data":"028f12410d5a79779844276523096407b815d65be988d8365264dc7330864432"}
Nov 28 13:49:57 crc kubenswrapper[4857]: E1128 13:49:57.247525 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdbserver-sb\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server:current-podified\\\"\"" pod="openstack/ovsdbserver-sb-0" podUID="2dfd7b5e-9e1b-4f85-9933-2f3f55cee399"
Nov 28 13:49:57 crc kubenswrapper[4857]: I1128 13:49:57.276051 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-hgm54" podStartSLOduration=13.649184706 podStartE2EDuration="26.276036021s" podCreationTimestamp="2025-11-28 13:49:31 +0000 UTC" firstStartedPulling="2025-11-28 13:49:40.904845345 +0000 UTC m=+1231.028786782" lastFinishedPulling="2025-11-28 13:49:53.53169666 +0000 UTC m=+1243.655638097" observedRunningTime="2025-11-28 13:49:57.272068784 +0000 UTC m=+1247.396010221" watchObservedRunningTime="2025-11-28 13:49:57.276036021 +0000 UTC m=+1247.399977458"
Nov 28 13:49:57 crc kubenswrapper[4857]: I1128 13:49:57.298299 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=9.805202923 podStartE2EDuration="25.298282909s" podCreationTimestamp="2025-11-28 13:49:32 +0000 UTC" firstStartedPulling="2025-11-28 13:49:41.333544526 +0000 UTC m=+1231.457485963" lastFinishedPulling="2025-11-28 13:49:56.826624512 +0000 UTC m=+1246.950565949" observedRunningTime="2025-11-28 13:49:57.292644248 +0000 UTC m=+1247.416585685" watchObservedRunningTime="2025-11-28 13:49:57.298282909 +0000 UTC m=+1247.422224346"
Nov 28 13:49:57 crc kubenswrapper[4857]: I1128 13:49:57.702991 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0"
Nov 28 13:49:57 crc kubenswrapper[4857]: I1128 13:49:57.768013 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.254692 4857 generic.go:334] "Generic (PLEG): container finished" podID="f6c9b673-669e-464a-b012-8b39314e1990" containerID="94d359b86653eba30202b5b263492bd7c58d333129f67ab86a62151c212fd0cd" exitCode=0
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.254914 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"f6c9b673-669e-464a-b012-8b39314e1990","Type":"ContainerDied","Data":"94d359b86653eba30202b5b263492bd7c58d333129f67ab86a62151c212fd0cd"}
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.256106 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0"
Nov 28 13:49:58 crc kubenswrapper[4857]: E1128 13:49:58.260222 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdbserver-sb\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server:current-podified\\\"\"" pod="openstack/ovsdbserver-sb-0" podUID="2dfd7b5e-9e1b-4f85-9933-2f3f55cee399"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.296791 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.590540 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-llgjp"]
Nov 28 13:49:58 crc kubenswrapper[4857]: E1128 13:49:58.595491 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ac57da8-31cb-4a73-a503-b9764ecbc24e" containerName="init"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.595531 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ac57da8-31cb-4a73-a503-b9764ecbc24e" containerName="init"
Nov 28 13:49:58 crc kubenswrapper[4857]: E1128 13:49:58.595561 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ac57da8-31cb-4a73-a503-b9764ecbc24e" containerName="dnsmasq-dns"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.595580 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ac57da8-31cb-4a73-a503-b9764ecbc24e" containerName="dnsmasq-dns"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.595737 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ac57da8-31cb-4a73-a503-b9764ecbc24e" containerName="dnsmasq-dns"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.596647 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-llgjp"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.598060 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.603473 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-llgjp"]
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.641797 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-f67hs"]
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.642725 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-f67hs"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.650971 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.662802 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-f67hs"]
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.795343 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7c117ad8-9d37-4e85-b408-e2d77c8331df-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-f67hs\" (UID: \"7c117ad8-9d37-4e85-b408-e2d77c8331df\") " pod="openstack/ovn-controller-metrics-f67hs"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.795375 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c117ad8-9d37-4e85-b408-e2d77c8331df-combined-ca-bundle\") pod \"ovn-controller-metrics-f67hs\" (UID: \"7c117ad8-9d37-4e85-b408-e2d77c8331df\") " pod="openstack/ovn-controller-metrics-f67hs"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.795395 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-srnhk\" (UniqueName: \"kubernetes.io/projected/82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d-kube-api-access-srnhk\") pod \"dnsmasq-dns-5bf47b49b7-llgjp\" (UID: \"82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d\") " pod="openstack/dnsmasq-dns-5bf47b49b7-llgjp"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.795422 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d-ovsdbserver-nb\") pod \"dnsmasq-dns-5bf47b49b7-llgjp\" (UID: \"82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d\") " pod="openstack/dnsmasq-dns-5bf47b49b7-llgjp"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.795437 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/7c117ad8-9d37-4e85-b408-e2d77c8331df-ovn-rundir\") pod \"ovn-controller-metrics-f67hs\" (UID: \"7c117ad8-9d37-4e85-b408-e2d77c8331df\") " pod="openstack/ovn-controller-metrics-f67hs"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.795453 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c117ad8-9d37-4e85-b408-e2d77c8331df-config\") pod \"ovn-controller-metrics-f67hs\" (UID: \"7c117ad8-9d37-4e85-b408-e2d77c8331df\") " pod="openstack/ovn-controller-metrics-f67hs"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.795473 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m2mpc\" (UniqueName: \"kubernetes.io/projected/7c117ad8-9d37-4e85-b408-e2d77c8331df-kube-api-access-m2mpc\") pod \"ovn-controller-metrics-f67hs\" (UID: \"7c117ad8-9d37-4e85-b408-e2d77c8331df\") " pod="openstack/ovn-controller-metrics-f67hs"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.795511 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d-dns-svc\") pod \"dnsmasq-dns-5bf47b49b7-llgjp\" (UID: \"82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d\") " pod="openstack/dnsmasq-dns-5bf47b49b7-llgjp"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.795543 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d-config\") pod \"dnsmasq-dns-5bf47b49b7-llgjp\" (UID: \"82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d\") " pod="openstack/dnsmasq-dns-5bf47b49b7-llgjp"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.795573 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/7c117ad8-9d37-4e85-b408-e2d77c8331df-ovs-rundir\") pod \"ovn-controller-metrics-f67hs\" (UID: \"7c117ad8-9d37-4e85-b408-e2d77c8331df\") " pod="openstack/ovn-controller-metrics-f67hs"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.883994 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-llgjp"]
Nov 28 13:49:58 crc kubenswrapper[4857]: E1128 13:49:58.884531 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[config dns-svc kube-api-access-srnhk ovsdbserver-nb], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/dnsmasq-dns-5bf47b49b7-llgjp" podUID="82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.896744 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d-dns-svc\") pod \"dnsmasq-dns-5bf47b49b7-llgjp\" (UID: \"82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d\") " pod="openstack/dnsmasq-dns-5bf47b49b7-llgjp"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.896808 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d-config\") pod \"dnsmasq-dns-5bf47b49b7-llgjp\" (UID: \"82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d\") " pod="openstack/dnsmasq-dns-5bf47b49b7-llgjp"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.896846 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/7c117ad8-9d37-4e85-b408-e2d77c8331df-ovs-rundir\") pod \"ovn-controller-metrics-f67hs\" (UID: \"7c117ad8-9d37-4e85-b408-e2d77c8331df\") " pod="openstack/ovn-controller-metrics-f67hs"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.896896 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7c117ad8-9d37-4e85-b408-e2d77c8331df-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-f67hs\" (UID: \"7c117ad8-9d37-4e85-b408-e2d77c8331df\") " pod="openstack/ovn-controller-metrics-f67hs"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.896912 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c117ad8-9d37-4e85-b408-e2d77c8331df-combined-ca-bundle\") pod \"ovn-controller-metrics-f67hs\" (UID: \"7c117ad8-9d37-4e85-b408-e2d77c8331df\") " pod="openstack/ovn-controller-metrics-f67hs"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.896931 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-srnhk\" (UniqueName: \"kubernetes.io/projected/82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d-kube-api-access-srnhk\") pod \"dnsmasq-dns-5bf47b49b7-llgjp\" (UID: \"82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d\") " pod="openstack/dnsmasq-dns-5bf47b49b7-llgjp"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.896968 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d-ovsdbserver-nb\") pod \"dnsmasq-dns-5bf47b49b7-llgjp\" (UID: \"82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d\") " pod="openstack/dnsmasq-dns-5bf47b49b7-llgjp"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.896984 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/7c117ad8-9d37-4e85-b408-e2d77c8331df-ovn-rundir\") pod \"ovn-controller-metrics-f67hs\" (UID: \"7c117ad8-9d37-4e85-b408-e2d77c8331df\") " pod="openstack/ovn-controller-metrics-f67hs"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.897279 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/7c117ad8-9d37-4e85-b408-e2d77c8331df-ovn-rundir\") pod \"ovn-controller-metrics-f67hs\" (UID: \"7c117ad8-9d37-4e85-b408-e2d77c8331df\") " pod="openstack/ovn-controller-metrics-f67hs"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.898091 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d-dns-svc\") pod \"dnsmasq-dns-5bf47b49b7-llgjp\" (UID: \"82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d\") " pod="openstack/dnsmasq-dns-5bf47b49b7-llgjp"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.898676 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d-config\") pod \"dnsmasq-dns-5bf47b49b7-llgjp\" (UID: \"82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d\") " pod="openstack/dnsmasq-dns-5bf47b49b7-llgjp"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.898729 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/7c117ad8-9d37-4e85-b408-e2d77c8331df-ovs-rundir\") pod \"ovn-controller-metrics-f67hs\" (UID: \"7c117ad8-9d37-4e85-b408-e2d77c8331df\") " pod="openstack/ovn-controller-metrics-f67hs"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.900086 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c117ad8-9d37-4e85-b408-e2d77c8331df-config\") pod \"ovn-controller-metrics-f67hs\" (UID: \"7c117ad8-9d37-4e85-b408-e2d77c8331df\") " pod="openstack/ovn-controller-metrics-f67hs"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.900172 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m2mpc\" (UniqueName: \"kubernetes.io/projected/7c117ad8-9d37-4e85-b408-e2d77c8331df-kube-api-access-m2mpc\") pod \"ovn-controller-metrics-f67hs\" (UID: \"7c117ad8-9d37-4e85-b408-e2d77c8331df\") " pod="openstack/ovn-controller-metrics-f67hs"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.901234 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c117ad8-9d37-4e85-b408-e2d77c8331df-config\") pod \"ovn-controller-metrics-f67hs\" (UID: \"7c117ad8-9d37-4e85-b408-e2d77c8331df\") " pod="openstack/ovn-controller-metrics-f67hs"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.901927 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d-ovsdbserver-nb\") pod \"dnsmasq-dns-5bf47b49b7-llgjp\" (UID: \"82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d\") " pod="openstack/dnsmasq-dns-5bf47b49b7-llgjp"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.916162 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c117ad8-9d37-4e85-b408-e2d77c8331df-combined-ca-bundle\") pod \"ovn-controller-metrics-f67hs\" (UID: \"7c117ad8-9d37-4e85-b408-e2d77c8331df\") " pod="openstack/ovn-controller-metrics-f67hs"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.929494 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7c117ad8-9d37-4e85-b408-e2d77c8331df-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-f67hs\" (UID: \"7c117ad8-9d37-4e85-b408-e2d77c8331df\") " pod="openstack/ovn-controller-metrics-f67hs"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.932350 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-srnhk\" (UniqueName: \"kubernetes.io/projected/82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d-kube-api-access-srnhk\") pod \"dnsmasq-dns-5bf47b49b7-llgjp\" (UID: \"82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d\") " pod="openstack/dnsmasq-dns-5bf47b49b7-llgjp"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.934421 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8554648995-pvvqk"]
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.936746 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-pvvqk"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.948995 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.950237 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m2mpc\" (UniqueName: \"kubernetes.io/projected/7c117ad8-9d37-4e85-b408-e2d77c8331df-kube-api-access-m2mpc\") pod \"ovn-controller-metrics-f67hs\" (UID: \"7c117ad8-9d37-4e85-b408-e2d77c8331df\") " pod="openstack/ovn-controller-metrics-f67hs"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.960113 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-f67hs"
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.976778 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-pvvqk"]
Nov 28 13:49:58 crc kubenswrapper[4857]: I1128 13:49:58.980571 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0"
Nov 28 13:49:59 crc kubenswrapper[4857]: I1128 13:49:59.005143 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/25ef3505-47be-41dd-88e8-0fdfa2fc80f3-dns-svc\") pod \"dnsmasq-dns-8554648995-pvvqk\" (UID: \"25ef3505-47be-41dd-88e8-0fdfa2fc80f3\") " pod="openstack/dnsmasq-dns-8554648995-pvvqk"
Nov 28 13:49:59 crc kubenswrapper[4857]: I1128 13:49:59.006067 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l6vmq\" (UniqueName: \"kubernetes.io/projected/25ef3505-47be-41dd-88e8-0fdfa2fc80f3-kube-api-access-l6vmq\") pod \"dnsmasq-dns-8554648995-pvvqk\" (UID: \"25ef3505-47be-41dd-88e8-0fdfa2fc80f3\") " pod="openstack/dnsmasq-dns-8554648995-pvvqk"
Nov 28 13:49:59 crc kubenswrapper[4857]: I1128 13:49:59.006371 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/25ef3505-47be-41dd-88e8-0fdfa2fc80f3-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-pvvqk\" (UID: \"25ef3505-47be-41dd-88e8-0fdfa2fc80f3\") " pod="openstack/dnsmasq-dns-8554648995-pvvqk"
Nov 28 13:49:59 crc kubenswrapper[4857]: I1128 13:49:59.006485 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25ef3505-47be-41dd-88e8-0fdfa2fc80f3-config\") pod \"dnsmasq-dns-8554648995-pvvqk\" (UID: \"25ef3505-47be-41dd-88e8-0fdfa2fc80f3\") " pod="openstack/dnsmasq-dns-8554648995-pvvqk"
Nov 28 13:49:59 crc kubenswrapper[4857]: I1128 13:49:59.006723 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/25ef3505-47be-41dd-88e8-0fdfa2fc80f3-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-pvvqk\" (UID: \"25ef3505-47be-41dd-88e8-0fdfa2fc80f3\") " pod="openstack/dnsmasq-dns-8554648995-pvvqk"
Nov 28 13:49:59 crc kubenswrapper[4857]: I1128 13:49:59.108975 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l6vmq\" (UniqueName: \"kubernetes.io/projected/25ef3505-47be-41dd-88e8-0fdfa2fc80f3-kube-api-access-l6vmq\") pod \"dnsmasq-dns-8554648995-pvvqk\" (UID: \"25ef3505-47be-41dd-88e8-0fdfa2fc80f3\") " pod="openstack/dnsmasq-dns-8554648995-pvvqk"
Nov 28 13:49:59 crc kubenswrapper[4857]: I1128 13:49:59.109074 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/25ef3505-47be-41dd-88e8-0fdfa2fc80f3-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-pvvqk\" (UID: \"25ef3505-47be-41dd-88e8-0fdfa2fc80f3\") " pod="openstack/dnsmasq-dns-8554648995-pvvqk"
Nov 28 13:49:59 crc kubenswrapper[4857]: I1128 13:49:59.109094 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25ef3505-47be-41dd-88e8-0fdfa2fc80f3-config\") pod \"dnsmasq-dns-8554648995-pvvqk\" (UID: \"25ef3505-47be-41dd-88e8-0fdfa2fc80f3\") " pod="openstack/dnsmasq-dns-8554648995-pvvqk"
Nov 28 13:49:59 crc kubenswrapper[4857]: I1128 13:49:59.109133 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/25ef3505-47be-41dd-88e8-0fdfa2fc80f3-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-pvvqk\" (UID: \"25ef3505-47be-41dd-88e8-0fdfa2fc80f3\") " pod="openstack/dnsmasq-dns-8554648995-pvvqk"
Nov 28 13:49:59 crc kubenswrapper[4857]: I1128 13:49:59.109179 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/25ef3505-47be-41dd-88e8-0fdfa2fc80f3-dns-svc\") pod \"dnsmasq-dns-8554648995-pvvqk\" (UID: \"25ef3505-47be-41dd-88e8-0fdfa2fc80f3\") " pod="openstack/dnsmasq-dns-8554648995-pvvqk"
Nov 28 13:49:59 crc kubenswrapper[4857]: I1128 13:49:59.110134 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/25ef3505-47be-41dd-88e8-0fdfa2fc80f3-dns-svc\") pod \"dnsmasq-dns-8554648995-pvvqk\" (UID: \"25ef3505-47be-41dd-88e8-0fdfa2fc80f3\") " pod="openstack/dnsmasq-dns-8554648995-pvvqk"
Nov 28 13:49:59 crc kubenswrapper[4857]: I1128 13:49:59.110630 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/25ef3505-47be-41dd-88e8-0fdfa2fc80f3-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-pvvqk\" (UID: \"25ef3505-47be-41dd-88e8-0fdfa2fc80f3\") " pod="openstack/dnsmasq-dns-8554648995-pvvqk"
Nov 28 13:49:59 crc kubenswrapper[4857]: I1128 13:49:59.111303 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25ef3505-47be-41dd-88e8-0fdfa2fc80f3-config\") pod \"dnsmasq-dns-8554648995-pvvqk\" (UID: \"25ef3505-47be-41dd-88e8-0fdfa2fc80f3\") " pod="openstack/dnsmasq-dns-8554648995-pvvqk"
Nov 28 13:49:59 crc kubenswrapper[4857]: I1128 13:49:59.111594 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/25ef3505-47be-41dd-88e8-0fdfa2fc80f3-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-pvvqk\" (UID: \"25ef3505-47be-41dd-88e8-0fdfa2fc80f3\") " pod="openstack/dnsmasq-dns-8554648995-pvvqk"
Nov 28 13:49:59 crc kubenswrapper[4857]: I1128 13:49:59.130884 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l6vmq\" (UniqueName: \"kubernetes.io/projected/25ef3505-47be-41dd-88e8-0fdfa2fc80f3-kube-api-access-l6vmq\") pod \"dnsmasq-dns-8554648995-pvvqk\" (UID: \"25ef3505-47be-41dd-88e8-0fdfa2fc80f3\") " pod="openstack/dnsmasq-dns-8554648995-pvvqk"
Nov 28 13:49:59 crc kubenswrapper[4857]: I1128 13:49:59.263982 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"f6c9b673-669e-464a-b012-8b39314e1990","Type":"ContainerStarted","Data":"3d5a1e70902743d5830edbb49ef840b594da301bdcf85c3dfb36991d40ad00b1"}
Nov 28 13:49:59 crc kubenswrapper[4857]: I1128 13:49:59.270239 4857 generic.go:334] "Generic (PLEG): container finished" podID="75c7c292-3658-4264-b86b-2a825aeb9ad4" containerID="151e26b048646c370d13de720160a60c4f2f03b89357ec4d11d13d8e86d159a4" exitCode=0
Nov 28 13:49:59 crc kubenswrapper[4857]: I1128 13:49:59.271025 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"75c7c292-3658-4264-b86b-2a825aeb9ad4","Type":"ContainerDied","Data":"151e26b048646c370d13de720160a60c4f2f03b89357ec4d11d13d8e86d159a4"}
Nov 28 13:49:59 crc kubenswrapper[4857]: I1128 13:49:59.271581 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-llgjp"
Nov 28 13:49:59 crc kubenswrapper[4857]: I1128 13:49:59.287237 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-llgjp"
Nov 28 13:49:59 crc kubenswrapper[4857]: I1128 13:49:59.294863 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=22.448017822 podStartE2EDuration="36.294838149s" podCreationTimestamp="2025-11-28 13:49:23 +0000 UTC" firstStartedPulling="2025-11-28 13:49:39.687138533 +0000 UTC m=+1229.811079980" lastFinishedPulling="2025-11-28 13:49:53.53395886 +0000 UTC m=+1243.657900307" observedRunningTime="2025-11-28 13:49:59.284898331 +0000 UTC m=+1249.408839768" watchObservedRunningTime="2025-11-28 13:49:59.294838149 +0000 UTC m=+1249.418779586"
Nov 28 13:49:59 crc kubenswrapper[4857]: I1128 13:49:59.305498 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-pvvqk"
Nov 28 13:49:59 crc kubenswrapper[4857]: I1128 13:49:59.419144 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d-ovsdbserver-nb\") pod \"82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d\" (UID: \"82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d\") "
Nov 28 13:49:59 crc kubenswrapper[4857]: I1128 13:49:59.419240 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d-config\") pod \"82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d\" (UID: \"82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d\") "
Nov 28 13:49:59 crc kubenswrapper[4857]: I1128 13:49:59.419272 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-srnhk\" (UniqueName: \"kubernetes.io/projected/82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d-kube-api-access-srnhk\") pod \"82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d\" (UID: \"82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d\") "
Nov 28 13:49:59 crc kubenswrapper[4857]: I1128 13:49:59.419455 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d-dns-svc\") pod \"82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d\" (UID: \"82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d\") "
Nov 28 13:49:59 crc kubenswrapper[4857]: I1128 13:49:59.420872 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d" (UID: "82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:49:59 crc kubenswrapper[4857]: I1128 13:49:59.420878 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d-config" (OuterVolumeSpecName: "config") pod "82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d" (UID: "82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:49:59 crc kubenswrapper[4857]: I1128 13:49:59.421209 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d" (UID: "82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:49:59 crc kubenswrapper[4857]: I1128 13:49:59.422855 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d-kube-api-access-srnhk" (OuterVolumeSpecName: "kube-api-access-srnhk") pod "82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d" (UID: "82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d"). InnerVolumeSpecName "kube-api-access-srnhk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:49:59 crc kubenswrapper[4857]: I1128 13:49:59.449134 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-f67hs"]
Nov 28 13:49:59 crc kubenswrapper[4857]: I1128 13:49:59.521353 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 28 13:49:59 crc kubenswrapper[4857]: I1128 13:49:59.521381 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 28 13:49:59 crc kubenswrapper[4857]: I1128 13:49:59.521391 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d-config\") on node \"crc\" DevicePath \"\""
Nov 28 13:49:59 crc kubenswrapper[4857]: I1128 13:49:59.521401 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-srnhk\" (UniqueName: \"kubernetes.io/projected/82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d-kube-api-access-srnhk\") on node \"crc\" DevicePath \"\""
Nov 28 13:49:59 crc kubenswrapper[4857]: I1128 13:49:59.817890 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-pvvqk"]
Nov 28 13:49:59 crc kubenswrapper[4857]: W1128 13:49:59.822732 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod25ef3505_47be_41dd_88e8_0fdfa2fc80f3.slice/crio-223d08f4fcb76304f255056fc37f6262c877cb4757fdfa8cd88b2d316281ab4d WatchSource:0}: Error finding container 223d08f4fcb76304f255056fc37f6262c877cb4757fdfa8cd88b2d316281ab4d: Status 404 returned error can't find the container with id 223d08f4fcb76304f255056fc37f6262c877cb4757fdfa8cd88b2d316281ab4d
Nov 28 13:50:00 crc kubenswrapper[4857]: I1128 13:50:00.288213 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"75c7c292-3658-4264-b86b-2a825aeb9ad4","Type":"ContainerStarted","Data":"7dafe86e22a18fbebf5e7e7d5612cc7d57a0117fe9637462e2f483a803153f13"}
Nov 28 13:50:00 crc kubenswrapper[4857]: I1128 13:50:00.297456 4857 generic.go:334] "Generic (PLEG): container finished" podID="25ef3505-47be-41dd-88e8-0fdfa2fc80f3" containerID="de043174936e4340eb3a043d8834dfdea9015143a56b20e9073cf5e819ea79bf" exitCode=0
Nov 28 13:50:00 crc kubenswrapper[4857]: I1128 13:50:00.297540 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-pvvqk" event={"ID":"25ef3505-47be-41dd-88e8-0fdfa2fc80f3","Type":"ContainerDied","Data":"de043174936e4340eb3a043d8834dfdea9015143a56b20e9073cf5e819ea79bf"}
Nov 28 13:50:00 crc kubenswrapper[4857]: I1128 13:50:00.297601 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-pvvqk" event={"ID":"25ef3505-47be-41dd-88e8-0fdfa2fc80f3","Type":"ContainerStarted","Data":"223d08f4fcb76304f255056fc37f6262c877cb4757fdfa8cd88b2d316281ab4d"}
Nov 28 13:50:00 crc kubenswrapper[4857]: I1128 13:50:00.301170 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-f67hs" event={"ID":"7c117ad8-9d37-4e85-b408-e2d77c8331df","Type":"ContainerStarted","Data":"0e846cecd8668f3338b50a23dd0e5ca393a261a6642af341de9d050961ee27c0"}
Nov 28 13:50:00 crc kubenswrapper[4857]: I1128 13:50:00.301215 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-f67hs" event={"ID":"7c117ad8-9d37-4e85-b408-e2d77c8331df","Type":"ContainerStarted","Data":"ed9e5d685bfe843d603e9dac303ba68b35babeb8ffd1730ef9c216597f3e63b2"}
Nov 28 13:50:00 crc kubenswrapper[4857]: I1128 13:50:00.301178 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-llgjp"
Nov 28 13:50:00 crc kubenswrapper[4857]: I1128 13:50:00.316810 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=21.415600229 podStartE2EDuration="35.31679187s" podCreationTimestamp="2025-11-28 13:49:25 +0000 UTC" firstStartedPulling="2025-11-28 13:49:39.684975025 +0000 UTC m=+1229.808916462" lastFinishedPulling="2025-11-28 13:49:53.586166666 +0000 UTC m=+1243.710108103" observedRunningTime="2025-11-28 13:50:00.309853433 +0000 UTC m=+1250.433794870" watchObservedRunningTime="2025-11-28 13:50:00.31679187 +0000 UTC m=+1250.440733307"
Nov 28 13:50:00 crc kubenswrapper[4857]: I1128 13:50:00.342813 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-f67hs" podStartSLOduration=2.3427878 podStartE2EDuration="2.3427878s" podCreationTimestamp="2025-11-28 13:49:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:50:00.332690518 +0000 UTC m=+1250.456631965" watchObservedRunningTime="2025-11-28 13:50:00.3427878 +0000 UTC m=+1250.466729247"
Nov 28 13:50:00 crc kubenswrapper[4857]: I1128 13:50:00.419312 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-llgjp"]
Nov 28 13:50:00 crc kubenswrapper[4857]: I1128 13:50:00.426013 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-llgjp"]
Nov 28 13:50:01 crc kubenswrapper[4857]: I1128 13:50:01.311685 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-pvvqk" event={"ID":"25ef3505-47be-41dd-88e8-0fdfa2fc80f3","Type":"ContainerStarted","Data":"3d625bee275ad6c9557322ab275acd4cd9c13e4cfd746a63de44d8800f5d4262"}
Nov 28 13:50:01 crc kubenswrapper[4857]: I1128 13:50:01.333275 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8554648995-pvvqk" podStartSLOduration=3.333250864 podStartE2EDuration="3.333250864s" podCreationTimestamp="2025-11-28 13:49:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:50:01.328491956 +0000 UTC m=+1251.452433393" watchObservedRunningTime="2025-11-28 13:50:01.333250864 +0000 UTC m=+1251.457192301"
Nov 28 13:50:02 crc kubenswrapper[4857]: I1128 13:50:02.237910 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d" path="/var/lib/kubelet/pods/82fbc158-d6e0-4fa6-ab3b-cd9ebabc985d/volumes"
Nov 28 13:50:02 crc kubenswrapper[4857]: I1128 13:50:02.238391 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0"
Nov 28 13:50:02 crc kubenswrapper[4857]: I1128 13:50:02.318088 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8554648995-pvvqk"
Nov 28 13:50:05 crc kubenswrapper[4857]: I1128 13:50:05.314981 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0"
Nov 28 13:50:05 crc kubenswrapper[4857]: I1128 13:50:05.315281 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0"
Nov 28 13:50:05 crc kubenswrapper[4857]: I1128 13:50:05.415494 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0"
Nov 28 13:50:05 crc kubenswrapper[4857]: I1128 13:50:05.508091 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0"
Nov 28 13:50:06 crc kubenswrapper[4857]: I1128 13:50:06.840976 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0"
Nov 28 13:50:06 crc kubenswrapper[4857]: I1128 13:50:06.841256 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0"
Nov 28 13:50:06 crc kubenswrapper[4857]: I1128 13:50:06.892205 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-c1b2-account-create-update-4qfg8"]
Nov 28 13:50:06 crc kubenswrapper[4857]: I1128 13:50:06.893215 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-c1b2-account-create-update-4qfg8"
Nov 28 13:50:06 crc kubenswrapper[4857]: I1128 13:50:06.894857 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret"
Nov 28 13:50:06 crc kubenswrapper[4857]: I1128 13:50:06.908192 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-c1b2-account-create-update-4qfg8"]
Nov 28 13:50:06 crc kubenswrapper[4857]: I1128 13:50:06.961295 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-thjsw"]
Nov 28 13:50:06 crc kubenswrapper[4857]: I1128 13:50:06.962811 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0"
Nov 28 13:50:06 crc kubenswrapper[4857]: I1128 13:50:06.963159 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-thjsw"
Nov 28 13:50:06 crc kubenswrapper[4857]: I1128 13:50:06.971268 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-thjsw"]
Nov 28 13:50:07 crc kubenswrapper[4857]: I1128 13:50:07.062010 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/62737fad-a99b-41e3-9333-f3dd199563a7-operator-scripts\") pod \"keystone-db-create-thjsw\" (UID: \"62737fad-a99b-41e3-9333-f3dd199563a7\") " pod="openstack/keystone-db-create-thjsw"
Nov 28 13:50:07 crc kubenswrapper[4857]: I1128 13:50:07.062354 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d6wjf\" (UniqueName: \"kubernetes.io/projected/ffd7fd7a-f9cd-4d8a-a8e9-70959c5d2eba-kube-api-access-d6wjf\") pod \"keystone-c1b2-account-create-update-4qfg8\" (UID: \"ffd7fd7a-f9cd-4d8a-a8e9-70959c5d2eba\") " pod="openstack/keystone-c1b2-account-create-update-4qfg8"
Nov 28 13:50:07 crc kubenswrapper[4857]: I1128 13:50:07.062586 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jnpss\" (UniqueName: \"kubernetes.io/projected/62737fad-a99b-41e3-9333-f3dd199563a7-kube-api-access-jnpss\") pod \"keystone-db-create-thjsw\" (UID: \"62737fad-a99b-41e3-9333-f3dd199563a7\") " pod="openstack/keystone-db-create-thjsw"
Nov 28 13:50:07 crc kubenswrapper[4857]: I1128 13:50:07.062822 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ffd7fd7a-f9cd-4d8a-a8e9-70959c5d2eba-operator-scripts\") pod \"keystone-c1b2-account-create-update-4qfg8\" (UID: \"ffd7fd7a-f9cd-4d8a-a8e9-70959c5d2eba\") " pod="openstack/keystone-c1b2-account-create-update-4qfg8"
Nov 28 13:50:07 crc kubenswrapper[4857]: I1128 13:50:07.157851 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-6db4d"]
Nov 28 13:50:07 crc kubenswrapper[4857]: I1128 13:50:07.159158 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-6db4d"
Nov 28 13:50:07 crc kubenswrapper[4857]: I1128 13:50:07.164171 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/62737fad-a99b-41e3-9333-f3dd199563a7-operator-scripts\") pod \"keystone-db-create-thjsw\" (UID: \"62737fad-a99b-41e3-9333-f3dd199563a7\") " pod="openstack/keystone-db-create-thjsw"
Nov 28 13:50:07 crc kubenswrapper[4857]: I1128 13:50:07.164213 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d6wjf\" (UniqueName: \"kubernetes.io/projected/ffd7fd7a-f9cd-4d8a-a8e9-70959c5d2eba-kube-api-access-d6wjf\") pod \"keystone-c1b2-account-create-update-4qfg8\" (UID: \"ffd7fd7a-f9cd-4d8a-a8e9-70959c5d2eba\") " pod="openstack/keystone-c1b2-account-create-update-4qfg8"
Nov 28 13:50:07 crc kubenswrapper[4857]: I1128 13:50:07.164244 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jnpss\" (UniqueName: \"kubernetes.io/projected/62737fad-a99b-41e3-9333-f3dd199563a7-kube-api-access-jnpss\") pod \"keystone-db-create-thjsw\" (UID: \"62737fad-a99b-41e3-9333-f3dd199563a7\") " pod="openstack/keystone-db-create-thjsw"
Nov 28 13:50:07 crc kubenswrapper[4857]: I1128 13:50:07.164285 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ffd7fd7a-f9cd-4d8a-a8e9-70959c5d2eba-operator-scripts\") pod \"keystone-c1b2-account-create-update-4qfg8\" (UID: \"ffd7fd7a-f9cd-4d8a-a8e9-70959c5d2eba\") " pod="openstack/keystone-c1b2-account-create-update-4qfg8"
Nov 28 13:50:07 crc kubenswrapper[4857]: I1128 13:50:07.165229 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ffd7fd7a-f9cd-4d8a-a8e9-70959c5d2eba-operator-scripts\") pod \"keystone-c1b2-account-create-update-4qfg8\" (UID: \"ffd7fd7a-f9cd-4d8a-a8e9-70959c5d2eba\") " pod="openstack/keystone-c1b2-account-create-update-4qfg8"
Nov 28 13:50:07 crc kubenswrapper[4857]: I1128 13:50:07.165452 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/62737fad-a99b-41e3-9333-f3dd199563a7-operator-scripts\") pod \"keystone-db-create-thjsw\" (UID: \"62737fad-a99b-41e3-9333-f3dd199563a7\") " pod="openstack/keystone-db-create-thjsw"
Nov 28 13:50:07 crc kubenswrapper[4857]: I1128 13:50:07.178582 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-6db4d"]
Nov 28 13:50:07 crc kubenswrapper[4857]: I1128 13:50:07.190581 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jnpss\" (UniqueName: \"kubernetes.io/projected/62737fad-a99b-41e3-9333-f3dd199563a7-kube-api-access-jnpss\") pod \"keystone-db-create-thjsw\" (UID: \"62737fad-a99b-41e3-9333-f3dd199563a7\") " pod="openstack/keystone-db-create-thjsw"
Nov 28 13:50:07 crc kubenswrapper[4857]: I1128 13:50:07.198790 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d6wjf\" (UniqueName: \"kubernetes.io/projected/ffd7fd7a-f9cd-4d8a-a8e9-70959c5d2eba-kube-api-access-d6wjf\") pod \"keystone-c1b2-account-create-update-4qfg8\" (UID: \"ffd7fd7a-f9cd-4d8a-a8e9-70959c5d2eba\") " pod="openstack/keystone-c1b2-account-create-update-4qfg8"
Nov 28 13:50:07 crc kubenswrapper[4857]: I1128 13:50:07.225685 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-c1b2-account-create-update-4qfg8"
Nov 28 13:50:07 crc kubenswrapper[4857]: I1128 13:50:07.270293 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-f611-account-create-update-24frn"]
Nov 28 13:50:07 crc kubenswrapper[4857]: I1128 13:50:07.271504 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-f611-account-create-update-24frn"
Nov 28 13:50:07 crc kubenswrapper[4857]: I1128 13:50:07.276102 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b9fcz\" (UniqueName: \"kubernetes.io/projected/0832be69-9adb-40ba-a288-42ec7741fc19-kube-api-access-b9fcz\") pod \"placement-db-create-6db4d\" (UID: \"0832be69-9adb-40ba-a288-42ec7741fc19\") " pod="openstack/placement-db-create-6db4d"
Nov 28 13:50:07 crc kubenswrapper[4857]: I1128 13:50:07.276251 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0832be69-9adb-40ba-a288-42ec7741fc19-operator-scripts\") pod \"placement-db-create-6db4d\" (UID: \"0832be69-9adb-40ba-a288-42ec7741fc19\") " pod="openstack/placement-db-create-6db4d"
Nov 28 13:50:07 crc kubenswrapper[4857]: I1128 13:50:07.279412 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-thjsw"
Nov 28 13:50:07 crc kubenswrapper[4857]: I1128 13:50:07.283916 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret"
Nov 28 13:50:07 crc kubenswrapper[4857]: I1128 13:50:07.320852 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-f611-account-create-update-24frn"]
Nov 28 13:50:07 crc kubenswrapper[4857]: I1128 13:50:07.377448 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2c8cca0-a390-4db6-978d-d32af42ac290-operator-scripts\") pod \"placement-f611-account-create-update-24frn\" (UID: \"b2c8cca0-a390-4db6-978d-d32af42ac290\") " pod="openstack/placement-f611-account-create-update-24frn"
Nov 28 13:50:07 crc kubenswrapper[4857]: I1128 13:50:07.377569 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0832be69-9adb-40ba-a288-42ec7741fc19-operator-scripts\") pod \"placement-db-create-6db4d\" (UID: \"0832be69-9adb-40ba-a288-42ec7741fc19\") " pod="openstack/placement-db-create-6db4d"
Nov 28 13:50:07 crc kubenswrapper[4857]: I1128 13:50:07.377619 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b9fcz\" (UniqueName: \"kubernetes.io/projected/0832be69-9adb-40ba-a288-42ec7741fc19-kube-api-access-b9fcz\") pod \"placement-db-create-6db4d\" (UID: \"0832be69-9adb-40ba-a288-42ec7741fc19\") " pod="openstack/placement-db-create-6db4d"
Nov 28 13:50:07 crc kubenswrapper[4857]: I1128 13:50:07.377640 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6vv6c\" (UniqueName: \"kubernetes.io/projected/b2c8cca0-a390-4db6-978d-d32af42ac290-kube-api-access-6vv6c\") pod \"placement-f611-account-create-update-24frn\" (UID: \"b2c8cca0-a390-4db6-978d-d32af42ac290\") " pod="openstack/placement-f611-account-create-update-24frn"
Nov 28 13:50:07 crc kubenswrapper[4857]: I1128 13:50:07.380789 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0832be69-9adb-40ba-a288-42ec7741fc19-operator-scripts\") pod \"placement-db-create-6db4d\" (UID: \"0832be69-9adb-40ba-a288-42ec7741fc19\") " pod="openstack/placement-db-create-6db4d"
Nov 28 13:50:07 crc kubenswrapper[4857]: I1128 13:50:07.411756 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b9fcz\" (UniqueName: \"kubernetes.io/projected/0832be69-9adb-40ba-a288-42ec7741fc19-kube-api-access-b9fcz\") pod \"placement-db-create-6db4d\" (UID: \"0832be69-9adb-40ba-a288-42ec7741fc19\") " pod="openstack/placement-db-create-6db4d"
Nov 28 13:50:07 crc kubenswrapper[4857]: I1128 13:50:07.479593 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6vv6c\" (UniqueName: \"kubernetes.io/projected/b2c8cca0-a390-4db6-978d-d32af42ac290-kube-api-access-6vv6c\") pod \"placement-f611-account-create-update-24frn\" (UID: \"b2c8cca0-a390-4db6-978d-d32af42ac290\") " pod="openstack/placement-f611-account-create-update-24frn"
Nov 28 13:50:07 crc kubenswrapper[4857]: I1128 13:50:07.479916 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2c8cca0-a390-4db6-978d-d32af42ac290-operator-scripts\") pod \"placement-f611-account-create-update-24frn\" (UID: \"b2c8cca0-a390-4db6-978d-d32af42ac290\") " pod="openstack/placement-f611-account-create-update-24frn"
Nov 28 13:50:07 crc kubenswrapper[4857]: I1128 13:50:07.481134 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-6db4d"
Nov 28 13:50:07 crc kubenswrapper[4857]: I1128 13:50:07.481280 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2c8cca0-a390-4db6-978d-d32af42ac290-operator-scripts\") pod \"placement-f611-account-create-update-24frn\" (UID: \"b2c8cca0-a390-4db6-978d-d32af42ac290\") " pod="openstack/placement-f611-account-create-update-24frn"
Nov 28 13:50:07 crc kubenswrapper[4857]: I1128 13:50:07.503810 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6vv6c\" (UniqueName: \"kubernetes.io/projected/b2c8cca0-a390-4db6-978d-d32af42ac290-kube-api-access-6vv6c\") pod \"placement-f611-account-create-update-24frn\" (UID: \"b2c8cca0-a390-4db6-978d-d32af42ac290\") " pod="openstack/placement-f611-account-create-update-24frn"
Nov 28 13:50:07 crc kubenswrapper[4857]: I1128 13:50:07.509807 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0"
Nov 28 13:50:07 crc kubenswrapper[4857]: I1128 13:50:07.684294 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-f611-account-create-update-24frn"
Nov 28 13:50:08 crc kubenswrapper[4857]: I1128 13:50:08.049680 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-thjsw"]
Nov 28 13:50:08 crc kubenswrapper[4857]: I1128 13:50:08.068275 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-c1b2-account-create-update-4qfg8"]
Nov 28 13:50:08 crc kubenswrapper[4857]: I1128 13:50:08.079092 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-6db4d"]
Nov 28 13:50:08 crc kubenswrapper[4857]: I1128 13:50:08.375468 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-c1b2-account-create-update-4qfg8" event={"ID":"ffd7fd7a-f9cd-4d8a-a8e9-70959c5d2eba","Type":"ContainerStarted","Data":"c0320d40b0a8c57cd5e4e1d5a6e118ab755a15b53932caf49c8d5778ff4ec59c"}
Nov 28 13:50:08 crc kubenswrapper[4857]: I1128 13:50:08.377069 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-6db4d" event={"ID":"0832be69-9adb-40ba-a288-42ec7741fc19","Type":"ContainerStarted","Data":"db1a7cd14a21bbf763b7ce8ed074aef35decb37529d326e0b68e724aeef549c5"}
Nov 28 13:50:08 crc kubenswrapper[4857]: I1128 13:50:08.379532 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-zhh8w" event={"ID":"de8730d5-79df-4483-a263-1dd72a7ee079","Type":"ContainerStarted","Data":"7f8d74bb0a16cfc92269e5127a4271c686a559e0fa2c0a91afa634e529d6544e"}
Nov 28 13:50:08 crc kubenswrapper[4857]: I1128 13:50:08.380739 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-zhh8w"
Nov 28 13:50:08 crc kubenswrapper[4857]: I1128 13:50:08.385113 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-thjsw" event={"ID":"62737fad-a99b-41e3-9333-f3dd199563a7","Type":"ContainerStarted","Data":"cebb43c89e8c652d06298486abdb513d986747ec70932f5f374c742ceeb7cc9f"}
Nov 28 13:50:08 crc kubenswrapper[4857]: I1128 13:50:08.385163 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-thjsw" event={"ID":"62737fad-a99b-41e3-9333-f3dd199563a7","Type":"ContainerStarted","Data":"f7c96057eaf768c5096ecedbfe6a92a036600b8dc1c2bc1d22b5e37422464582"}
Nov 28 13:50:08 crc kubenswrapper[4857]: I1128 13:50:08.397234 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-f611-account-create-update-24frn"]
Nov 28 13:50:08 crc kubenswrapper[4857]: I1128 13:50:08.409553 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-zhh8w" podStartSLOduration=9.357372996 podStartE2EDuration="37.409539214s" podCreationTimestamp="2025-11-28 13:49:31 +0000 UTC" firstStartedPulling="2025-11-28 13:49:39.687588485 +0000 UTC m=+1229.811529922" lastFinishedPulling="2025-11-28 13:50:07.739754703 +0000 UTC m=+1257.863696140" observedRunningTime="2025-11-28 13:50:08.40454563 +0000 UTC m=+1258.528487077" watchObservedRunningTime="2025-11-28 13:50:08.409539214 +0000 UTC m=+1258.533480651"
Nov 28 13:50:08 crc kubenswrapper[4857]: I1128 13:50:08.431910 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-create-thjsw" podStartSLOduration=2.431892326 podStartE2EDuration="2.431892326s" podCreationTimestamp="2025-11-28 13:50:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28
13:50:08.429640375 +0000 UTC m=+1258.553581812" watchObservedRunningTime="2025-11-28 13:50:08.431892326 +0000 UTC m=+1258.555833763" Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.020688 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-pvvqk"] Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.021221 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8554648995-pvvqk" podUID="25ef3505-47be-41dd-88e8-0fdfa2fc80f3" containerName="dnsmasq-dns" containerID="cri-o://3d625bee275ad6c9557322ab275acd4cd9c13e4cfd746a63de44d8800f5d4262" gracePeriod=10 Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.039556 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-8554648995-pvvqk" Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.066657 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-52vgm"] Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.068216 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-52vgm" Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.094378 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-52vgm"] Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.235116 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/03eee55b-b660-45e7-91db-4491a89910ad-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-52vgm\" (UID: \"03eee55b-b660-45e7-91db-4491a89910ad\") " pod="openstack/dnsmasq-dns-b8fbc5445-52vgm" Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.235171 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/03eee55b-b660-45e7-91db-4491a89910ad-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-52vgm\" (UID: \"03eee55b-b660-45e7-91db-4491a89910ad\") " pod="openstack/dnsmasq-dns-b8fbc5445-52vgm" Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.235232 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mdhzb\" (UniqueName: \"kubernetes.io/projected/03eee55b-b660-45e7-91db-4491a89910ad-kube-api-access-mdhzb\") pod \"dnsmasq-dns-b8fbc5445-52vgm\" (UID: \"03eee55b-b660-45e7-91db-4491a89910ad\") " pod="openstack/dnsmasq-dns-b8fbc5445-52vgm" Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.235283 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/03eee55b-b660-45e7-91db-4491a89910ad-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-52vgm\" (UID: \"03eee55b-b660-45e7-91db-4491a89910ad\") " pod="openstack/dnsmasq-dns-b8fbc5445-52vgm" Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.235308 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/03eee55b-b660-45e7-91db-4491a89910ad-config\") pod \"dnsmasq-dns-b8fbc5445-52vgm\" (UID: \"03eee55b-b660-45e7-91db-4491a89910ad\") " pod="openstack/dnsmasq-dns-b8fbc5445-52vgm" Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.306773 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-8554648995-pvvqk" 
podUID="25ef3505-47be-41dd-88e8-0fdfa2fc80f3" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.114:5353: connect: connection refused" Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.337869 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mdhzb\" (UniqueName: \"kubernetes.io/projected/03eee55b-b660-45e7-91db-4491a89910ad-kube-api-access-mdhzb\") pod \"dnsmasq-dns-b8fbc5445-52vgm\" (UID: \"03eee55b-b660-45e7-91db-4491a89910ad\") " pod="openstack/dnsmasq-dns-b8fbc5445-52vgm" Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.338008 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/03eee55b-b660-45e7-91db-4491a89910ad-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-52vgm\" (UID: \"03eee55b-b660-45e7-91db-4491a89910ad\") " pod="openstack/dnsmasq-dns-b8fbc5445-52vgm" Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.338047 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/03eee55b-b660-45e7-91db-4491a89910ad-config\") pod \"dnsmasq-dns-b8fbc5445-52vgm\" (UID: \"03eee55b-b660-45e7-91db-4491a89910ad\") " pod="openstack/dnsmasq-dns-b8fbc5445-52vgm" Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.338109 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/03eee55b-b660-45e7-91db-4491a89910ad-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-52vgm\" (UID: \"03eee55b-b660-45e7-91db-4491a89910ad\") " pod="openstack/dnsmasq-dns-b8fbc5445-52vgm" Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.338155 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/03eee55b-b660-45e7-91db-4491a89910ad-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-52vgm\" (UID: \"03eee55b-b660-45e7-91db-4491a89910ad\") " pod="openstack/dnsmasq-dns-b8fbc5445-52vgm" Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.339444 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/03eee55b-b660-45e7-91db-4491a89910ad-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-52vgm\" (UID: \"03eee55b-b660-45e7-91db-4491a89910ad\") " pod="openstack/dnsmasq-dns-b8fbc5445-52vgm" Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.342142 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/03eee55b-b660-45e7-91db-4491a89910ad-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-52vgm\" (UID: \"03eee55b-b660-45e7-91db-4491a89910ad\") " pod="openstack/dnsmasq-dns-b8fbc5445-52vgm" Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.342983 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/03eee55b-b660-45e7-91db-4491a89910ad-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-52vgm\" (UID: \"03eee55b-b660-45e7-91db-4491a89910ad\") " pod="openstack/dnsmasq-dns-b8fbc5445-52vgm" Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.345073 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/03eee55b-b660-45e7-91db-4491a89910ad-config\") pod \"dnsmasq-dns-b8fbc5445-52vgm\" (UID: \"03eee55b-b660-45e7-91db-4491a89910ad\") " 
pod="openstack/dnsmasq-dns-b8fbc5445-52vgm" Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.371411 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mdhzb\" (UniqueName: \"kubernetes.io/projected/03eee55b-b660-45e7-91db-4491a89910ad-kube-api-access-mdhzb\") pod \"dnsmasq-dns-b8fbc5445-52vgm\" (UID: \"03eee55b-b660-45e7-91db-4491a89910ad\") " pod="openstack/dnsmasq-dns-b8fbc5445-52vgm" Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.397805 4857 generic.go:334] "Generic (PLEG): container finished" podID="ffd7fd7a-f9cd-4d8a-a8e9-70959c5d2eba" containerID="400c9c0eea1ca4656bcb7228bc24857d00720d04b36a80f5d974335993c0803f" exitCode=0 Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.397877 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-c1b2-account-create-update-4qfg8" event={"ID":"ffd7fd7a-f9cd-4d8a-a8e9-70959c5d2eba","Type":"ContainerDied","Data":"400c9c0eea1ca4656bcb7228bc24857d00720d04b36a80f5d974335993c0803f"} Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.406570 4857 generic.go:334] "Generic (PLEG): container finished" podID="0832be69-9adb-40ba-a288-42ec7741fc19" containerID="8a5e02322a0c92ef92403b10c46418a6d8a41c31a9a29fb0afbfbbabaa3daa7d" exitCode=0 Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.406669 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-6db4d" event={"ID":"0832be69-9adb-40ba-a288-42ec7741fc19","Type":"ContainerDied","Data":"8a5e02322a0c92ef92403b10c46418a6d8a41c31a9a29fb0afbfbbabaa3daa7d"} Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.409727 4857 generic.go:334] "Generic (PLEG): container finished" podID="b2c8cca0-a390-4db6-978d-d32af42ac290" containerID="42941d6c5b0a04155e45168b57ba9dd956d33497bb2e71cd6e0569353039e78f" exitCode=0 Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.409816 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-f611-account-create-update-24frn" event={"ID":"b2c8cca0-a390-4db6-978d-d32af42ac290","Type":"ContainerDied","Data":"42941d6c5b0a04155e45168b57ba9dd956d33497bb2e71cd6e0569353039e78f"} Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.409847 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-f611-account-create-update-24frn" event={"ID":"b2c8cca0-a390-4db6-978d-d32af42ac290","Type":"ContainerStarted","Data":"371a606c340092e497d2c113fec0b0b95c2621f63dcdf6fce11779af393265f8"} Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.413684 4857 generic.go:334] "Generic (PLEG): container finished" podID="25ef3505-47be-41dd-88e8-0fdfa2fc80f3" containerID="3d625bee275ad6c9557322ab275acd4cd9c13e4cfd746a63de44d8800f5d4262" exitCode=0 Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.413755 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-pvvqk" event={"ID":"25ef3505-47be-41dd-88e8-0fdfa2fc80f3","Type":"ContainerDied","Data":"3d625bee275ad6c9557322ab275acd4cd9c13e4cfd746a63de44d8800f5d4262"} Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.415983 4857 generic.go:334] "Generic (PLEG): container finished" podID="62737fad-a99b-41e3-9333-f3dd199563a7" containerID="cebb43c89e8c652d06298486abdb513d986747ec70932f5f374c742ceeb7cc9f" exitCode=0 Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.416217 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-thjsw" 
event={"ID":"62737fad-a99b-41e3-9333-f3dd199563a7","Type":"ContainerDied","Data":"cebb43c89e8c652d06298486abdb513d986747ec70932f5f374c742ceeb7cc9f"} Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.442503 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-52vgm" Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.568587 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-pvvqk" Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.645755 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/25ef3505-47be-41dd-88e8-0fdfa2fc80f3-ovsdbserver-nb\") pod \"25ef3505-47be-41dd-88e8-0fdfa2fc80f3\" (UID: \"25ef3505-47be-41dd-88e8-0fdfa2fc80f3\") " Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.646182 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/25ef3505-47be-41dd-88e8-0fdfa2fc80f3-ovsdbserver-sb\") pod \"25ef3505-47be-41dd-88e8-0fdfa2fc80f3\" (UID: \"25ef3505-47be-41dd-88e8-0fdfa2fc80f3\") " Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.646242 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/25ef3505-47be-41dd-88e8-0fdfa2fc80f3-dns-svc\") pod \"25ef3505-47be-41dd-88e8-0fdfa2fc80f3\" (UID: \"25ef3505-47be-41dd-88e8-0fdfa2fc80f3\") " Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.646415 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l6vmq\" (UniqueName: \"kubernetes.io/projected/25ef3505-47be-41dd-88e8-0fdfa2fc80f3-kube-api-access-l6vmq\") pod \"25ef3505-47be-41dd-88e8-0fdfa2fc80f3\" (UID: \"25ef3505-47be-41dd-88e8-0fdfa2fc80f3\") " Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.646467 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25ef3505-47be-41dd-88e8-0fdfa2fc80f3-config\") pod \"25ef3505-47be-41dd-88e8-0fdfa2fc80f3\" (UID: \"25ef3505-47be-41dd-88e8-0fdfa2fc80f3\") " Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.651171 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25ef3505-47be-41dd-88e8-0fdfa2fc80f3-kube-api-access-l6vmq" (OuterVolumeSpecName: "kube-api-access-l6vmq") pod "25ef3505-47be-41dd-88e8-0fdfa2fc80f3" (UID: "25ef3505-47be-41dd-88e8-0fdfa2fc80f3"). InnerVolumeSpecName "kube-api-access-l6vmq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.695586 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25ef3505-47be-41dd-88e8-0fdfa2fc80f3-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "25ef3505-47be-41dd-88e8-0fdfa2fc80f3" (UID: "25ef3505-47be-41dd-88e8-0fdfa2fc80f3"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.697115 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25ef3505-47be-41dd-88e8-0fdfa2fc80f3-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "25ef3505-47be-41dd-88e8-0fdfa2fc80f3" (UID: "25ef3505-47be-41dd-88e8-0fdfa2fc80f3"). 
InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.701566 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25ef3505-47be-41dd-88e8-0fdfa2fc80f3-config" (OuterVolumeSpecName: "config") pod "25ef3505-47be-41dd-88e8-0fdfa2fc80f3" (UID: "25ef3505-47be-41dd-88e8-0fdfa2fc80f3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.733659 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25ef3505-47be-41dd-88e8-0fdfa2fc80f3-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "25ef3505-47be-41dd-88e8-0fdfa2fc80f3" (UID: "25ef3505-47be-41dd-88e8-0fdfa2fc80f3"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.751081 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/25ef3505-47be-41dd-88e8-0fdfa2fc80f3-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.751123 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/25ef3505-47be-41dd-88e8-0fdfa2fc80f3-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.751136 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/25ef3505-47be-41dd-88e8-0fdfa2fc80f3-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.751147 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l6vmq\" (UniqueName: \"kubernetes.io/projected/25ef3505-47be-41dd-88e8-0fdfa2fc80f3-kube-api-access-l6vmq\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.751161 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25ef3505-47be-41dd-88e8-0fdfa2fc80f3-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:09 crc kubenswrapper[4857]: W1128 13:50:09.950732 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod03eee55b_b660_45e7_91db_4491a89910ad.slice/crio-8721d9c897df39ee23bffb1d3c58e7ebf746fed236910d50fd3130807eb10252 WatchSource:0}: Error finding container 8721d9c897df39ee23bffb1d3c58e7ebf746fed236910d50fd3130807eb10252: Status 404 returned error can't find the container with id 8721d9c897df39ee23bffb1d3c58e7ebf746fed236910d50fd3130807eb10252 Nov 28 13:50:09 crc kubenswrapper[4857]: I1128 13:50:09.952075 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-52vgm"] Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.169537 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Nov 28 13:50:10 crc kubenswrapper[4857]: E1128 13:50:10.169856 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25ef3505-47be-41dd-88e8-0fdfa2fc80f3" containerName="dnsmasq-dns" Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.169868 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="25ef3505-47be-41dd-88e8-0fdfa2fc80f3" containerName="dnsmasq-dns" Nov 28 13:50:10 crc kubenswrapper[4857]: E1128 
13:50:10.169883 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25ef3505-47be-41dd-88e8-0fdfa2fc80f3" containerName="init" Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.169889 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="25ef3505-47be-41dd-88e8-0fdfa2fc80f3" containerName="init" Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.170085 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="25ef3505-47be-41dd-88e8-0fdfa2fc80f3" containerName="dnsmasq-dns" Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.176496 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.184589 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.184782 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.184891 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-dg8jt" Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.185639 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.194210 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.270465 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/7dacf187-3671-4114-a93e-e2296c8c20b2-lock\") pod \"swift-storage-0\" (UID: \"7dacf187-3671-4114-a93e-e2296c8c20b2\") " pod="openstack/swift-storage-0" Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.271025 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kjqcd\" (UniqueName: \"kubernetes.io/projected/7dacf187-3671-4114-a93e-e2296c8c20b2-kube-api-access-kjqcd\") pod \"swift-storage-0\" (UID: \"7dacf187-3671-4114-a93e-e2296c8c20b2\") " pod="openstack/swift-storage-0" Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.271233 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/7dacf187-3671-4114-a93e-e2296c8c20b2-cache\") pod \"swift-storage-0\" (UID: \"7dacf187-3671-4114-a93e-e2296c8c20b2\") " pod="openstack/swift-storage-0" Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.271372 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7dacf187-3671-4114-a93e-e2296c8c20b2-etc-swift\") pod \"swift-storage-0\" (UID: \"7dacf187-3671-4114-a93e-e2296c8c20b2\") " pod="openstack/swift-storage-0" Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.271492 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"swift-storage-0\" (UID: \"7dacf187-3671-4114-a93e-e2296c8c20b2\") " pod="openstack/swift-storage-0" Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.373833 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: 
\"kubernetes.io/empty-dir/7dacf187-3671-4114-a93e-e2296c8c20b2-cache\") pod \"swift-storage-0\" (UID: \"7dacf187-3671-4114-a93e-e2296c8c20b2\") " pod="openstack/swift-storage-0" Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.373921 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7dacf187-3671-4114-a93e-e2296c8c20b2-etc-swift\") pod \"swift-storage-0\" (UID: \"7dacf187-3671-4114-a93e-e2296c8c20b2\") " pod="openstack/swift-storage-0" Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.373978 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"swift-storage-0\" (UID: \"7dacf187-3671-4114-a93e-e2296c8c20b2\") " pod="openstack/swift-storage-0" Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.374023 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/7dacf187-3671-4114-a93e-e2296c8c20b2-lock\") pod \"swift-storage-0\" (UID: \"7dacf187-3671-4114-a93e-e2296c8c20b2\") " pod="openstack/swift-storage-0" Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.374130 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kjqcd\" (UniqueName: \"kubernetes.io/projected/7dacf187-3671-4114-a93e-e2296c8c20b2-kube-api-access-kjqcd\") pod \"swift-storage-0\" (UID: \"7dacf187-3671-4114-a93e-e2296c8c20b2\") " pod="openstack/swift-storage-0" Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.380794 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/7dacf187-3671-4114-a93e-e2296c8c20b2-cache\") pod \"swift-storage-0\" (UID: \"7dacf187-3671-4114-a93e-e2296c8c20b2\") " pod="openstack/swift-storage-0" Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.381098 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/7dacf187-3671-4114-a93e-e2296c8c20b2-lock\") pod \"swift-storage-0\" (UID: \"7dacf187-3671-4114-a93e-e2296c8c20b2\") " pod="openstack/swift-storage-0" Nov 28 13:50:10 crc kubenswrapper[4857]: E1128 13:50:10.382028 4857 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 28 13:50:10 crc kubenswrapper[4857]: E1128 13:50:10.382051 4857 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 28 13:50:10 crc kubenswrapper[4857]: E1128 13:50:10.382088 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7dacf187-3671-4114-a93e-e2296c8c20b2-etc-swift podName:7dacf187-3671-4114-a93e-e2296c8c20b2 nodeName:}" failed. No retries permitted until 2025-11-28 13:50:10.882073167 +0000 UTC m=+1261.006014604 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/7dacf187-3671-4114-a93e-e2296c8c20b2-etc-swift") pod "swift-storage-0" (UID: "7dacf187-3671-4114-a93e-e2296c8c20b2") : configmap "swift-ring-files" not found Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.382241 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"swift-storage-0\" (UID: \"7dacf187-3671-4114-a93e-e2296c8c20b2\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/swift-storage-0" Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.398142 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kjqcd\" (UniqueName: \"kubernetes.io/projected/7dacf187-3671-4114-a93e-e2296c8c20b2-kube-api-access-kjqcd\") pod \"swift-storage-0\" (UID: \"7dacf187-3671-4114-a93e-e2296c8c20b2\") " pod="openstack/swift-storage-0" Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.406560 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"swift-storage-0\" (UID: \"7dacf187-3671-4114-a93e-e2296c8c20b2\") " pod="openstack/swift-storage-0" Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.424680 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-pvvqk" event={"ID":"25ef3505-47be-41dd-88e8-0fdfa2fc80f3","Type":"ContainerDied","Data":"223d08f4fcb76304f255056fc37f6262c877cb4757fdfa8cd88b2d316281ab4d"} Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.424928 4857 scope.go:117] "RemoveContainer" containerID="3d625bee275ad6c9557322ab275acd4cd9c13e4cfd746a63de44d8800f5d4262" Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.424699 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-pvvqk" Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.426630 4857 generic.go:334] "Generic (PLEG): container finished" podID="03eee55b-b660-45e7-91db-4491a89910ad" containerID="bed62605c70428e6de44941ccf888faecc30e99f6c13e0d0cef8fbd5f61f11da" exitCode=0 Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.427053 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-52vgm" event={"ID":"03eee55b-b660-45e7-91db-4491a89910ad","Type":"ContainerDied","Data":"bed62605c70428e6de44941ccf888faecc30e99f6c13e0d0cef8fbd5f61f11da"} Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.427096 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-52vgm" event={"ID":"03eee55b-b660-45e7-91db-4491a89910ad","Type":"ContainerStarted","Data":"8721d9c897df39ee23bffb1d3c58e7ebf746fed236910d50fd3130807eb10252"} Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.597092 4857 scope.go:117] "RemoveContainer" containerID="de043174936e4340eb3a043d8834dfdea9015143a56b20e9073cf5e819ea79bf" Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.604079 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-pvvqk"] Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.615121 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8554648995-pvvqk"] Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.833638 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-thjsw" Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.885832 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/62737fad-a99b-41e3-9333-f3dd199563a7-operator-scripts\") pod \"62737fad-a99b-41e3-9333-f3dd199563a7\" (UID: \"62737fad-a99b-41e3-9333-f3dd199563a7\") " Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.885877 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jnpss\" (UniqueName: \"kubernetes.io/projected/62737fad-a99b-41e3-9333-f3dd199563a7-kube-api-access-jnpss\") pod \"62737fad-a99b-41e3-9333-f3dd199563a7\" (UID: \"62737fad-a99b-41e3-9333-f3dd199563a7\") " Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.886239 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7dacf187-3671-4114-a93e-e2296c8c20b2-etc-swift\") pod \"swift-storage-0\" (UID: \"7dacf187-3671-4114-a93e-e2296c8c20b2\") " pod="openstack/swift-storage-0" Nov 28 13:50:10 crc kubenswrapper[4857]: E1128 13:50:10.886401 4857 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 28 13:50:10 crc kubenswrapper[4857]: E1128 13:50:10.886415 4857 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 28 13:50:10 crc kubenswrapper[4857]: E1128 13:50:10.886460 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7dacf187-3671-4114-a93e-e2296c8c20b2-etc-swift podName:7dacf187-3671-4114-a93e-e2296c8c20b2 nodeName:}" failed. No retries permitted until 2025-11-28 13:50:11.886447204 +0000 UTC m=+1262.010388641 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/7dacf187-3671-4114-a93e-e2296c8c20b2-etc-swift") pod "swift-storage-0" (UID: "7dacf187-3671-4114-a93e-e2296c8c20b2") : configmap "swift-ring-files" not found Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.886723 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62737fad-a99b-41e3-9333-f3dd199563a7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "62737fad-a99b-41e3-9333-f3dd199563a7" (UID: "62737fad-a99b-41e3-9333-f3dd199563a7"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.893183 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62737fad-a99b-41e3-9333-f3dd199563a7-kube-api-access-jnpss" (OuterVolumeSpecName: "kube-api-access-jnpss") pod "62737fad-a99b-41e3-9333-f3dd199563a7" (UID: "62737fad-a99b-41e3-9333-f3dd199563a7"). InnerVolumeSpecName "kube-api-access-jnpss". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.990774 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/62737fad-a99b-41e3-9333-f3dd199563a7-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:10 crc kubenswrapper[4857]: I1128 13:50:10.990801 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jnpss\" (UniqueName: \"kubernetes.io/projected/62737fad-a99b-41e3-9333-f3dd199563a7-kube-api-access-jnpss\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.001456 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-c1b2-account-create-update-4qfg8" Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.008308 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-f611-account-create-update-24frn" Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.021381 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-6db4d" Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.092468 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6wjf\" (UniqueName: \"kubernetes.io/projected/ffd7fd7a-f9cd-4d8a-a8e9-70959c5d2eba-kube-api-access-d6wjf\") pod \"ffd7fd7a-f9cd-4d8a-a8e9-70959c5d2eba\" (UID: \"ffd7fd7a-f9cd-4d8a-a8e9-70959c5d2eba\") " Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.092547 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2c8cca0-a390-4db6-978d-d32af42ac290-operator-scripts\") pod \"b2c8cca0-a390-4db6-978d-d32af42ac290\" (UID: \"b2c8cca0-a390-4db6-978d-d32af42ac290\") " Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.092588 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ffd7fd7a-f9cd-4d8a-a8e9-70959c5d2eba-operator-scripts\") pod \"ffd7fd7a-f9cd-4d8a-a8e9-70959c5d2eba\" (UID: \"ffd7fd7a-f9cd-4d8a-a8e9-70959c5d2eba\") " Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.092632 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b9fcz\" (UniqueName: \"kubernetes.io/projected/0832be69-9adb-40ba-a288-42ec7741fc19-kube-api-access-b9fcz\") pod \"0832be69-9adb-40ba-a288-42ec7741fc19\" (UID: \"0832be69-9adb-40ba-a288-42ec7741fc19\") " Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.092670 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0832be69-9adb-40ba-a288-42ec7741fc19-operator-scripts\") pod \"0832be69-9adb-40ba-a288-42ec7741fc19\" (UID: \"0832be69-9adb-40ba-a288-42ec7741fc19\") " Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.092730 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6vv6c\" (UniqueName: \"kubernetes.io/projected/b2c8cca0-a390-4db6-978d-d32af42ac290-kube-api-access-6vv6c\") pod \"b2c8cca0-a390-4db6-978d-d32af42ac290\" (UID: \"b2c8cca0-a390-4db6-978d-d32af42ac290\") " Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.094208 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/configmap/ffd7fd7a-f9cd-4d8a-a8e9-70959c5d2eba-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ffd7fd7a-f9cd-4d8a-a8e9-70959c5d2eba" (UID: "ffd7fd7a-f9cd-4d8a-a8e9-70959c5d2eba"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.095442 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b2c8cca0-a390-4db6-978d-d32af42ac290-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b2c8cca0-a390-4db6-978d-d32af42ac290" (UID: "b2c8cca0-a390-4db6-978d-d32af42ac290"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.097313 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2c8cca0-a390-4db6-978d-d32af42ac290-kube-api-access-6vv6c" (OuterVolumeSpecName: "kube-api-access-6vv6c") pod "b2c8cca0-a390-4db6-978d-d32af42ac290" (UID: "b2c8cca0-a390-4db6-978d-d32af42ac290"). InnerVolumeSpecName "kube-api-access-6vv6c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.099671 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0832be69-9adb-40ba-a288-42ec7741fc19-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0832be69-9adb-40ba-a288-42ec7741fc19" (UID: "0832be69-9adb-40ba-a288-42ec7741fc19"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.101162 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0832be69-9adb-40ba-a288-42ec7741fc19-kube-api-access-b9fcz" (OuterVolumeSpecName: "kube-api-access-b9fcz") pod "0832be69-9adb-40ba-a288-42ec7741fc19" (UID: "0832be69-9adb-40ba-a288-42ec7741fc19"). InnerVolumeSpecName "kube-api-access-b9fcz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.103048 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ffd7fd7a-f9cd-4d8a-a8e9-70959c5d2eba-kube-api-access-d6wjf" (OuterVolumeSpecName: "kube-api-access-d6wjf") pod "ffd7fd7a-f9cd-4d8a-a8e9-70959c5d2eba" (UID: "ffd7fd7a-f9cd-4d8a-a8e9-70959c5d2eba"). InnerVolumeSpecName "kube-api-access-d6wjf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.195047 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6wjf\" (UniqueName: \"kubernetes.io/projected/ffd7fd7a-f9cd-4d8a-a8e9-70959c5d2eba-kube-api-access-d6wjf\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.195082 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b2c8cca0-a390-4db6-978d-d32af42ac290-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.195092 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ffd7fd7a-f9cd-4d8a-a8e9-70959c5d2eba-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.195101 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b9fcz\" (UniqueName: \"kubernetes.io/projected/0832be69-9adb-40ba-a288-42ec7741fc19-kube-api-access-b9fcz\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.195110 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0832be69-9adb-40ba-a288-42ec7741fc19-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.195118 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6vv6c\" (UniqueName: \"kubernetes.io/projected/b2c8cca0-a390-4db6-978d-d32af42ac290-kube-api-access-6vv6c\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.435843 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-thjsw" event={"ID":"62737fad-a99b-41e3-9333-f3dd199563a7","Type":"ContainerDied","Data":"f7c96057eaf768c5096ecedbfe6a92a036600b8dc1c2bc1d22b5e37422464582"} Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.436189 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f7c96057eaf768c5096ecedbfe6a92a036600b8dc1c2bc1d22b5e37422464582" Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.435870 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-thjsw" Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.438626 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399","Type":"ContainerStarted","Data":"bad96dcf4a4f03f679f428d14fce3c35afb403afb6b9ec23a4c23d134a347cf3"} Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.440280 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-c1b2-account-create-update-4qfg8" event={"ID":"ffd7fd7a-f9cd-4d8a-a8e9-70959c5d2eba","Type":"ContainerDied","Data":"c0320d40b0a8c57cd5e4e1d5a6e118ab755a15b53932caf49c8d5778ff4ec59c"} Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.440310 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c0320d40b0a8c57cd5e4e1d5a6e118ab755a15b53932caf49c8d5778ff4ec59c" Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.440352 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-c1b2-account-create-update-4qfg8" Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.442619 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-f611-account-create-update-24frn" Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.442628 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-f611-account-create-update-24frn" event={"ID":"b2c8cca0-a390-4db6-978d-d32af42ac290","Type":"ContainerDied","Data":"371a606c340092e497d2c113fec0b0b95c2621f63dcdf6fce11779af393265f8"} Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.442655 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="371a606c340092e497d2c113fec0b0b95c2621f63dcdf6fce11779af393265f8" Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.444661 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-52vgm" event={"ID":"03eee55b-b660-45e7-91db-4491a89910ad","Type":"ContainerStarted","Data":"42acf378391cd4c6cf088ef3bd14951905d0f62836c6ba0285422f9e6ac10b74"} Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.444855 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-b8fbc5445-52vgm" Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.447136 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-6db4d" event={"ID":"0832be69-9adb-40ba-a288-42ec7741fc19","Type":"ContainerDied","Data":"db1a7cd14a21bbf763b7ce8ed074aef35decb37529d326e0b68e724aeef549c5"} Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.447189 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="db1a7cd14a21bbf763b7ce8ed074aef35decb37529d326e0b68e724aeef549c5" Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.447189 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-6db4d" Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.465864 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=6.712955309 podStartE2EDuration="37.465843802s" podCreationTimestamp="2025-11-28 13:49:34 +0000 UTC" firstStartedPulling="2025-11-28 13:49:40.179506118 +0000 UTC m=+1230.303447555" lastFinishedPulling="2025-11-28 13:50:10.932394611 +0000 UTC m=+1261.056336048" observedRunningTime="2025-11-28 13:50:11.460435227 +0000 UTC m=+1261.584376664" watchObservedRunningTime="2025-11-28 13:50:11.465843802 +0000 UTC m=+1261.589785249" Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.488258 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-b8fbc5445-52vgm" podStartSLOduration=2.488237335 podStartE2EDuration="2.488237335s" podCreationTimestamp="2025-11-28 13:50:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:50:11.48246697 +0000 UTC m=+1261.606408407" watchObservedRunningTime="2025-11-28 13:50:11.488237335 +0000 UTC m=+1261.612178772" Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.875579 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 28 13:50:11 crc kubenswrapper[4857]: I1128 13:50:11.904864 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7dacf187-3671-4114-a93e-e2296c8c20b2-etc-swift\") pod \"swift-storage-0\" (UID: \"7dacf187-3671-4114-a93e-e2296c8c20b2\") " pod="openstack/swift-storage-0" Nov 28 13:50:11 crc kubenswrapper[4857]: E1128 13:50:11.905053 4857 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 28 13:50:11 crc kubenswrapper[4857]: E1128 13:50:11.905085 4857 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 28 13:50:11 crc kubenswrapper[4857]: E1128 13:50:11.905149 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7dacf187-3671-4114-a93e-e2296c8c20b2-etc-swift podName:7dacf187-3671-4114-a93e-e2296c8c20b2 nodeName:}" failed. No retries permitted until 2025-11-28 13:50:13.905130269 +0000 UTC m=+1264.029071706 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/7dacf187-3671-4114-a93e-e2296c8c20b2-etc-swift") pod "swift-storage-0" (UID: "7dacf187-3671-4114-a93e-e2296c8c20b2") : configmap "swift-ring-files" not found Nov 28 13:50:12 crc kubenswrapper[4857]: I1128 13:50:12.238845 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25ef3505-47be-41dd-88e8-0fdfa2fc80f3" path="/var/lib/kubelet/pods/25ef3505-47be-41dd-88e8-0fdfa2fc80f3/volumes" Nov 28 13:50:12 crc kubenswrapper[4857]: I1128 13:50:12.419745 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-6dhdz"] Nov 28 13:50:12 crc kubenswrapper[4857]: E1128 13:50:12.420148 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2c8cca0-a390-4db6-978d-d32af42ac290" containerName="mariadb-account-create-update" Nov 28 13:50:12 crc kubenswrapper[4857]: I1128 13:50:12.420167 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2c8cca0-a390-4db6-978d-d32af42ac290" containerName="mariadb-account-create-update" Nov 28 13:50:12 crc kubenswrapper[4857]: E1128 13:50:12.420191 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0832be69-9adb-40ba-a288-42ec7741fc19" containerName="mariadb-database-create" Nov 28 13:50:12 crc kubenswrapper[4857]: I1128 13:50:12.420199 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="0832be69-9adb-40ba-a288-42ec7741fc19" containerName="mariadb-database-create" Nov 28 13:50:12 crc kubenswrapper[4857]: E1128 13:50:12.420214 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62737fad-a99b-41e3-9333-f3dd199563a7" containerName="mariadb-database-create" Nov 28 13:50:12 crc kubenswrapper[4857]: I1128 13:50:12.420222 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="62737fad-a99b-41e3-9333-f3dd199563a7" containerName="mariadb-database-create" Nov 28 13:50:12 crc kubenswrapper[4857]: E1128 13:50:12.420231 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffd7fd7a-f9cd-4d8a-a8e9-70959c5d2eba" containerName="mariadb-account-create-update" Nov 28 13:50:12 crc kubenswrapper[4857]: I1128 13:50:12.420238 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffd7fd7a-f9cd-4d8a-a8e9-70959c5d2eba" containerName="mariadb-account-create-update" Nov 28 13:50:12 crc kubenswrapper[4857]: I1128 13:50:12.420458 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="62737fad-a99b-41e3-9333-f3dd199563a7" containerName="mariadb-database-create" Nov 28 13:50:12 crc kubenswrapper[4857]: I1128 13:50:12.420482 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffd7fd7a-f9cd-4d8a-a8e9-70959c5d2eba" containerName="mariadb-account-create-update" Nov 28 13:50:12 crc kubenswrapper[4857]: I1128 13:50:12.420495 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="0832be69-9adb-40ba-a288-42ec7741fc19" containerName="mariadb-database-create" Nov 28 13:50:12 crc kubenswrapper[4857]: I1128 13:50:12.420515 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2c8cca0-a390-4db6-978d-d32af42ac290" containerName="mariadb-account-create-update" Nov 28 13:50:12 crc kubenswrapper[4857]: I1128 13:50:12.421057 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-6dhdz"
Nov 28 13:50:12 crc kubenswrapper[4857]: I1128 13:50:12.431454 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-6dhdz"]
Nov 28 13:50:12 crc kubenswrapper[4857]: I1128 13:50:12.515176 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3e942756-b43c-4088-a714-7445b3dab481-operator-scripts\") pod \"glance-db-create-6dhdz\" (UID: \"3e942756-b43c-4088-a714-7445b3dab481\") " pod="openstack/glance-db-create-6dhdz"
Nov 28 13:50:12 crc kubenswrapper[4857]: I1128 13:50:12.515358 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pbwlf\" (UniqueName: \"kubernetes.io/projected/3e942756-b43c-4088-a714-7445b3dab481-kube-api-access-pbwlf\") pod \"glance-db-create-6dhdz\" (UID: \"3e942756-b43c-4088-a714-7445b3dab481\") " pod="openstack/glance-db-create-6dhdz"
Nov 28 13:50:12 crc kubenswrapper[4857]: I1128 13:50:12.523996 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-25bf-account-create-update-2pcfz"]
Nov 28 13:50:12 crc kubenswrapper[4857]: I1128 13:50:12.525348 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-25bf-account-create-update-2pcfz"
Nov 28 13:50:12 crc kubenswrapper[4857]: I1128 13:50:12.531869 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-25bf-account-create-update-2pcfz"]
Nov 28 13:50:12 crc kubenswrapper[4857]: I1128 13:50:12.531905 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret"
Nov 28 13:50:12 crc kubenswrapper[4857]: I1128 13:50:12.616755 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3e942756-b43c-4088-a714-7445b3dab481-operator-scripts\") pod \"glance-db-create-6dhdz\" (UID: \"3e942756-b43c-4088-a714-7445b3dab481\") " pod="openstack/glance-db-create-6dhdz"
Nov 28 13:50:12 crc kubenswrapper[4857]: I1128 13:50:12.616834 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pbwlf\" (UniqueName: \"kubernetes.io/projected/3e942756-b43c-4088-a714-7445b3dab481-kube-api-access-pbwlf\") pod \"glance-db-create-6dhdz\" (UID: \"3e942756-b43c-4088-a714-7445b3dab481\") " pod="openstack/glance-db-create-6dhdz"
Nov 28 13:50:12 crc kubenswrapper[4857]: I1128 13:50:12.616876 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wlw6s\" (UniqueName: \"kubernetes.io/projected/9f1e36e4-55cc-422a-9fad-ef48cc42cbe6-kube-api-access-wlw6s\") pod \"glance-25bf-account-create-update-2pcfz\" (UID: \"9f1e36e4-55cc-422a-9fad-ef48cc42cbe6\") " pod="openstack/glance-25bf-account-create-update-2pcfz"
Nov 28 13:50:12 crc kubenswrapper[4857]: I1128 13:50:12.616917 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9f1e36e4-55cc-422a-9fad-ef48cc42cbe6-operator-scripts\") pod \"glance-25bf-account-create-update-2pcfz\" (UID: \"9f1e36e4-55cc-422a-9fad-ef48cc42cbe6\") " pod="openstack/glance-25bf-account-create-update-2pcfz"
Nov 28 13:50:12 crc kubenswrapper[4857]: I1128 13:50:12.617469 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3e942756-b43c-4088-a714-7445b3dab481-operator-scripts\") pod \"glance-db-create-6dhdz\" (UID: \"3e942756-b43c-4088-a714-7445b3dab481\") " pod="openstack/glance-db-create-6dhdz"
Nov 28 13:50:12 crc kubenswrapper[4857]: I1128 13:50:12.640218 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pbwlf\" (UniqueName: \"kubernetes.io/projected/3e942756-b43c-4088-a714-7445b3dab481-kube-api-access-pbwlf\") pod \"glance-db-create-6dhdz\" (UID: \"3e942756-b43c-4088-a714-7445b3dab481\") " pod="openstack/glance-db-create-6dhdz"
Nov 28 13:50:12 crc kubenswrapper[4857]: I1128 13:50:12.718638 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9f1e36e4-55cc-422a-9fad-ef48cc42cbe6-operator-scripts\") pod \"glance-25bf-account-create-update-2pcfz\" (UID: \"9f1e36e4-55cc-422a-9fad-ef48cc42cbe6\") " pod="openstack/glance-25bf-account-create-update-2pcfz"
Nov 28 13:50:12 crc kubenswrapper[4857]: I1128 13:50:12.719290 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9f1e36e4-55cc-422a-9fad-ef48cc42cbe6-operator-scripts\") pod \"glance-25bf-account-create-update-2pcfz\" (UID: \"9f1e36e4-55cc-422a-9fad-ef48cc42cbe6\") " pod="openstack/glance-25bf-account-create-update-2pcfz"
Nov 28 13:50:12 crc kubenswrapper[4857]: I1128 13:50:12.719508 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wlw6s\" (UniqueName: \"kubernetes.io/projected/9f1e36e4-55cc-422a-9fad-ef48cc42cbe6-kube-api-access-wlw6s\") pod \"glance-25bf-account-create-update-2pcfz\" (UID: \"9f1e36e4-55cc-422a-9fad-ef48cc42cbe6\") " pod="openstack/glance-25bf-account-create-update-2pcfz"
Nov 28 13:50:12 crc kubenswrapper[4857]: I1128 13:50:12.735168 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wlw6s\" (UniqueName: \"kubernetes.io/projected/9f1e36e4-55cc-422a-9fad-ef48cc42cbe6-kube-api-access-wlw6s\") pod \"glance-25bf-account-create-update-2pcfz\" (UID: \"9f1e36e4-55cc-422a-9fad-ef48cc42cbe6\") " pod="openstack/glance-25bf-account-create-update-2pcfz"
Nov 28 13:50:12 crc kubenswrapper[4857]: I1128 13:50:12.739460 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-6dhdz"
Nov 28 13:50:12 crc kubenswrapper[4857]: I1128 13:50:12.851365 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-25bf-account-create-update-2pcfz"
Nov 28 13:50:13 crc kubenswrapper[4857]: I1128 13:50:13.180618 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-6dhdz"]
Nov 28 13:50:13 crc kubenswrapper[4857]: I1128 13:50:13.312558 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-25bf-account-create-update-2pcfz"]
Nov 28 13:50:13 crc kubenswrapper[4857]: I1128 13:50:13.465870 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-6dhdz" event={"ID":"3e942756-b43c-4088-a714-7445b3dab481","Type":"ContainerStarted","Data":"130df371f30e01f12e47e3db026bd7dbc2ef67bb3088c20d0b0da3ed10562080"}
Nov 28 13:50:13 crc kubenswrapper[4857]: I1128 13:50:13.465914 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-6dhdz" event={"ID":"3e942756-b43c-4088-a714-7445b3dab481","Type":"ContainerStarted","Data":"9ed05f01be49a52d3819ff7b5134f0bcbb21f5186e3073096ffdf5c8fbeb345b"}
Nov 28 13:50:13 crc kubenswrapper[4857]: I1128 13:50:13.469103 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-25bf-account-create-update-2pcfz" event={"ID":"9f1e36e4-55cc-422a-9fad-ef48cc42cbe6","Type":"ContainerStarted","Data":"f92e33ffe665a2c08209c1d9b0953183209fcc8c900929235b85b2c7815d4fcb"}
Nov 28 13:50:13 crc kubenswrapper[4857]: I1128 13:50:13.469136 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-25bf-account-create-update-2pcfz" event={"ID":"9f1e36e4-55cc-422a-9fad-ef48cc42cbe6","Type":"ContainerStarted","Data":"2020e579056c68d6133db82b772ca9ba8d35946978d3feade829ee6f5672ca97"}
Nov 28 13:50:13 crc kubenswrapper[4857]: I1128 13:50:13.487529 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-create-6dhdz" podStartSLOduration=1.487490928 podStartE2EDuration="1.487490928s" podCreationTimestamp="2025-11-28 13:50:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:50:13.480860009 +0000 UTC m=+1263.604801446" watchObservedRunningTime="2025-11-28 13:50:13.487490928 +0000 UTC m=+1263.611432365"
Nov 28 13:50:13 crc kubenswrapper[4857]: E1128 13:50:13.941091 4857 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Nov 28 13:50:13 crc kubenswrapper[4857]: E1128 13:50:13.941457 4857 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Nov 28 13:50:13 crc kubenswrapper[4857]: E1128 13:50:13.941523 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7dacf187-3671-4114-a93e-e2296c8c20b2-etc-swift podName:7dacf187-3671-4114-a93e-e2296c8c20b2 nodeName:}" failed. No retries permitted until 2025-11-28 13:50:17.94150327 +0000 UTC m=+1268.065444707 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/7dacf187-3671-4114-a93e-e2296c8c20b2-etc-swift") pod "swift-storage-0" (UID: "7dacf187-3671-4114-a93e-e2296c8c20b2") : configmap "swift-ring-files" not found
Nov 28 13:50:13 crc kubenswrapper[4857]: I1128 13:50:13.940925 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7dacf187-3671-4114-a93e-e2296c8c20b2-etc-swift\") pod \"swift-storage-0\" (UID: \"7dacf187-3671-4114-a93e-e2296c8c20b2\") " pod="openstack/swift-storage-0"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.183049 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-9xbdh"]
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.184694 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-9xbdh"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.192918 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.192917 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.193015 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.194086 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-9xbdh"]
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.254883 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-9xbdh"]
Nov 28 13:50:14 crc kubenswrapper[4857]: E1128 13:50:14.255816 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle dispersionconf etc-swift kube-api-access-km629 ring-data-devices scripts swiftconf], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/swift-ring-rebalance-9xbdh" podUID="ea0ee1e2-f7b6-4b65-806f-208a67138dac"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.256026 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-km629\" (UniqueName: \"kubernetes.io/projected/ea0ee1e2-f7b6-4b65-806f-208a67138dac-kube-api-access-km629\") pod \"swift-ring-rebalance-9xbdh\" (UID: \"ea0ee1e2-f7b6-4b65-806f-208a67138dac\") " pod="openstack/swift-ring-rebalance-9xbdh"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.256096 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/ea0ee1e2-f7b6-4b65-806f-208a67138dac-dispersionconf\") pod \"swift-ring-rebalance-9xbdh\" (UID: \"ea0ee1e2-f7b6-4b65-806f-208a67138dac\") " pod="openstack/swift-ring-rebalance-9xbdh"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.256181 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/ea0ee1e2-f7b6-4b65-806f-208a67138dac-etc-swift\") pod \"swift-ring-rebalance-9xbdh\" (UID: \"ea0ee1e2-f7b6-4b65-806f-208a67138dac\") " pod="openstack/swift-ring-rebalance-9xbdh"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.256237 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/ea0ee1e2-f7b6-4b65-806f-208a67138dac-ring-data-devices\") pod \"swift-ring-rebalance-9xbdh\" (UID: \"ea0ee1e2-f7b6-4b65-806f-208a67138dac\") " pod="openstack/swift-ring-rebalance-9xbdh"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.256270 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/ea0ee1e2-f7b6-4b65-806f-208a67138dac-swiftconf\") pod \"swift-ring-rebalance-9xbdh\" (UID: \"ea0ee1e2-f7b6-4b65-806f-208a67138dac\") " pod="openstack/swift-ring-rebalance-9xbdh"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.256324 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ea0ee1e2-f7b6-4b65-806f-208a67138dac-scripts\") pod \"swift-ring-rebalance-9xbdh\" (UID: \"ea0ee1e2-f7b6-4b65-806f-208a67138dac\") " pod="openstack/swift-ring-rebalance-9xbdh"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.256499 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea0ee1e2-f7b6-4b65-806f-208a67138dac-combined-ca-bundle\") pod \"swift-ring-rebalance-9xbdh\" (UID: \"ea0ee1e2-f7b6-4b65-806f-208a67138dac\") " pod="openstack/swift-ring-rebalance-9xbdh"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.269309 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-j66cj"]
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.270646 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-j66cj"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.300580 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-j66cj"]
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.358303 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/ea0ee1e2-f7b6-4b65-806f-208a67138dac-ring-data-devices\") pod \"swift-ring-rebalance-9xbdh\" (UID: \"ea0ee1e2-f7b6-4b65-806f-208a67138dac\") " pod="openstack/swift-ring-rebalance-9xbdh"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.358358 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/d59bcbed-e787-486b-9efc-8c12f7e58960-swiftconf\") pod \"swift-ring-rebalance-j66cj\" (UID: \"d59bcbed-e787-486b-9efc-8c12f7e58960\") " pod="openstack/swift-ring-rebalance-j66cj"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.358377 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/d59bcbed-e787-486b-9efc-8c12f7e58960-etc-swift\") pod \"swift-ring-rebalance-j66cj\" (UID: \"d59bcbed-e787-486b-9efc-8c12f7e58960\") " pod="openstack/swift-ring-rebalance-j66cj"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.358397 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/ea0ee1e2-f7b6-4b65-806f-208a67138dac-swiftconf\") pod \"swift-ring-rebalance-9xbdh\" (UID: \"ea0ee1e2-f7b6-4b65-806f-208a67138dac\") " pod="openstack/swift-ring-rebalance-9xbdh"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.358428 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d59bcbed-e787-486b-9efc-8c12f7e58960-combined-ca-bundle\") pod \"swift-ring-rebalance-j66cj\" (UID: \"d59bcbed-e787-486b-9efc-8c12f7e58960\") " pod="openstack/swift-ring-rebalance-j66cj"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.358453 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ea0ee1e2-f7b6-4b65-806f-208a67138dac-scripts\") pod \"swift-ring-rebalance-9xbdh\" (UID: \"ea0ee1e2-f7b6-4b65-806f-208a67138dac\") " pod="openstack/swift-ring-rebalance-9xbdh"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.358489 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pr5tf\" (UniqueName: \"kubernetes.io/projected/d59bcbed-e787-486b-9efc-8c12f7e58960-kube-api-access-pr5tf\") pod \"swift-ring-rebalance-j66cj\" (UID: \"d59bcbed-e787-486b-9efc-8c12f7e58960\") " pod="openstack/swift-ring-rebalance-j66cj"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.358549 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea0ee1e2-f7b6-4b65-806f-208a67138dac-combined-ca-bundle\") pod \"swift-ring-rebalance-9xbdh\" (UID: \"ea0ee1e2-f7b6-4b65-806f-208a67138dac\") " pod="openstack/swift-ring-rebalance-9xbdh"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.358579 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/d59bcbed-e787-486b-9efc-8c12f7e58960-dispersionconf\") pod \"swift-ring-rebalance-j66cj\" (UID: \"d59bcbed-e787-486b-9efc-8c12f7e58960\") " pod="openstack/swift-ring-rebalance-j66cj"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.358599 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-km629\" (UniqueName: \"kubernetes.io/projected/ea0ee1e2-f7b6-4b65-806f-208a67138dac-kube-api-access-km629\") pod \"swift-ring-rebalance-9xbdh\" (UID: \"ea0ee1e2-f7b6-4b65-806f-208a67138dac\") " pod="openstack/swift-ring-rebalance-9xbdh"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.358656 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/ea0ee1e2-f7b6-4b65-806f-208a67138dac-dispersionconf\") pod \"swift-ring-rebalance-9xbdh\" (UID: \"ea0ee1e2-f7b6-4b65-806f-208a67138dac\") " pod="openstack/swift-ring-rebalance-9xbdh"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.358678 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d59bcbed-e787-486b-9efc-8c12f7e58960-scripts\") pod \"swift-ring-rebalance-j66cj\" (UID: \"d59bcbed-e787-486b-9efc-8c12f7e58960\") " pod="openstack/swift-ring-rebalance-j66cj"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.358727 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/ea0ee1e2-f7b6-4b65-806f-208a67138dac-etc-swift\") pod \"swift-ring-rebalance-9xbdh\" (UID: \"ea0ee1e2-f7b6-4b65-806f-208a67138dac\") " pod="openstack/swift-ring-rebalance-9xbdh"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.358785 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/d59bcbed-e787-486b-9efc-8c12f7e58960-ring-data-devices\") pod \"swift-ring-rebalance-j66cj\" (UID: \"d59bcbed-e787-486b-9efc-8c12f7e58960\") " pod="openstack/swift-ring-rebalance-j66cj"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.358978 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/ea0ee1e2-f7b6-4b65-806f-208a67138dac-etc-swift\") pod \"swift-ring-rebalance-9xbdh\" (UID: \"ea0ee1e2-f7b6-4b65-806f-208a67138dac\") " pod="openstack/swift-ring-rebalance-9xbdh"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.359273 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/ea0ee1e2-f7b6-4b65-806f-208a67138dac-ring-data-devices\") pod \"swift-ring-rebalance-9xbdh\" (UID: \"ea0ee1e2-f7b6-4b65-806f-208a67138dac\") " pod="openstack/swift-ring-rebalance-9xbdh"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.359572 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ea0ee1e2-f7b6-4b65-806f-208a67138dac-scripts\") pod \"swift-ring-rebalance-9xbdh\" (UID: \"ea0ee1e2-f7b6-4b65-806f-208a67138dac\") " pod="openstack/swift-ring-rebalance-9xbdh"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.364559 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/ea0ee1e2-f7b6-4b65-806f-208a67138dac-dispersionconf\") pod \"swift-ring-rebalance-9xbdh\" (UID: \"ea0ee1e2-f7b6-4b65-806f-208a67138dac\") " pod="openstack/swift-ring-rebalance-9xbdh"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.365049 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea0ee1e2-f7b6-4b65-806f-208a67138dac-combined-ca-bundle\") pod \"swift-ring-rebalance-9xbdh\" (UID: \"ea0ee1e2-f7b6-4b65-806f-208a67138dac\") " pod="openstack/swift-ring-rebalance-9xbdh"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.366052 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/ea0ee1e2-f7b6-4b65-806f-208a67138dac-swiftconf\") pod \"swift-ring-rebalance-9xbdh\" (UID: \"ea0ee1e2-f7b6-4b65-806f-208a67138dac\") " pod="openstack/swift-ring-rebalance-9xbdh"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.378521 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-km629\" (UniqueName: \"kubernetes.io/projected/ea0ee1e2-f7b6-4b65-806f-208a67138dac-kube-api-access-km629\") pod \"swift-ring-rebalance-9xbdh\" (UID: \"ea0ee1e2-f7b6-4b65-806f-208a67138dac\") " pod="openstack/swift-ring-rebalance-9xbdh"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.460441 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/d59bcbed-e787-486b-9efc-8c12f7e58960-dispersionconf\") pod \"swift-ring-rebalance-j66cj\" (UID: \"d59bcbed-e787-486b-9efc-8c12f7e58960\") " pod="openstack/swift-ring-rebalance-j66cj"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.460509 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d59bcbed-e787-486b-9efc-8c12f7e58960-scripts\") pod \"swift-ring-rebalance-j66cj\" (UID: \"d59bcbed-e787-486b-9efc-8c12f7e58960\") " pod="openstack/swift-ring-rebalance-j66cj"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.460567 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/d59bcbed-e787-486b-9efc-8c12f7e58960-ring-data-devices\") pod \"swift-ring-rebalance-j66cj\" (UID: \"d59bcbed-e787-486b-9efc-8c12f7e58960\") " pod="openstack/swift-ring-rebalance-j66cj"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.460601 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/d59bcbed-e787-486b-9efc-8c12f7e58960-swiftconf\") pod \"swift-ring-rebalance-j66cj\" (UID: \"d59bcbed-e787-486b-9efc-8c12f7e58960\") " pod="openstack/swift-ring-rebalance-j66cj"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.460623 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/d59bcbed-e787-486b-9efc-8c12f7e58960-etc-swift\") pod \"swift-ring-rebalance-j66cj\" (UID: \"d59bcbed-e787-486b-9efc-8c12f7e58960\") " pod="openstack/swift-ring-rebalance-j66cj"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.460657 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d59bcbed-e787-486b-9efc-8c12f7e58960-combined-ca-bundle\") pod \"swift-ring-rebalance-j66cj\" (UID: \"d59bcbed-e787-486b-9efc-8c12f7e58960\") " pod="openstack/swift-ring-rebalance-j66cj"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.460708 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pr5tf\" (UniqueName: \"kubernetes.io/projected/d59bcbed-e787-486b-9efc-8c12f7e58960-kube-api-access-pr5tf\") pod \"swift-ring-rebalance-j66cj\" (UID: \"d59bcbed-e787-486b-9efc-8c12f7e58960\") " pod="openstack/swift-ring-rebalance-j66cj"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.461531 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/d59bcbed-e787-486b-9efc-8c12f7e58960-ring-data-devices\") pod \"swift-ring-rebalance-j66cj\" (UID: \"d59bcbed-e787-486b-9efc-8c12f7e58960\") " pod="openstack/swift-ring-rebalance-j66cj"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.461538 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/d59bcbed-e787-486b-9efc-8c12f7e58960-etc-swift\") pod \"swift-ring-rebalance-j66cj\" (UID: \"d59bcbed-e787-486b-9efc-8c12f7e58960\") " pod="openstack/swift-ring-rebalance-j66cj"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.461866 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d59bcbed-e787-486b-9efc-8c12f7e58960-scripts\") pod \"swift-ring-rebalance-j66cj\" (UID: \"d59bcbed-e787-486b-9efc-8c12f7e58960\") " pod="openstack/swift-ring-rebalance-j66cj"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.464210 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/d59bcbed-e787-486b-9efc-8c12f7e58960-dispersionconf\") pod \"swift-ring-rebalance-j66cj\" (UID: \"d59bcbed-e787-486b-9efc-8c12f7e58960\") " pod="openstack/swift-ring-rebalance-j66cj"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.464620 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/d59bcbed-e787-486b-9efc-8c12f7e58960-swiftconf\") pod \"swift-ring-rebalance-j66cj\" (UID: \"d59bcbed-e787-486b-9efc-8c12f7e58960\") " pod="openstack/swift-ring-rebalance-j66cj"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.466620 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d59bcbed-e787-486b-9efc-8c12f7e58960-combined-ca-bundle\") pod \"swift-ring-rebalance-j66cj\" (UID: \"d59bcbed-e787-486b-9efc-8c12f7e58960\") " pod="openstack/swift-ring-rebalance-j66cj"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.477500 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pr5tf\" (UniqueName: \"kubernetes.io/projected/d59bcbed-e787-486b-9efc-8c12f7e58960-kube-api-access-pr5tf\") pod \"swift-ring-rebalance-j66cj\" (UID: \"d59bcbed-e787-486b-9efc-8c12f7e58960\") " pod="openstack/swift-ring-rebalance-j66cj"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.479086 4857 generic.go:334] "Generic (PLEG): container finished" podID="3e942756-b43c-4088-a714-7445b3dab481" containerID="130df371f30e01f12e47e3db026bd7dbc2ef67bb3088c20d0b0da3ed10562080" exitCode=0
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.479198 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-6dhdz" event={"ID":"3e942756-b43c-4088-a714-7445b3dab481","Type":"ContainerDied","Data":"130df371f30e01f12e47e3db026bd7dbc2ef67bb3088c20d0b0da3ed10562080"}
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.480741 4857 generic.go:334] "Generic (PLEG): container finished" podID="9f1e36e4-55cc-422a-9fad-ef48cc42cbe6" containerID="f92e33ffe665a2c08209c1d9b0953183209fcc8c900929235b85b2c7815d4fcb" exitCode=0
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.480863 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-9xbdh"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.481800 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-25bf-account-create-update-2pcfz" event={"ID":"9f1e36e4-55cc-422a-9fad-ef48cc42cbe6","Type":"ContainerDied","Data":"f92e33ffe665a2c08209c1d9b0953183209fcc8c900929235b85b2c7815d4fcb"}
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.553331 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-9xbdh"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.591234 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-j66cj"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.662824 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/ea0ee1e2-f7b6-4b65-806f-208a67138dac-etc-swift\") pod \"ea0ee1e2-f7b6-4b65-806f-208a67138dac\" (UID: \"ea0ee1e2-f7b6-4b65-806f-208a67138dac\") "
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.662906 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-km629\" (UniqueName: \"kubernetes.io/projected/ea0ee1e2-f7b6-4b65-806f-208a67138dac-kube-api-access-km629\") pod \"ea0ee1e2-f7b6-4b65-806f-208a67138dac\" (UID: \"ea0ee1e2-f7b6-4b65-806f-208a67138dac\") "
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.663037 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ea0ee1e2-f7b6-4b65-806f-208a67138dac-scripts\") pod \"ea0ee1e2-f7b6-4b65-806f-208a67138dac\" (UID: \"ea0ee1e2-f7b6-4b65-806f-208a67138dac\") "
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.663098 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/ea0ee1e2-f7b6-4b65-806f-208a67138dac-ring-data-devices\") pod \"ea0ee1e2-f7b6-4b65-806f-208a67138dac\" (UID: \"ea0ee1e2-f7b6-4b65-806f-208a67138dac\") "
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.663244 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ea0ee1e2-f7b6-4b65-806f-208a67138dac-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "ea0ee1e2-f7b6-4b65-806f-208a67138dac" (UID: "ea0ee1e2-f7b6-4b65-806f-208a67138dac"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.663598 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea0ee1e2-f7b6-4b65-806f-208a67138dac-scripts" (OuterVolumeSpecName: "scripts") pod "ea0ee1e2-f7b6-4b65-806f-208a67138dac" (UID: "ea0ee1e2-f7b6-4b65-806f-208a67138dac"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.663696 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea0ee1e2-f7b6-4b65-806f-208a67138dac-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "ea0ee1e2-f7b6-4b65-806f-208a67138dac" (UID: "ea0ee1e2-f7b6-4b65-806f-208a67138dac"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.663192 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/ea0ee1e2-f7b6-4b65-806f-208a67138dac-swiftconf\") pod \"ea0ee1e2-f7b6-4b65-806f-208a67138dac\" (UID: \"ea0ee1e2-f7b6-4b65-806f-208a67138dac\") "
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.663821 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/ea0ee1e2-f7b6-4b65-806f-208a67138dac-dispersionconf\") pod \"ea0ee1e2-f7b6-4b65-806f-208a67138dac\" (UID: \"ea0ee1e2-f7b6-4b65-806f-208a67138dac\") "
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.664357 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea0ee1e2-f7b6-4b65-806f-208a67138dac-combined-ca-bundle\") pod \"ea0ee1e2-f7b6-4b65-806f-208a67138dac\" (UID: \"ea0ee1e2-f7b6-4b65-806f-208a67138dac\") "
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.665014 4857 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/ea0ee1e2-f7b6-4b65-806f-208a67138dac-etc-swift\") on node \"crc\" DevicePath \"\""
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.665050 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ea0ee1e2-f7b6-4b65-806f-208a67138dac-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.665066 4857 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/ea0ee1e2-f7b6-4b65-806f-208a67138dac-ring-data-devices\") on node \"crc\" DevicePath \"\""
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.667727 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ea0ee1e2-f7b6-4b65-806f-208a67138dac-kube-api-access-km629" (OuterVolumeSpecName: "kube-api-access-km629") pod "ea0ee1e2-f7b6-4b65-806f-208a67138dac" (UID: "ea0ee1e2-f7b6-4b65-806f-208a67138dac"). InnerVolumeSpecName "kube-api-access-km629". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.667769 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ea0ee1e2-f7b6-4b65-806f-208a67138dac-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "ea0ee1e2-f7b6-4b65-806f-208a67138dac" (UID: "ea0ee1e2-f7b6-4b65-806f-208a67138dac"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.667811 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ea0ee1e2-f7b6-4b65-806f-208a67138dac-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "ea0ee1e2-f7b6-4b65-806f-208a67138dac" (UID: "ea0ee1e2-f7b6-4b65-806f-208a67138dac"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.671979 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ea0ee1e2-f7b6-4b65-806f-208a67138dac-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ea0ee1e2-f7b6-4b65-806f-208a67138dac" (UID: "ea0ee1e2-f7b6-4b65-806f-208a67138dac"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.769737 4857 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/ea0ee1e2-f7b6-4b65-806f-208a67138dac-swiftconf\") on node \"crc\" DevicePath \"\""
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.769768 4857 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/ea0ee1e2-f7b6-4b65-806f-208a67138dac-dispersionconf\") on node \"crc\" DevicePath \"\""
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.769782 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea0ee1e2-f7b6-4b65-806f-208a67138dac-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.769797 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-km629\" (UniqueName: \"kubernetes.io/projected/ea0ee1e2-f7b6-4b65-806f-208a67138dac-kube-api-access-km629\") on node \"crc\" DevicePath \"\""
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.914560 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0"
Nov 28 13:50:14 crc kubenswrapper[4857]: I1128 13:50:14.915247 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0"
Nov 28 13:50:15 crc kubenswrapper[4857]: I1128 13:50:15.073681 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-j66cj"]
Nov 28 13:50:15 crc kubenswrapper[4857]: W1128 13:50:15.078327 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd59bcbed_e787_486b_9efc_8c12f7e58960.slice/crio-45e4efffe33a96f45d7a9225fe27f3063f96a101fd737bb9e9bfcdd47af23bbd WatchSource:0}: Error finding container 45e4efffe33a96f45d7a9225fe27f3063f96a101fd737bb9e9bfcdd47af23bbd: Status 404 returned error can't find the container with id 45e4efffe33a96f45d7a9225fe27f3063f96a101fd737bb9e9bfcdd47af23bbd
Nov 28 13:50:15 crc kubenswrapper[4857]: I1128 13:50:15.489973 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-j66cj" event={"ID":"d59bcbed-e787-486b-9efc-8c12f7e58960","Type":"ContainerStarted","Data":"45e4efffe33a96f45d7a9225fe27f3063f96a101fd737bb9e9bfcdd47af23bbd"}
Nov 28 13:50:15 crc kubenswrapper[4857]: I1128 13:50:15.490152 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-9xbdh"
Nov 28 13:50:15 crc kubenswrapper[4857]: I1128 13:50:15.551061 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-9xbdh"]
Nov 28 13:50:15 crc kubenswrapper[4857]: I1128 13:50:15.559871 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-9xbdh"]
Nov 28 13:50:15 crc kubenswrapper[4857]: I1128 13:50:15.925330 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-25bf-account-create-update-2pcfz"
Nov 28 13:50:15 crc kubenswrapper[4857]: I1128 13:50:15.948678 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-6dhdz"
Nov 28 13:50:15 crc kubenswrapper[4857]: I1128 13:50:15.991259 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3e942756-b43c-4088-a714-7445b3dab481-operator-scripts\") pod \"3e942756-b43c-4088-a714-7445b3dab481\" (UID: \"3e942756-b43c-4088-a714-7445b3dab481\") "
Nov 28 13:50:15 crc kubenswrapper[4857]: I1128 13:50:15.991610 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9f1e36e4-55cc-422a-9fad-ef48cc42cbe6-operator-scripts\") pod \"9f1e36e4-55cc-422a-9fad-ef48cc42cbe6\" (UID: \"9f1e36e4-55cc-422a-9fad-ef48cc42cbe6\") "
Nov 28 13:50:15 crc kubenswrapper[4857]: I1128 13:50:15.991849 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3e942756-b43c-4088-a714-7445b3dab481-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3e942756-b43c-4088-a714-7445b3dab481" (UID: "3e942756-b43c-4088-a714-7445b3dab481"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:50:15 crc kubenswrapper[4857]: I1128 13:50:15.992126 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9f1e36e4-55cc-422a-9fad-ef48cc42cbe6-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9f1e36e4-55cc-422a-9fad-ef48cc42cbe6" (UID: "9f1e36e4-55cc-422a-9fad-ef48cc42cbe6"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:50:15 crc kubenswrapper[4857]: I1128 13:50:15.992294 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pbwlf\" (UniqueName: \"kubernetes.io/projected/3e942756-b43c-4088-a714-7445b3dab481-kube-api-access-pbwlf\") pod \"3e942756-b43c-4088-a714-7445b3dab481\" (UID: \"3e942756-b43c-4088-a714-7445b3dab481\") "
Nov 28 13:50:15 crc kubenswrapper[4857]: I1128 13:50:15.992990 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wlw6s\" (UniqueName: \"kubernetes.io/projected/9f1e36e4-55cc-422a-9fad-ef48cc42cbe6-kube-api-access-wlw6s\") pod \"9f1e36e4-55cc-422a-9fad-ef48cc42cbe6\" (UID: \"9f1e36e4-55cc-422a-9fad-ef48cc42cbe6\") "
Nov 28 13:50:15 crc kubenswrapper[4857]: I1128 13:50:15.993342 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3e942756-b43c-4088-a714-7445b3dab481-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 13:50:15 crc kubenswrapper[4857]: I1128 13:50:15.993358 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9f1e36e4-55cc-422a-9fad-ef48cc42cbe6-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 13:50:15 crc kubenswrapper[4857]: I1128 13:50:15.997389 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f1e36e4-55cc-422a-9fad-ef48cc42cbe6-kube-api-access-wlw6s" (OuterVolumeSpecName: "kube-api-access-wlw6s") pod "9f1e36e4-55cc-422a-9fad-ef48cc42cbe6" (UID: "9f1e36e4-55cc-422a-9fad-ef48cc42cbe6"). InnerVolumeSpecName "kube-api-access-wlw6s". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:50:16 crc kubenswrapper[4857]: I1128 13:50:16.000192 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3e942756-b43c-4088-a714-7445b3dab481-kube-api-access-pbwlf" (OuterVolumeSpecName: "kube-api-access-pbwlf") pod "3e942756-b43c-4088-a714-7445b3dab481" (UID: "3e942756-b43c-4088-a714-7445b3dab481"). InnerVolumeSpecName "kube-api-access-pbwlf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:50:16 crc kubenswrapper[4857]: I1128 13:50:16.095313 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pbwlf\" (UniqueName: \"kubernetes.io/projected/3e942756-b43c-4088-a714-7445b3dab481-kube-api-access-pbwlf\") on node \"crc\" DevicePath \"\""
Nov 28 13:50:16 crc kubenswrapper[4857]: I1128 13:50:16.095354 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wlw6s\" (UniqueName: \"kubernetes.io/projected/9f1e36e4-55cc-422a-9fad-ef48cc42cbe6-kube-api-access-wlw6s\") on node \"crc\" DevicePath \"\""
Nov 28 13:50:16 crc kubenswrapper[4857]: I1128 13:50:16.255003 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ea0ee1e2-f7b6-4b65-806f-208a67138dac" path="/var/lib/kubelet/pods/ea0ee1e2-f7b6-4b65-806f-208a67138dac/volumes"
Nov 28 13:50:16 crc kubenswrapper[4857]: I1128 13:50:16.513339 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-6dhdz" event={"ID":"3e942756-b43c-4088-a714-7445b3dab481","Type":"ContainerDied","Data":"9ed05f01be49a52d3819ff7b5134f0bcbb21f5186e3073096ffdf5c8fbeb345b"}
Nov 28 13:50:16 crc kubenswrapper[4857]: I1128 13:50:16.513399 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9ed05f01be49a52d3819ff7b5134f0bcbb21f5186e3073096ffdf5c8fbeb345b"
Nov 28 13:50:16 crc kubenswrapper[4857]: I1128 13:50:16.513363 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-6dhdz"
Nov 28 13:50:16 crc kubenswrapper[4857]: I1128 13:50:16.517356 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-25bf-account-create-update-2pcfz" event={"ID":"9f1e36e4-55cc-422a-9fad-ef48cc42cbe6","Type":"ContainerDied","Data":"2020e579056c68d6133db82b772ca9ba8d35946978d3feade829ee6f5672ca97"}
Nov 28 13:50:16 crc kubenswrapper[4857]: I1128 13:50:16.517391 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2020e579056c68d6133db82b772ca9ba8d35946978d3feade829ee6f5672ca97"
Nov 28 13:50:16 crc kubenswrapper[4857]: I1128 13:50:16.517451 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-25bf-account-create-update-2pcfz"
Nov 28 13:50:17 crc kubenswrapper[4857]: I1128 13:50:17.686964 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-f8mpb"]
Nov 28 13:50:17 crc kubenswrapper[4857]: E1128 13:50:17.687557 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e942756-b43c-4088-a714-7445b3dab481" containerName="mariadb-database-create"
Nov 28 13:50:17 crc kubenswrapper[4857]: I1128 13:50:17.687571 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e942756-b43c-4088-a714-7445b3dab481" containerName="mariadb-database-create"
Nov 28 13:50:17 crc kubenswrapper[4857]: E1128 13:50:17.687587 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f1e36e4-55cc-422a-9fad-ef48cc42cbe6" containerName="mariadb-account-create-update"
Nov 28 13:50:17 crc kubenswrapper[4857]: I1128 13:50:17.687593 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f1e36e4-55cc-422a-9fad-ef48cc42cbe6" containerName="mariadb-account-create-update"
Nov 28 13:50:17 crc kubenswrapper[4857]: I1128 13:50:17.687777 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e942756-b43c-4088-a714-7445b3dab481" containerName="mariadb-database-create"
Nov 28 13:50:17 crc kubenswrapper[4857]: I1128 13:50:17.687789 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="9f1e36e4-55cc-422a-9fad-ef48cc42cbe6" containerName="mariadb-account-create-update"
Nov 28 13:50:17 crc kubenswrapper[4857]: I1128 13:50:17.688361 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-f8mpb"
Nov 28 13:50:17 crc kubenswrapper[4857]: I1128 13:50:17.693132 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data"
Nov 28 13:50:17 crc kubenswrapper[4857]: I1128 13:50:17.694334 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-x48r5"
Nov 28 13:50:17 crc kubenswrapper[4857]: I1128 13:50:17.698503 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-f8mpb"]
Nov 28 13:50:17 crc kubenswrapper[4857]: I1128 13:50:17.848181 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2ccmh\" (UniqueName: \"kubernetes.io/projected/55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed-kube-api-access-2ccmh\") pod \"glance-db-sync-f8mpb\" (UID: \"55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed\") " pod="openstack/glance-db-sync-f8mpb"
Nov 28 13:50:17 crc kubenswrapper[4857]: I1128 13:50:17.848424 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed-config-data\") pod \"glance-db-sync-f8mpb\" (UID: \"55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed\") " pod="openstack/glance-db-sync-f8mpb"
Nov 28 13:50:17 crc kubenswrapper[4857]: I1128 13:50:17.848633 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed-db-sync-config-data\") pod \"glance-db-sync-f8mpb\" (UID: \"55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed\") " pod="openstack/glance-db-sync-f8mpb"
Nov 28 13:50:17 crc kubenswrapper[4857]: I1128 13:50:17.848808 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed-combined-ca-bundle\") pod \"glance-db-sync-f8mpb\" (UID: \"55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed\") " pod="openstack/glance-db-sync-f8mpb"
Nov 28 13:50:17 crc kubenswrapper[4857]: I1128 13:50:17.958539 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7dacf187-3671-4114-a93e-e2296c8c20b2-etc-swift\") pod \"swift-storage-0\" (UID: \"7dacf187-3671-4114-a93e-e2296c8c20b2\") " pod="openstack/swift-storage-0"
Nov 28 13:50:17 crc kubenswrapper[4857]: I1128 13:50:17.958693 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2ccmh\" (UniqueName: \"kubernetes.io/projected/55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed-kube-api-access-2ccmh\") pod \"glance-db-sync-f8mpb\" (UID: \"55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed\") " pod="openstack/glance-db-sync-f8mpb"
Nov 28 13:50:17 crc kubenswrapper[4857]: I1128 13:50:17.958728 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed-config-data\") pod \"glance-db-sync-f8mpb\" (UID: \"55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed\") " pod="openstack/glance-db-sync-f8mpb"
Nov 28 13:50:17 crc kubenswrapper[4857]: I1128 13:50:17.958843 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed-db-sync-config-data\") pod \"glance-db-sync-f8mpb\" (UID: \"55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed\") " pod="openstack/glance-db-sync-f8mpb"
Nov 28 13:50:17 crc kubenswrapper[4857]: E1128 13:50:17.958876 4857 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Nov 28 13:50:17 crc kubenswrapper[4857]: E1128 13:50:17.958904 4857 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Nov 28 13:50:17 crc kubenswrapper[4857]: E1128 13:50:17.958972 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7dacf187-3671-4114-a93e-e2296c8c20b2-etc-swift podName:7dacf187-3671-4114-a93e-e2296c8c20b2 nodeName:}" failed. No retries permitted until 2025-11-28 13:50:25.958938253 +0000 UTC m=+1276.082879690 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/7dacf187-3671-4114-a93e-e2296c8c20b2-etc-swift") pod "swift-storage-0" (UID: "7dacf187-3671-4114-a93e-e2296c8c20b2") : configmap "swift-ring-files" not found
Nov 28 13:50:17 crc kubenswrapper[4857]: I1128 13:50:17.958887 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed-combined-ca-bundle\") pod \"glance-db-sync-f8mpb\" (UID: \"55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed\") " pod="openstack/glance-db-sync-f8mpb"
Nov 28 13:50:17 crc kubenswrapper[4857]: I1128 13:50:17.964780 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed-db-sync-config-data\") pod \"glance-db-sync-f8mpb\" (UID: \"55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed\") " pod="openstack/glance-db-sync-f8mpb"
Nov 28 13:50:17 crc kubenswrapper[4857]: I1128 13:50:17.965589 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed-config-data\") pod \"glance-db-sync-f8mpb\" (UID: \"55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed\") " pod="openstack/glance-db-sync-f8mpb"
Nov 28 13:50:17 crc kubenswrapper[4857]: I1128 13:50:17.965810 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed-combined-ca-bundle\") pod \"glance-db-sync-f8mpb\" (UID: \"55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed\") " pod="openstack/glance-db-sync-f8mpb"
Nov 28 13:50:17 crc kubenswrapper[4857]: I1128 13:50:17.975858 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2ccmh\" (UniqueName: \"kubernetes.io/projected/55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed-kube-api-access-2ccmh\") pod \"glance-db-sync-f8mpb\" (UID: \"55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed\") " pod="openstack/glance-db-sync-f8mpb"
Nov 28 13:50:18 crc kubenswrapper[4857]: I1128 13:50:18.008739 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-f8mpb"
Nov 28 13:50:19 crc kubenswrapper[4857]: I1128 13:50:19.444422 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-b8fbc5445-52vgm"
Nov 28 13:50:19 crc kubenswrapper[4857]: I1128 13:50:19.500604 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-r5hzx"]
Nov 28 13:50:19 crc kubenswrapper[4857]: I1128 13:50:19.504911 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57d769cc4f-r5hzx" podUID="9cd2e351-6a59-49e5-89b3-d0d45981cfb3" containerName="dnsmasq-dns" containerID="cri-o://6dc53e260c595a9879f1e41321226dc8f2bca3cc8bc340c19e9ee55098c5b543" gracePeriod=10
Nov 28 13:50:20 crc kubenswrapper[4857]: I1128 13:50:20.556283 4857 generic.go:334] "Generic (PLEG): container finished" podID="9cd2e351-6a59-49e5-89b3-d0d45981cfb3" containerID="6dc53e260c595a9879f1e41321226dc8f2bca3cc8bc340c19e9ee55098c5b543" exitCode=0
Nov 28 13:50:20 crc kubenswrapper[4857]: I1128 13:50:20.556354 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-r5hzx" event={"ID":"9cd2e351-6a59-49e5-89b3-d0d45981cfb3","Type":"ContainerDied","Data":"6dc53e260c595a9879f1e41321226dc8f2bca3cc8bc340c19e9ee55098c5b543"}
Nov 28 13:50:20 crc kubenswrapper[4857]: I1128 13:50:20.919107 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0"
Nov 28 13:50:21 crc kubenswrapper[4857]: I1128 13:50:21.057548 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"]
Nov 28 13:50:21 crc kubenswrapper[4857]: I1128 13:50:21.058827 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Nov 28 13:50:21 crc kubenswrapper[4857]: I1128 13:50:21.069045 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts"
Nov 28 13:50:21 crc kubenswrapper[4857]: I1128 13:50:21.069201 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config"
Nov 28 13:50:21 crc kubenswrapper[4857]: I1128 13:50:21.069387 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs"
Nov 28 13:50:21 crc kubenswrapper[4857]: I1128 13:50:21.069464 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-98xwh"
Nov 28 13:50:21 crc kubenswrapper[4857]: I1128 13:50:21.104463 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"]
Nov 28 13:50:21 crc kubenswrapper[4857]: I1128 13:50:21.110775 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e6597fde-8e34-4ccb-8784-1b7aa3680488-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"e6597fde-8e34-4ccb-8784-1b7aa3680488\") " pod="openstack/ovn-northd-0"
Nov 28 13:50:21 crc kubenswrapper[4857]: I1128 13:50:21.111086 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6597fde-8e34-4ccb-8784-1b7aa3680488-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"e6597fde-8e34-4ccb-8784-1b7aa3680488\") " pod="openstack/ovn-northd-0"
Nov 28 13:50:21 crc kubenswrapper[4857]: I1128 13:50:21.111225 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6597fde-8e34-4ccb-8784-1b7aa3680488-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"e6597fde-8e34-4ccb-8784-1b7aa3680488\") " pod="openstack/ovn-northd-0"
Nov 28 13:50:21 crc kubenswrapper[4857]: I1128 13:50:21.111446 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6597fde-8e34-4ccb-8784-1b7aa3680488-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"e6597fde-8e34-4ccb-8784-1b7aa3680488\") " pod="openstack/ovn-northd-0"
Nov 28 13:50:21 crc kubenswrapper[4857]: I1128 13:50:21.111621 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e6597fde-8e34-4ccb-8784-1b7aa3680488-scripts\") pod \"ovn-northd-0\" (UID: \"e6597fde-8e34-4ccb-8784-1b7aa3680488\") " pod="openstack/ovn-northd-0"
Nov 28 13:50:21 crc kubenswrapper[4857]: I1128 13:50:21.111733 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e6597fde-8e34-4ccb-8784-1b7aa3680488-config\") pod \"ovn-northd-0\" (UID: \"e6597fde-8e34-4ccb-8784-1b7aa3680488\") " pod="openstack/ovn-northd-0"
Nov 28 13:50:21 crc kubenswrapper[4857]: I1128 13:50:21.111839 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jm7jj\" (UniqueName: \"kubernetes.io/projected/e6597fde-8e34-4ccb-8784-1b7aa3680488-kube-api-access-jm7jj\") pod \"ovn-northd-0\" (UID: \"e6597fde-8e34-4ccb-8784-1b7aa3680488\") " pod="openstack/ovn-northd-0"
Nov 28 13:50:21 crc kubenswrapper[4857]: I1128 13:50:21.213814 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e6597fde-8e34-4ccb-8784-1b7aa3680488-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"e6597fde-8e34-4ccb-8784-1b7aa3680488\") " pod="openstack/ovn-northd-0"
Nov 28 13:50:21 crc kubenswrapper[4857]: I1128 13:50:21.213867 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6597fde-8e34-4ccb-8784-1b7aa3680488-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"e6597fde-8e34-4ccb-8784-1b7aa3680488\") " pod="openstack/ovn-northd-0"
Nov 28 13:50:21 crc kubenswrapper[4857]: I1128 13:50:21.213900 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6597fde-8e34-4ccb-8784-1b7aa3680488-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"e6597fde-8e34-4ccb-8784-1b7aa3680488\") " pod="openstack/ovn-northd-0"
Nov 28 13:50:21 crc kubenswrapper[4857]: I1128 13:50:21.213971 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6597fde-8e34-4ccb-8784-1b7aa3680488-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"e6597fde-8e34-4ccb-8784-1b7aa3680488\") " pod="openstack/ovn-northd-0"
Nov 28 13:50:21 crc kubenswrapper[4857]: I1128 13:50:21.214023 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e6597fde-8e34-4ccb-8784-1b7aa3680488-scripts\") pod \"ovn-northd-0\" (UID: \"e6597fde-8e34-4ccb-8784-1b7aa3680488\") " pod="openstack/ovn-northd-0"
Nov 28 13:50:21 crc kubenswrapper[4857]: I1128 13:50:21.214045 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e6597fde-8e34-4ccb-8784-1b7aa3680488-config\") pod \"ovn-northd-0\" (UID: \"e6597fde-8e34-4ccb-8784-1b7aa3680488\") " pod="openstack/ovn-northd-0"
Nov 28 13:50:21 crc kubenswrapper[4857]: I1128 13:50:21.214066 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jm7jj\" (UniqueName: \"kubernetes.io/projected/e6597fde-8e34-4ccb-8784-1b7aa3680488-kube-api-access-jm7jj\") pod \"ovn-northd-0\" (UID: \"e6597fde-8e34-4ccb-8784-1b7aa3680488\") " pod="openstack/ovn-northd-0"
Nov 28 13:50:21 crc kubenswrapper[4857]: I1128 13:50:21.214367 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e6597fde-8e34-4ccb-8784-1b7aa3680488-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"e6597fde-8e34-4ccb-8784-1b7aa3680488\") " pod="openstack/ovn-northd-0"
Nov 28 13:50:21 crc kubenswrapper[4857]: I1128 13:50:21.215537 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e6597fde-8e34-4ccb-8784-1b7aa3680488-scripts\") pod \"ovn-northd-0\" (UID: \"e6597fde-8e34-4ccb-8784-1b7aa3680488\") " pod="openstack/ovn-northd-0"
Nov 28 13:50:21 crc kubenswrapper[4857]: I1128 13:50:21.216200 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e6597fde-8e34-4ccb-8784-1b7aa3680488-config\") pod \"ovn-northd-0\" (UID: \"e6597fde-8e34-4ccb-8784-1b7aa3680488\") " pod="openstack/ovn-northd-0"
Nov 28 13:50:21 crc kubenswrapper[4857]: I1128 13:50:21.220417 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6597fde-8e34-4ccb-8784-1b7aa3680488-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"e6597fde-8e34-4ccb-8784-1b7aa3680488\") " pod="openstack/ovn-northd-0"
Nov 28 13:50:21 crc kubenswrapper[4857]: I1128 13:50:21.221527 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6597fde-8e34-4ccb-8784-1b7aa3680488-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"e6597fde-8e34-4ccb-8784-1b7aa3680488\") " pod="openstack/ovn-northd-0"
Nov 28 13:50:21 crc kubenswrapper[4857]: I1128 13:50:21.224358 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6597fde-8e34-4ccb-8784-1b7aa3680488-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"e6597fde-8e34-4ccb-8784-1b7aa3680488\") " pod="openstack/ovn-northd-0"
Nov 28 13:50:21 crc kubenswrapper[4857]: I1128 13:50:21.229757 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jm7jj\" (UniqueName: \"kubernetes.io/projected/e6597fde-8e34-4ccb-8784-1b7aa3680488-kube-api-access-jm7jj\") pod \"ovn-northd-0\" (UID: \"e6597fde-8e34-4ccb-8784-1b7aa3680488\") " pod="openstack/ovn-northd-0"
Nov 28 13:50:21 crc kubenswrapper[4857]: I1128 13:50:21.408524 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Nov 28 13:50:23 crc kubenswrapper[4857]: I1128 13:50:23.034743 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-f8mpb"]
Nov 28 13:50:23 crc kubenswrapper[4857]: I1128 13:50:23.070085 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"]
Nov 28 13:50:23 crc kubenswrapper[4857]: I1128 13:50:23.163144 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-r5hzx"
Nov 28 13:50:23 crc kubenswrapper[4857]: I1128 13:50:23.251072 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m98fc\" (UniqueName: \"kubernetes.io/projected/9cd2e351-6a59-49e5-89b3-d0d45981cfb3-kube-api-access-m98fc\") pod \"9cd2e351-6a59-49e5-89b3-d0d45981cfb3\" (UID: \"9cd2e351-6a59-49e5-89b3-d0d45981cfb3\") "
Nov 28 13:50:23 crc kubenswrapper[4857]: I1128 13:50:23.251503 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9cd2e351-6a59-49e5-89b3-d0d45981cfb3-config\") pod \"9cd2e351-6a59-49e5-89b3-d0d45981cfb3\" (UID: \"9cd2e351-6a59-49e5-89b3-d0d45981cfb3\") "
Nov 28 13:50:23 crc kubenswrapper[4857]: I1128 13:50:23.251763 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9cd2e351-6a59-49e5-89b3-d0d45981cfb3-dns-svc\") pod \"9cd2e351-6a59-49e5-89b3-d0d45981cfb3\" (UID: \"9cd2e351-6a59-49e5-89b3-d0d45981cfb3\") "
Nov 28 13:50:23 crc kubenswrapper[4857]: I1128 13:50:23.256813 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9cd2e351-6a59-49e5-89b3-d0d45981cfb3-kube-api-access-m98fc" (OuterVolumeSpecName: "kube-api-access-m98fc") pod "9cd2e351-6a59-49e5-89b3-d0d45981cfb3" (UID: "9cd2e351-6a59-49e5-89b3-d0d45981cfb3"). InnerVolumeSpecName "kube-api-access-m98fc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:50:23 crc kubenswrapper[4857]: I1128 13:50:23.304679 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9cd2e351-6a59-49e5-89b3-d0d45981cfb3-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9cd2e351-6a59-49e5-89b3-d0d45981cfb3" (UID: "9cd2e351-6a59-49e5-89b3-d0d45981cfb3"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:50:23 crc kubenswrapper[4857]: I1128 13:50:23.311570 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9cd2e351-6a59-49e5-89b3-d0d45981cfb3-config" (OuterVolumeSpecName: "config") pod "9cd2e351-6a59-49e5-89b3-d0d45981cfb3" (UID: "9cd2e351-6a59-49e5-89b3-d0d45981cfb3"). InnerVolumeSpecName "config".
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:50:23 crc kubenswrapper[4857]: I1128 13:50:23.355963 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m98fc\" (UniqueName: \"kubernetes.io/projected/9cd2e351-6a59-49e5-89b3-d0d45981cfb3-kube-api-access-m98fc\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:23 crc kubenswrapper[4857]: I1128 13:50:23.356035 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9cd2e351-6a59-49e5-89b3-d0d45981cfb3-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:23 crc kubenswrapper[4857]: I1128 13:50:23.356063 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9cd2e351-6a59-49e5-89b3-d0d45981cfb3-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:23 crc kubenswrapper[4857]: I1128 13:50:23.598436 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"e6597fde-8e34-4ccb-8784-1b7aa3680488","Type":"ContainerStarted","Data":"61fcce362257cc668b6c26a68f147eb2f41e15df46f8195149a15276320c116e"} Nov 28 13:50:23 crc kubenswrapper[4857]: I1128 13:50:23.607481 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-r5hzx" event={"ID":"9cd2e351-6a59-49e5-89b3-d0d45981cfb3","Type":"ContainerDied","Data":"082cd85f18cc9acd90b32c340c7d83b4c19043c4424d8738a1966f257154b896"} Nov 28 13:50:23 crc kubenswrapper[4857]: I1128 13:50:23.607541 4857 scope.go:117] "RemoveContainer" containerID="6dc53e260c595a9879f1e41321226dc8f2bca3cc8bc340c19e9ee55098c5b543" Nov 28 13:50:23 crc kubenswrapper[4857]: I1128 13:50:23.607667 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-r5hzx" Nov 28 13:50:23 crc kubenswrapper[4857]: I1128 13:50:23.610087 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-f8mpb" event={"ID":"55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed","Type":"ContainerStarted","Data":"3be0492c1c532768e1493cbb4c8674b305df1092cd1463bcccec56a5278d8b5a"} Nov 28 13:50:23 crc kubenswrapper[4857]: I1128 13:50:23.611596 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-j66cj" event={"ID":"d59bcbed-e787-486b-9efc-8c12f7e58960","Type":"ContainerStarted","Data":"d6ccb0e6b21baea5a7b5a6eca4876d506f37cb827f59f9ec638ab87806a36902"} Nov 28 13:50:23 crc kubenswrapper[4857]: I1128 13:50:23.647266 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-j66cj" podStartSLOduration=1.6203262349999998 podStartE2EDuration="9.647086742s" podCreationTimestamp="2025-11-28 13:50:14 +0000 UTC" firstStartedPulling="2025-11-28 13:50:15.080824421 +0000 UTC m=+1265.204765858" lastFinishedPulling="2025-11-28 13:50:23.107584928 +0000 UTC m=+1273.231526365" observedRunningTime="2025-11-28 13:50:23.636098497 +0000 UTC m=+1273.760039934" watchObservedRunningTime="2025-11-28 13:50:23.647086742 +0000 UTC m=+1273.771028199" Nov 28 13:50:23 crc kubenswrapper[4857]: I1128 13:50:23.661774 4857 scope.go:117] "RemoveContainer" containerID="2057821e4fce08d803a52452d7fda50857a2c18e351f32b68064a8453ca5eb95" Nov 28 13:50:23 crc kubenswrapper[4857]: I1128 13:50:23.673416 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-r5hzx"] Nov 28 13:50:23 crc kubenswrapper[4857]: I1128 13:50:23.688610 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/dnsmasq-dns-57d769cc4f-r5hzx"] Nov 28 13:50:24 crc kubenswrapper[4857]: I1128 13:50:24.241796 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9cd2e351-6a59-49e5-89b3-d0d45981cfb3" path="/var/lib/kubelet/pods/9cd2e351-6a59-49e5-89b3-d0d45981cfb3/volumes" Nov 28 13:50:25 crc kubenswrapper[4857]: I1128 13:50:25.636326 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"e6597fde-8e34-4ccb-8784-1b7aa3680488","Type":"ContainerStarted","Data":"cb2e8e2db5b421b3011ac3c2410e936a5d71fda6c184fdd57c1dd60cdbcfc09b"} Nov 28 13:50:26 crc kubenswrapper[4857]: I1128 13:50:26.012930 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7dacf187-3671-4114-a93e-e2296c8c20b2-etc-swift\") pod \"swift-storage-0\" (UID: \"7dacf187-3671-4114-a93e-e2296c8c20b2\") " pod="openstack/swift-storage-0" Nov 28 13:50:26 crc kubenswrapper[4857]: E1128 13:50:26.013139 4857 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 28 13:50:26 crc kubenswrapper[4857]: E1128 13:50:26.013153 4857 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 28 13:50:26 crc kubenswrapper[4857]: E1128 13:50:26.013196 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/7dacf187-3671-4114-a93e-e2296c8c20b2-etc-swift podName:7dacf187-3671-4114-a93e-e2296c8c20b2 nodeName:}" failed. No retries permitted until 2025-11-28 13:50:42.013181589 +0000 UTC m=+1292.137123026 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/7dacf187-3671-4114-a93e-e2296c8c20b2-etc-swift") pod "swift-storage-0" (UID: "7dacf187-3671-4114-a93e-e2296c8c20b2") : configmap "swift-ring-files" not found Nov 28 13:50:26 crc kubenswrapper[4857]: I1128 13:50:26.646806 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"e6597fde-8e34-4ccb-8784-1b7aa3680488","Type":"ContainerStarted","Data":"f18e195c94e7d7c3606c3e2d5bbadff3adfb1a2f922e9e1872782f709251b377"} Nov 28 13:50:26 crc kubenswrapper[4857]: I1128 13:50:26.647157 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Nov 28 13:50:26 crc kubenswrapper[4857]: I1128 13:50:26.679565 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=3.640069106 podStartE2EDuration="5.679541469s" podCreationTimestamp="2025-11-28 13:50:21 +0000 UTC" firstStartedPulling="2025-11-28 13:50:23.089870662 +0000 UTC m=+1273.213812099" lastFinishedPulling="2025-11-28 13:50:25.129343025 +0000 UTC m=+1275.253284462" observedRunningTime="2025-11-28 13:50:26.669291993 +0000 UTC m=+1276.793233430" watchObservedRunningTime="2025-11-28 13:50:26.679541469 +0000 UTC m=+1276.803482916" Nov 28 13:50:27 crc kubenswrapper[4857]: I1128 13:50:27.110139 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-hgm54" Nov 28 13:50:27 crc kubenswrapper[4857]: I1128 13:50:27.126834 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-hgm54" Nov 28 13:50:27 crc kubenswrapper[4857]: I1128 13:50:27.355262 4857 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/ovn-controller-zhh8w-config-rsr2b"] Nov 28 13:50:27 crc kubenswrapper[4857]: E1128 13:50:27.356116 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9cd2e351-6a59-49e5-89b3-d0d45981cfb3" containerName="dnsmasq-dns" Nov 28 13:50:27 crc kubenswrapper[4857]: I1128 13:50:27.356142 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="9cd2e351-6a59-49e5-89b3-d0d45981cfb3" containerName="dnsmasq-dns" Nov 28 13:50:27 crc kubenswrapper[4857]: E1128 13:50:27.356181 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9cd2e351-6a59-49e5-89b3-d0d45981cfb3" containerName="init" Nov 28 13:50:27 crc kubenswrapper[4857]: I1128 13:50:27.356193 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="9cd2e351-6a59-49e5-89b3-d0d45981cfb3" containerName="init" Nov 28 13:50:27 crc kubenswrapper[4857]: I1128 13:50:27.356547 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="9cd2e351-6a59-49e5-89b3-d0d45981cfb3" containerName="dnsmasq-dns" Nov 28 13:50:27 crc kubenswrapper[4857]: I1128 13:50:27.357734 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-zhh8w-config-rsr2b" Nov 28 13:50:27 crc kubenswrapper[4857]: I1128 13:50:27.361084 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-zhh8w-config-rsr2b"] Nov 28 13:50:27 crc kubenswrapper[4857]: I1128 13:50:27.368140 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 28 13:50:27 crc kubenswrapper[4857]: I1128 13:50:27.473455 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7jnc8\" (UniqueName: \"kubernetes.io/projected/6cb75569-0597-4843-8917-1aa2551bb29a-kube-api-access-7jnc8\") pod \"ovn-controller-zhh8w-config-rsr2b\" (UID: \"6cb75569-0597-4843-8917-1aa2551bb29a\") " pod="openstack/ovn-controller-zhh8w-config-rsr2b" Nov 28 13:50:27 crc kubenswrapper[4857]: I1128 13:50:27.473565 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6cb75569-0597-4843-8917-1aa2551bb29a-var-run-ovn\") pod \"ovn-controller-zhh8w-config-rsr2b\" (UID: \"6cb75569-0597-4843-8917-1aa2551bb29a\") " pod="openstack/ovn-controller-zhh8w-config-rsr2b" Nov 28 13:50:27 crc kubenswrapper[4857]: I1128 13:50:27.473582 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6cb75569-0597-4843-8917-1aa2551bb29a-var-log-ovn\") pod \"ovn-controller-zhh8w-config-rsr2b\" (UID: \"6cb75569-0597-4843-8917-1aa2551bb29a\") " pod="openstack/ovn-controller-zhh8w-config-rsr2b" Nov 28 13:50:27 crc kubenswrapper[4857]: I1128 13:50:27.473599 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/6cb75569-0597-4843-8917-1aa2551bb29a-additional-scripts\") pod \"ovn-controller-zhh8w-config-rsr2b\" (UID: \"6cb75569-0597-4843-8917-1aa2551bb29a\") " pod="openstack/ovn-controller-zhh8w-config-rsr2b" Nov 28 13:50:27 crc kubenswrapper[4857]: I1128 13:50:27.473627 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6cb75569-0597-4843-8917-1aa2551bb29a-scripts\") pod \"ovn-controller-zhh8w-config-rsr2b\" (UID: 
\"6cb75569-0597-4843-8917-1aa2551bb29a\") " pod="openstack/ovn-controller-zhh8w-config-rsr2b" Nov 28 13:50:27 crc kubenswrapper[4857]: I1128 13:50:27.473653 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6cb75569-0597-4843-8917-1aa2551bb29a-var-run\") pod \"ovn-controller-zhh8w-config-rsr2b\" (UID: \"6cb75569-0597-4843-8917-1aa2551bb29a\") " pod="openstack/ovn-controller-zhh8w-config-rsr2b" Nov 28 13:50:27 crc kubenswrapper[4857]: I1128 13:50:27.575291 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6cb75569-0597-4843-8917-1aa2551bb29a-var-log-ovn\") pod \"ovn-controller-zhh8w-config-rsr2b\" (UID: \"6cb75569-0597-4843-8917-1aa2551bb29a\") " pod="openstack/ovn-controller-zhh8w-config-rsr2b" Nov 28 13:50:27 crc kubenswrapper[4857]: I1128 13:50:27.575339 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6cb75569-0597-4843-8917-1aa2551bb29a-var-run-ovn\") pod \"ovn-controller-zhh8w-config-rsr2b\" (UID: \"6cb75569-0597-4843-8917-1aa2551bb29a\") " pod="openstack/ovn-controller-zhh8w-config-rsr2b" Nov 28 13:50:27 crc kubenswrapper[4857]: I1128 13:50:27.575370 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/6cb75569-0597-4843-8917-1aa2551bb29a-additional-scripts\") pod \"ovn-controller-zhh8w-config-rsr2b\" (UID: \"6cb75569-0597-4843-8917-1aa2551bb29a\") " pod="openstack/ovn-controller-zhh8w-config-rsr2b" Nov 28 13:50:27 crc kubenswrapper[4857]: I1128 13:50:27.575407 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6cb75569-0597-4843-8917-1aa2551bb29a-scripts\") pod \"ovn-controller-zhh8w-config-rsr2b\" (UID: \"6cb75569-0597-4843-8917-1aa2551bb29a\") " pod="openstack/ovn-controller-zhh8w-config-rsr2b" Nov 28 13:50:27 crc kubenswrapper[4857]: I1128 13:50:27.575437 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6cb75569-0597-4843-8917-1aa2551bb29a-var-run\") pod \"ovn-controller-zhh8w-config-rsr2b\" (UID: \"6cb75569-0597-4843-8917-1aa2551bb29a\") " pod="openstack/ovn-controller-zhh8w-config-rsr2b" Nov 28 13:50:27 crc kubenswrapper[4857]: I1128 13:50:27.575508 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7jnc8\" (UniqueName: \"kubernetes.io/projected/6cb75569-0597-4843-8917-1aa2551bb29a-kube-api-access-7jnc8\") pod \"ovn-controller-zhh8w-config-rsr2b\" (UID: \"6cb75569-0597-4843-8917-1aa2551bb29a\") " pod="openstack/ovn-controller-zhh8w-config-rsr2b" Nov 28 13:50:27 crc kubenswrapper[4857]: I1128 13:50:27.576158 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6cb75569-0597-4843-8917-1aa2551bb29a-var-log-ovn\") pod \"ovn-controller-zhh8w-config-rsr2b\" (UID: \"6cb75569-0597-4843-8917-1aa2551bb29a\") " pod="openstack/ovn-controller-zhh8w-config-rsr2b" Nov 28 13:50:27 crc kubenswrapper[4857]: I1128 13:50:27.576226 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6cb75569-0597-4843-8917-1aa2551bb29a-var-run-ovn\") pod \"ovn-controller-zhh8w-config-rsr2b\" (UID: 
\"6cb75569-0597-4843-8917-1aa2551bb29a\") " pod="openstack/ovn-controller-zhh8w-config-rsr2b" Nov 28 13:50:27 crc kubenswrapper[4857]: I1128 13:50:27.578405 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6cb75569-0597-4843-8917-1aa2551bb29a-var-run\") pod \"ovn-controller-zhh8w-config-rsr2b\" (UID: \"6cb75569-0597-4843-8917-1aa2551bb29a\") " pod="openstack/ovn-controller-zhh8w-config-rsr2b" Nov 28 13:50:27 crc kubenswrapper[4857]: I1128 13:50:27.580580 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/6cb75569-0597-4843-8917-1aa2551bb29a-additional-scripts\") pod \"ovn-controller-zhh8w-config-rsr2b\" (UID: \"6cb75569-0597-4843-8917-1aa2551bb29a\") " pod="openstack/ovn-controller-zhh8w-config-rsr2b" Nov 28 13:50:27 crc kubenswrapper[4857]: I1128 13:50:27.580928 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6cb75569-0597-4843-8917-1aa2551bb29a-scripts\") pod \"ovn-controller-zhh8w-config-rsr2b\" (UID: \"6cb75569-0597-4843-8917-1aa2551bb29a\") " pod="openstack/ovn-controller-zhh8w-config-rsr2b" Nov 28 13:50:27 crc kubenswrapper[4857]: I1128 13:50:27.597571 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7jnc8\" (UniqueName: \"kubernetes.io/projected/6cb75569-0597-4843-8917-1aa2551bb29a-kube-api-access-7jnc8\") pod \"ovn-controller-zhh8w-config-rsr2b\" (UID: \"6cb75569-0597-4843-8917-1aa2551bb29a\") " pod="openstack/ovn-controller-zhh8w-config-rsr2b" Nov 28 13:50:27 crc kubenswrapper[4857]: I1128 13:50:27.689098 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-zhh8w-config-rsr2b" Nov 28 13:50:28 crc kubenswrapper[4857]: I1128 13:50:28.118795 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-57d769cc4f-r5hzx" podUID="9cd2e351-6a59-49e5-89b3-d0d45981cfb3" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.101:5353: i/o timeout" Nov 28 13:50:28 crc kubenswrapper[4857]: I1128 13:50:28.386556 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-zhh8w-config-rsr2b"] Nov 28 13:50:28 crc kubenswrapper[4857]: W1128 13:50:28.397876 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6cb75569_0597_4843_8917_1aa2551bb29a.slice/crio-4348eb86f9d12fcaa73f8c30f998bfdedf5ea60e88a5e2ff04db68cd0f8154bf WatchSource:0}: Error finding container 4348eb86f9d12fcaa73f8c30f998bfdedf5ea60e88a5e2ff04db68cd0f8154bf: Status 404 returned error can't find the container with id 4348eb86f9d12fcaa73f8c30f998bfdedf5ea60e88a5e2ff04db68cd0f8154bf Nov 28 13:50:28 crc kubenswrapper[4857]: I1128 13:50:28.669237 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-zhh8w-config-rsr2b" event={"ID":"6cb75569-0597-4843-8917-1aa2551bb29a","Type":"ContainerStarted","Data":"4348eb86f9d12fcaa73f8c30f998bfdedf5ea60e88a5e2ff04db68cd0f8154bf"} Nov 28 13:50:28 crc kubenswrapper[4857]: I1128 13:50:28.671336 4857 generic.go:334] "Generic (PLEG): container finished" podID="e5550a25-04ef-4dde-afd4-627f1df97a90" containerID="07476080bfc0082bdd51a54d167f1b8f0849c9dddec8d7f0531eba67b8803b47" exitCode=0 Nov 28 13:50:28 crc kubenswrapper[4857]: I1128 13:50:28.671401 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/rabbitmq-server-0" event={"ID":"e5550a25-04ef-4dde-afd4-627f1df97a90","Type":"ContainerDied","Data":"07476080bfc0082bdd51a54d167f1b8f0849c9dddec8d7f0531eba67b8803b47"} Nov 28 13:50:28 crc kubenswrapper[4857]: I1128 13:50:28.672851 4857 generic.go:334] "Generic (PLEG): container finished" podID="e5ec18e7-6719-46dd-b580-303f3da41869" containerID="2ae1f1de2ee889c449e3ff3c827e646d41938be57fdf8267c513587b8dbd2ecb" exitCode=0 Nov 28 13:50:28 crc kubenswrapper[4857]: I1128 13:50:28.672883 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"e5ec18e7-6719-46dd-b580-303f3da41869","Type":"ContainerDied","Data":"2ae1f1de2ee889c449e3ff3c827e646d41938be57fdf8267c513587b8dbd2ecb"} Nov 28 13:50:29 crc kubenswrapper[4857]: I1128 13:50:29.685229 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"e5550a25-04ef-4dde-afd4-627f1df97a90","Type":"ContainerStarted","Data":"3bfaa6d12b2af65b2c4bc9e67c77c455db443837bdebce53cc1736765094b03f"} Nov 28 13:50:29 crc kubenswrapper[4857]: I1128 13:50:29.685835 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 28 13:50:29 crc kubenswrapper[4857]: I1128 13:50:29.690151 4857 generic.go:334] "Generic (PLEG): container finished" podID="6cb75569-0597-4843-8917-1aa2551bb29a" containerID="10b19e1335d4a44e15430b19e7ace5a55ec7270bd18000a142d0b76ec25527fa" exitCode=0 Nov 28 13:50:29 crc kubenswrapper[4857]: I1128 13:50:29.690238 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-zhh8w-config-rsr2b" event={"ID":"6cb75569-0597-4843-8917-1aa2551bb29a","Type":"ContainerDied","Data":"10b19e1335d4a44e15430b19e7ace5a55ec7270bd18000a142d0b76ec25527fa"} Nov 28 13:50:29 crc kubenswrapper[4857]: I1128 13:50:29.693733 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"e5ec18e7-6719-46dd-b580-303f3da41869","Type":"ContainerStarted","Data":"af0d6704e41a2d48d923ac3295bc509705c2c44269b7920b44c7cf73327f3eda"} Nov 28 13:50:29 crc kubenswrapper[4857]: I1128 13:50:29.694098 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:50:29 crc kubenswrapper[4857]: I1128 13:50:29.742875 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=53.891220439 podStartE2EDuration="1m7.742852996s" podCreationTimestamp="2025-11-28 13:49:22 +0000 UTC" firstStartedPulling="2025-11-28 13:49:39.69667595 +0000 UTC m=+1229.820617387" lastFinishedPulling="2025-11-28 13:49:53.548308507 +0000 UTC m=+1243.672249944" observedRunningTime="2025-11-28 13:50:29.716683511 +0000 UTC m=+1279.840624948" watchObservedRunningTime="2025-11-28 13:50:29.742852996 +0000 UTC m=+1279.866794433" Nov 28 13:50:29 crc kubenswrapper[4857]: I1128 13:50:29.764494 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=55.048102782 podStartE2EDuration="1m7.764475298s" podCreationTimestamp="2025-11-28 13:49:22 +0000 UTC" firstStartedPulling="2025-11-28 13:49:40.095912307 +0000 UTC m=+1230.219853744" lastFinishedPulling="2025-11-28 13:49:52.812284823 +0000 UTC m=+1242.936226260" observedRunningTime="2025-11-28 13:50:29.757024497 +0000 UTC m=+1279.880965954" watchObservedRunningTime="2025-11-28 13:50:29.764475298 +0000 UTC m=+1279.888416735" Nov 28 13:50:33 crc kubenswrapper[4857]: 
I1128 13:50:33.733114 4857 generic.go:334] "Generic (PLEG): container finished" podID="d59bcbed-e787-486b-9efc-8c12f7e58960" containerID="d6ccb0e6b21baea5a7b5a6eca4876d506f37cb827f59f9ec638ab87806a36902" exitCode=0 Nov 28 13:50:33 crc kubenswrapper[4857]: I1128 13:50:33.733574 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-j66cj" event={"ID":"d59bcbed-e787-486b-9efc-8c12f7e58960","Type":"ContainerDied","Data":"d6ccb0e6b21baea5a7b5a6eca4876d506f37cb827f59f9ec638ab87806a36902"} Nov 28 13:50:36 crc kubenswrapper[4857]: I1128 13:50:36.507706 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 28 13:50:42 crc kubenswrapper[4857]: I1128 13:50:42.054907 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7dacf187-3671-4114-a93e-e2296c8c20b2-etc-swift\") pod \"swift-storage-0\" (UID: \"7dacf187-3671-4114-a93e-e2296c8c20b2\") " pod="openstack/swift-storage-0" Nov 28 13:50:42 crc kubenswrapper[4857]: I1128 13:50:42.059687 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-zhh8w" Nov 28 13:50:42 crc kubenswrapper[4857]: I1128 13:50:42.074737 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7dacf187-3671-4114-a93e-e2296c8c20b2-etc-swift\") pod \"swift-storage-0\" (UID: \"7dacf187-3671-4114-a93e-e2296c8c20b2\") " pod="openstack/swift-storage-0" Nov 28 13:50:42 crc kubenswrapper[4857]: I1128 13:50:42.324192 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 28 13:50:44 crc kubenswrapper[4857]: I1128 13:50:44.021858 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="e5550a25-04ef-4dde-afd4-627f1df97a90" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.102:5671: connect: connection refused" Nov 28 13:50:44 crc kubenswrapper[4857]: I1128 13:50:44.214351 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="e5ec18e7-6719-46dd-b580-303f3da41869" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.103:5671: connect: connection refused" Nov 28 13:50:45 crc kubenswrapper[4857]: E1128 13:50:45.649498 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-glance-api:current-podified" Nov 28 13:50:45 crc kubenswrapper[4857]: E1128 13:50:45.649974 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:glance-db-sync,Image:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/glance/glance.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2ccmh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42415,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42415,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-db-sync-f8mpb_openstack(55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 13:50:45 crc kubenswrapper[4857]: E1128 13:50:45.651370 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/glance-db-sync-f8mpb" podUID="55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed" Nov 28 13:50:45 crc kubenswrapper[4857]: I1128 13:50:45.740555 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-j66cj" Nov 28 13:50:45 crc kubenswrapper[4857]: I1128 13:50:45.755308 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-zhh8w-config-rsr2b" Nov 28 13:50:45 crc kubenswrapper[4857]: I1128 13:50:45.835241 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-j66cj" Nov 28 13:50:45 crc kubenswrapper[4857]: I1128 13:50:45.835244 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-j66cj" event={"ID":"d59bcbed-e787-486b-9efc-8c12f7e58960","Type":"ContainerDied","Data":"45e4efffe33a96f45d7a9225fe27f3063f96a101fd737bb9e9bfcdd47af23bbd"} Nov 28 13:50:45 crc kubenswrapper[4857]: I1128 13:50:45.836020 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="45e4efffe33a96f45d7a9225fe27f3063f96a101fd737bb9e9bfcdd47af23bbd" Nov 28 13:50:45 crc kubenswrapper[4857]: I1128 13:50:45.837766 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-zhh8w-config-rsr2b" Nov 28 13:50:45 crc kubenswrapper[4857]: I1128 13:50:45.838092 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-zhh8w-config-rsr2b" event={"ID":"6cb75569-0597-4843-8917-1aa2551bb29a","Type":"ContainerDied","Data":"4348eb86f9d12fcaa73f8c30f998bfdedf5ea60e88a5e2ff04db68cd0f8154bf"} Nov 28 13:50:45 crc kubenswrapper[4857]: I1128 13:50:45.838148 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4348eb86f9d12fcaa73f8c30f998bfdedf5ea60e88a5e2ff04db68cd0f8154bf" Nov 28 13:50:45 crc kubenswrapper[4857]: E1128 13:50:45.838598 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-glance-api:current-podified\\\"\"" pod="openstack/glance-db-sync-f8mpb" podUID="55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed" Nov 28 13:50:45 crc kubenswrapper[4857]: I1128 13:50:45.927707 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d59bcbed-e787-486b-9efc-8c12f7e58960-scripts\") pod \"d59bcbed-e787-486b-9efc-8c12f7e58960\" (UID: \"d59bcbed-e787-486b-9efc-8c12f7e58960\") " Nov 28 13:50:45 crc kubenswrapper[4857]: I1128 13:50:45.927779 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d59bcbed-e787-486b-9efc-8c12f7e58960-combined-ca-bundle\") pod \"d59bcbed-e787-486b-9efc-8c12f7e58960\" (UID: \"d59bcbed-e787-486b-9efc-8c12f7e58960\") " Nov 28 13:50:45 crc kubenswrapper[4857]: I1128 13:50:45.927806 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/6cb75569-0597-4843-8917-1aa2551bb29a-additional-scripts\") pod \"6cb75569-0597-4843-8917-1aa2551bb29a\" (UID: \"6cb75569-0597-4843-8917-1aa2551bb29a\") " Nov 28 13:50:45 crc kubenswrapper[4857]: I1128 13:50:45.927838 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pr5tf\" (UniqueName: \"kubernetes.io/projected/d59bcbed-e787-486b-9efc-8c12f7e58960-kube-api-access-pr5tf\") pod \"d59bcbed-e787-486b-9efc-8c12f7e58960\" (UID: \"d59bcbed-e787-486b-9efc-8c12f7e58960\") " Nov 28 13:50:45 crc kubenswrapper[4857]: I1128 13:50:45.927916 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6cb75569-0597-4843-8917-1aa2551bb29a-var-log-ovn\") pod \"6cb75569-0597-4843-8917-1aa2551bb29a\" (UID: \"6cb75569-0597-4843-8917-1aa2551bb29a\") " Nov 28 13:50:45 crc kubenswrapper[4857]: I1128 
13:50:45.927933 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7jnc8\" (UniqueName: \"kubernetes.io/projected/6cb75569-0597-4843-8917-1aa2551bb29a-kube-api-access-7jnc8\") pod \"6cb75569-0597-4843-8917-1aa2551bb29a\" (UID: \"6cb75569-0597-4843-8917-1aa2551bb29a\") " Nov 28 13:50:45 crc kubenswrapper[4857]: I1128 13:50:45.927983 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/d59bcbed-e787-486b-9efc-8c12f7e58960-dispersionconf\") pod \"d59bcbed-e787-486b-9efc-8c12f7e58960\" (UID: \"d59bcbed-e787-486b-9efc-8c12f7e58960\") " Nov 28 13:50:45 crc kubenswrapper[4857]: I1128 13:50:45.928007 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6cb75569-0597-4843-8917-1aa2551bb29a-var-run\") pod \"6cb75569-0597-4843-8917-1aa2551bb29a\" (UID: \"6cb75569-0597-4843-8917-1aa2551bb29a\") " Nov 28 13:50:45 crc kubenswrapper[4857]: I1128 13:50:45.928038 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/d59bcbed-e787-486b-9efc-8c12f7e58960-ring-data-devices\") pod \"d59bcbed-e787-486b-9efc-8c12f7e58960\" (UID: \"d59bcbed-e787-486b-9efc-8c12f7e58960\") " Nov 28 13:50:45 crc kubenswrapper[4857]: I1128 13:50:45.928099 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/d59bcbed-e787-486b-9efc-8c12f7e58960-swiftconf\") pod \"d59bcbed-e787-486b-9efc-8c12f7e58960\" (UID: \"d59bcbed-e787-486b-9efc-8c12f7e58960\") " Nov 28 13:50:45 crc kubenswrapper[4857]: I1128 13:50:45.928127 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6cb75569-0597-4843-8917-1aa2551bb29a-scripts\") pod \"6cb75569-0597-4843-8917-1aa2551bb29a\" (UID: \"6cb75569-0597-4843-8917-1aa2551bb29a\") " Nov 28 13:50:45 crc kubenswrapper[4857]: I1128 13:50:45.928167 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/d59bcbed-e787-486b-9efc-8c12f7e58960-etc-swift\") pod \"d59bcbed-e787-486b-9efc-8c12f7e58960\" (UID: \"d59bcbed-e787-486b-9efc-8c12f7e58960\") " Nov 28 13:50:45 crc kubenswrapper[4857]: I1128 13:50:45.928194 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6cb75569-0597-4843-8917-1aa2551bb29a-var-run-ovn\") pod \"6cb75569-0597-4843-8917-1aa2551bb29a\" (UID: \"6cb75569-0597-4843-8917-1aa2551bb29a\") " Nov 28 13:50:45 crc kubenswrapper[4857]: I1128 13:50:45.928536 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6cb75569-0597-4843-8917-1aa2551bb29a-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "6cb75569-0597-4843-8917-1aa2551bb29a" (UID: "6cb75569-0597-4843-8917-1aa2551bb29a"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:50:45 crc kubenswrapper[4857]: I1128 13:50:45.933107 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6cb75569-0597-4843-8917-1aa2551bb29a-scripts" (OuterVolumeSpecName: "scripts") pod "6cb75569-0597-4843-8917-1aa2551bb29a" (UID: "6cb75569-0597-4843-8917-1aa2551bb29a"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:50:45 crc kubenswrapper[4857]: I1128 13:50:45.934283 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d59bcbed-e787-486b-9efc-8c12f7e58960-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "d59bcbed-e787-486b-9efc-8c12f7e58960" (UID: "d59bcbed-e787-486b-9efc-8c12f7e58960"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:50:45 crc kubenswrapper[4857]: I1128 13:50:45.935044 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d59bcbed-e787-486b-9efc-8c12f7e58960-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "d59bcbed-e787-486b-9efc-8c12f7e58960" (UID: "d59bcbed-e787-486b-9efc-8c12f7e58960"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:50:45 crc kubenswrapper[4857]: I1128 13:50:45.935539 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6cb75569-0597-4843-8917-1aa2551bb29a-var-run" (OuterVolumeSpecName: "var-run") pod "6cb75569-0597-4843-8917-1aa2551bb29a" (UID: "6cb75569-0597-4843-8917-1aa2551bb29a"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:50:45 crc kubenswrapper[4857]: I1128 13:50:45.936477 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d59bcbed-e787-486b-9efc-8c12f7e58960-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "d59bcbed-e787-486b-9efc-8c12f7e58960" (UID: "d59bcbed-e787-486b-9efc-8c12f7e58960"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:50:45 crc kubenswrapper[4857]: I1128 13:50:45.936560 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6cb75569-0597-4843-8917-1aa2551bb29a-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "6cb75569-0597-4843-8917-1aa2551bb29a" (UID: "6cb75569-0597-4843-8917-1aa2551bb29a"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:50:45 crc kubenswrapper[4857]: I1128 13:50:45.937766 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d59bcbed-e787-486b-9efc-8c12f7e58960-kube-api-access-pr5tf" (OuterVolumeSpecName: "kube-api-access-pr5tf") pod "d59bcbed-e787-486b-9efc-8c12f7e58960" (UID: "d59bcbed-e787-486b-9efc-8c12f7e58960"). InnerVolumeSpecName "kube-api-access-pr5tf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:50:45 crc kubenswrapper[4857]: I1128 13:50:45.942727 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6cb75569-0597-4843-8917-1aa2551bb29a-kube-api-access-7jnc8" (OuterVolumeSpecName: "kube-api-access-7jnc8") pod "6cb75569-0597-4843-8917-1aa2551bb29a" (UID: "6cb75569-0597-4843-8917-1aa2551bb29a"). InnerVolumeSpecName "kube-api-access-7jnc8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:50:45 crc kubenswrapper[4857]: I1128 13:50:45.956518 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6cb75569-0597-4843-8917-1aa2551bb29a-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "6cb75569-0597-4843-8917-1aa2551bb29a" (UID: "6cb75569-0597-4843-8917-1aa2551bb29a"). InnerVolumeSpecName "additional-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:50:45 crc kubenswrapper[4857]: I1128 13:50:45.959691 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d59bcbed-e787-486b-9efc-8c12f7e58960-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "d59bcbed-e787-486b-9efc-8c12f7e58960" (UID: "d59bcbed-e787-486b-9efc-8c12f7e58960"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:50:45 crc kubenswrapper[4857]: I1128 13:50:45.961732 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d59bcbed-e787-486b-9efc-8c12f7e58960-scripts" (OuterVolumeSpecName: "scripts") pod "d59bcbed-e787-486b-9efc-8c12f7e58960" (UID: "d59bcbed-e787-486b-9efc-8c12f7e58960"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:50:45 crc kubenswrapper[4857]: I1128 13:50:45.965933 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d59bcbed-e787-486b-9efc-8c12f7e58960-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d59bcbed-e787-486b-9efc-8c12f7e58960" (UID: "d59bcbed-e787-486b-9efc-8c12f7e58960"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:50:46 crc kubenswrapper[4857]: I1128 13:50:46.030156 4857 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6cb75569-0597-4843-8917-1aa2551bb29a-var-run\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:46 crc kubenswrapper[4857]: I1128 13:50:46.030182 4857 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/d59bcbed-e787-486b-9efc-8c12f7e58960-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:46 crc kubenswrapper[4857]: I1128 13:50:46.030194 4857 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/d59bcbed-e787-486b-9efc-8c12f7e58960-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:46 crc kubenswrapper[4857]: I1128 13:50:46.030203 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6cb75569-0597-4843-8917-1aa2551bb29a-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:46 crc kubenswrapper[4857]: I1128 13:50:46.030211 4857 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/d59bcbed-e787-486b-9efc-8c12f7e58960-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:46 crc kubenswrapper[4857]: I1128 13:50:46.030219 4857 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6cb75569-0597-4843-8917-1aa2551bb29a-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:46 crc kubenswrapper[4857]: I1128 13:50:46.030227 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d59bcbed-e787-486b-9efc-8c12f7e58960-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:46 crc kubenswrapper[4857]: I1128 13:50:46.030235 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d59bcbed-e787-486b-9efc-8c12f7e58960-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:46 crc kubenswrapper[4857]: I1128 13:50:46.030243 4857 reconciler_common.go:293] "Volume detached for volume 
\"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/6cb75569-0597-4843-8917-1aa2551bb29a-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:46 crc kubenswrapper[4857]: I1128 13:50:46.030251 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pr5tf\" (UniqueName: \"kubernetes.io/projected/d59bcbed-e787-486b-9efc-8c12f7e58960-kube-api-access-pr5tf\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:46 crc kubenswrapper[4857]: I1128 13:50:46.030261 4857 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6cb75569-0597-4843-8917-1aa2551bb29a-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:46 crc kubenswrapper[4857]: I1128 13:50:46.030270 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7jnc8\" (UniqueName: \"kubernetes.io/projected/6cb75569-0597-4843-8917-1aa2551bb29a-kube-api-access-7jnc8\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:46 crc kubenswrapper[4857]: I1128 13:50:46.030278 4857 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/d59bcbed-e787-486b-9efc-8c12f7e58960-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:46 crc kubenswrapper[4857]: I1128 13:50:46.142038 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 28 13:50:46 crc kubenswrapper[4857]: W1128 13:50:46.144445 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7dacf187_3671_4114_a93e_e2296c8c20b2.slice/crio-54af9ca9a947932fbcf2afe5ca523893b1211c8003a55f120e619d2e792d7265 WatchSource:0}: Error finding container 54af9ca9a947932fbcf2afe5ca523893b1211c8003a55f120e619d2e792d7265: Status 404 returned error can't find the container with id 54af9ca9a947932fbcf2afe5ca523893b1211c8003a55f120e619d2e792d7265 Nov 28 13:50:46 crc kubenswrapper[4857]: I1128 13:50:46.876940 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7dacf187-3671-4114-a93e-e2296c8c20b2","Type":"ContainerStarted","Data":"54af9ca9a947932fbcf2afe5ca523893b1211c8003a55f120e619d2e792d7265"} Nov 28 13:50:46 crc kubenswrapper[4857]: I1128 13:50:46.898128 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-zhh8w-config-rsr2b"] Nov 28 13:50:46 crc kubenswrapper[4857]: I1128 13:50:46.909349 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-zhh8w-config-rsr2b"] Nov 28 13:50:46 crc kubenswrapper[4857]: I1128 13:50:46.989961 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-zhh8w-config-5qwq9"] Nov 28 13:50:46 crc kubenswrapper[4857]: E1128 13:50:46.990258 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6cb75569-0597-4843-8917-1aa2551bb29a" containerName="ovn-config" Nov 28 13:50:46 crc kubenswrapper[4857]: I1128 13:50:46.990271 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="6cb75569-0597-4843-8917-1aa2551bb29a" containerName="ovn-config" Nov 28 13:50:46 crc kubenswrapper[4857]: E1128 13:50:46.990307 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d59bcbed-e787-486b-9efc-8c12f7e58960" containerName="swift-ring-rebalance" Nov 28 13:50:46 crc kubenswrapper[4857]: I1128 13:50:46.990314 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d59bcbed-e787-486b-9efc-8c12f7e58960" containerName="swift-ring-rebalance" Nov 28 13:50:46 
crc kubenswrapper[4857]: I1128 13:50:46.990446 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d59bcbed-e787-486b-9efc-8c12f7e58960" containerName="swift-ring-rebalance" Nov 28 13:50:46 crc kubenswrapper[4857]: I1128 13:50:46.990464 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="6cb75569-0597-4843-8917-1aa2551bb29a" containerName="ovn-config" Nov 28 13:50:46 crc kubenswrapper[4857]: I1128 13:50:46.990987 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-zhh8w-config-5qwq9" Nov 28 13:50:46 crc kubenswrapper[4857]: I1128 13:50:46.993589 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 28 13:50:47 crc kubenswrapper[4857]: I1128 13:50:47.001610 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-zhh8w-config-5qwq9"] Nov 28 13:50:47 crc kubenswrapper[4857]: I1128 13:50:47.150187 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5aab28bb-b2c7-40ce-8588-29481509355a-additional-scripts\") pod \"ovn-controller-zhh8w-config-5qwq9\" (UID: \"5aab28bb-b2c7-40ce-8588-29481509355a\") " pod="openstack/ovn-controller-zhh8w-config-5qwq9" Nov 28 13:50:47 crc kubenswrapper[4857]: I1128 13:50:47.150223 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ncvtj\" (UniqueName: \"kubernetes.io/projected/5aab28bb-b2c7-40ce-8588-29481509355a-kube-api-access-ncvtj\") pod \"ovn-controller-zhh8w-config-5qwq9\" (UID: \"5aab28bb-b2c7-40ce-8588-29481509355a\") " pod="openstack/ovn-controller-zhh8w-config-5qwq9" Nov 28 13:50:47 crc kubenswrapper[4857]: I1128 13:50:47.150267 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5aab28bb-b2c7-40ce-8588-29481509355a-var-run-ovn\") pod \"ovn-controller-zhh8w-config-5qwq9\" (UID: \"5aab28bb-b2c7-40ce-8588-29481509355a\") " pod="openstack/ovn-controller-zhh8w-config-5qwq9" Nov 28 13:50:47 crc kubenswrapper[4857]: I1128 13:50:47.150331 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5aab28bb-b2c7-40ce-8588-29481509355a-var-log-ovn\") pod \"ovn-controller-zhh8w-config-5qwq9\" (UID: \"5aab28bb-b2c7-40ce-8588-29481509355a\") " pod="openstack/ovn-controller-zhh8w-config-5qwq9" Nov 28 13:50:47 crc kubenswrapper[4857]: I1128 13:50:47.150471 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5aab28bb-b2c7-40ce-8588-29481509355a-scripts\") pod \"ovn-controller-zhh8w-config-5qwq9\" (UID: \"5aab28bb-b2c7-40ce-8588-29481509355a\") " pod="openstack/ovn-controller-zhh8w-config-5qwq9" Nov 28 13:50:47 crc kubenswrapper[4857]: I1128 13:50:47.150553 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5aab28bb-b2c7-40ce-8588-29481509355a-var-run\") pod \"ovn-controller-zhh8w-config-5qwq9\" (UID: \"5aab28bb-b2c7-40ce-8588-29481509355a\") " pod="openstack/ovn-controller-zhh8w-config-5qwq9" Nov 28 13:50:47 crc kubenswrapper[4857]: I1128 13:50:47.252083 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5aab28bb-b2c7-40ce-8588-29481509355a-additional-scripts\") pod \"ovn-controller-zhh8w-config-5qwq9\" (UID: \"5aab28bb-b2c7-40ce-8588-29481509355a\") " pod="openstack/ovn-controller-zhh8w-config-5qwq9" Nov 28 13:50:47 crc kubenswrapper[4857]: I1128 13:50:47.252162 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ncvtj\" (UniqueName: \"kubernetes.io/projected/5aab28bb-b2c7-40ce-8588-29481509355a-kube-api-access-ncvtj\") pod \"ovn-controller-zhh8w-config-5qwq9\" (UID: \"5aab28bb-b2c7-40ce-8588-29481509355a\") " pod="openstack/ovn-controller-zhh8w-config-5qwq9" Nov 28 13:50:47 crc kubenswrapper[4857]: I1128 13:50:47.252269 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5aab28bb-b2c7-40ce-8588-29481509355a-var-run-ovn\") pod \"ovn-controller-zhh8w-config-5qwq9\" (UID: \"5aab28bb-b2c7-40ce-8588-29481509355a\") " pod="openstack/ovn-controller-zhh8w-config-5qwq9" Nov 28 13:50:47 crc kubenswrapper[4857]: I1128 13:50:47.252399 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5aab28bb-b2c7-40ce-8588-29481509355a-var-log-ovn\") pod \"ovn-controller-zhh8w-config-5qwq9\" (UID: \"5aab28bb-b2c7-40ce-8588-29481509355a\") " pod="openstack/ovn-controller-zhh8w-config-5qwq9" Nov 28 13:50:47 crc kubenswrapper[4857]: I1128 13:50:47.252509 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5aab28bb-b2c7-40ce-8588-29481509355a-scripts\") pod \"ovn-controller-zhh8w-config-5qwq9\" (UID: \"5aab28bb-b2c7-40ce-8588-29481509355a\") " pod="openstack/ovn-controller-zhh8w-config-5qwq9" Nov 28 13:50:47 crc kubenswrapper[4857]: I1128 13:50:47.252567 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5aab28bb-b2c7-40ce-8588-29481509355a-var-run\") pod \"ovn-controller-zhh8w-config-5qwq9\" (UID: \"5aab28bb-b2c7-40ce-8588-29481509355a\") " pod="openstack/ovn-controller-zhh8w-config-5qwq9" Nov 28 13:50:47 crc kubenswrapper[4857]: I1128 13:50:47.253030 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5aab28bb-b2c7-40ce-8588-29481509355a-additional-scripts\") pod \"ovn-controller-zhh8w-config-5qwq9\" (UID: \"5aab28bb-b2c7-40ce-8588-29481509355a\") " pod="openstack/ovn-controller-zhh8w-config-5qwq9" Nov 28 13:50:47 crc kubenswrapper[4857]: I1128 13:50:47.253067 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5aab28bb-b2c7-40ce-8588-29481509355a-var-run\") pod \"ovn-controller-zhh8w-config-5qwq9\" (UID: \"5aab28bb-b2c7-40ce-8588-29481509355a\") " pod="openstack/ovn-controller-zhh8w-config-5qwq9" Nov 28 13:50:47 crc kubenswrapper[4857]: I1128 13:50:47.253150 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5aab28bb-b2c7-40ce-8588-29481509355a-var-log-ovn\") pod \"ovn-controller-zhh8w-config-5qwq9\" (UID: \"5aab28bb-b2c7-40ce-8588-29481509355a\") " pod="openstack/ovn-controller-zhh8w-config-5qwq9" Nov 28 13:50:47 crc kubenswrapper[4857]: I1128 13:50:47.253182 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" 
(UniqueName: \"kubernetes.io/host-path/5aab28bb-b2c7-40ce-8588-29481509355a-var-run-ovn\") pod \"ovn-controller-zhh8w-config-5qwq9\" (UID: \"5aab28bb-b2c7-40ce-8588-29481509355a\") " pod="openstack/ovn-controller-zhh8w-config-5qwq9" Nov 28 13:50:47 crc kubenswrapper[4857]: I1128 13:50:47.254745 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5aab28bb-b2c7-40ce-8588-29481509355a-scripts\") pod \"ovn-controller-zhh8w-config-5qwq9\" (UID: \"5aab28bb-b2c7-40ce-8588-29481509355a\") " pod="openstack/ovn-controller-zhh8w-config-5qwq9" Nov 28 13:50:47 crc kubenswrapper[4857]: I1128 13:50:47.274573 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ncvtj\" (UniqueName: \"kubernetes.io/projected/5aab28bb-b2c7-40ce-8588-29481509355a-kube-api-access-ncvtj\") pod \"ovn-controller-zhh8w-config-5qwq9\" (UID: \"5aab28bb-b2c7-40ce-8588-29481509355a\") " pod="openstack/ovn-controller-zhh8w-config-5qwq9" Nov 28 13:50:47 crc kubenswrapper[4857]: I1128 13:50:47.309122 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-zhh8w-config-5qwq9" Nov 28 13:50:47 crc kubenswrapper[4857]: I1128 13:50:47.827261 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-zhh8w-config-5qwq9"] Nov 28 13:50:47 crc kubenswrapper[4857]: I1128 13:50:47.939180 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-zhh8w-config-5qwq9" event={"ID":"5aab28bb-b2c7-40ce-8588-29481509355a","Type":"ContainerStarted","Data":"0c3ee094e72943f77fdd05a6f38e209d7f81dd82b73f8ce0b67059878a6adedb"} Nov 28 13:50:47 crc kubenswrapper[4857]: I1128 13:50:47.953066 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7dacf187-3671-4114-a93e-e2296c8c20b2","Type":"ContainerStarted","Data":"7f8c7069308f0a6173f6d99219cd32e4aee978bcd30684463830f8831fa3dc54"} Nov 28 13:50:47 crc kubenswrapper[4857]: I1128 13:50:47.953104 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7dacf187-3671-4114-a93e-e2296c8c20b2","Type":"ContainerStarted","Data":"4e216ae629f3bf57169773aa0918d2098ca8722a57b52a0b59f920ee5fe40042"} Nov 28 13:50:47 crc kubenswrapper[4857]: I1128 13:50:47.953114 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7dacf187-3671-4114-a93e-e2296c8c20b2","Type":"ContainerStarted","Data":"997f645a4bf32bedcf75f896750b179b3b64c864f0aa44fb1505c0ce4a2004d3"} Nov 28 13:50:48 crc kubenswrapper[4857]: I1128 13:50:48.237576 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6cb75569-0597-4843-8917-1aa2551bb29a" path="/var/lib/kubelet/pods/6cb75569-0597-4843-8917-1aa2551bb29a/volumes" Nov 28 13:50:48 crc kubenswrapper[4857]: I1128 13:50:48.964538 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7dacf187-3671-4114-a93e-e2296c8c20b2","Type":"ContainerStarted","Data":"684d617680cd018b845bc46ea83aaf268880e5e96d90c1f4c74455668100a5fb"} Nov 28 13:50:48 crc kubenswrapper[4857]: I1128 13:50:48.966773 4857 generic.go:334] "Generic (PLEG): container finished" podID="5aab28bb-b2c7-40ce-8588-29481509355a" containerID="8c8913b9cca44eed468d286bc63c3d77ee68dc07c0d789961886533d02b3aad5" exitCode=0 Nov 28 13:50:48 crc kubenswrapper[4857]: I1128 13:50:48.966832 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ovn-controller-zhh8w-config-5qwq9" event={"ID":"5aab28bb-b2c7-40ce-8588-29481509355a","Type":"ContainerDied","Data":"8c8913b9cca44eed468d286bc63c3d77ee68dc07c0d789961886533d02b3aad5"} Nov 28 13:50:49 crc kubenswrapper[4857]: I1128 13:50:49.980581 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7dacf187-3671-4114-a93e-e2296c8c20b2","Type":"ContainerStarted","Data":"336ae6d3cd985dc92a4379227cc3a7df30cdbac847520f8327e5fce00a85e01c"} Nov 28 13:50:50 crc kubenswrapper[4857]: I1128 13:50:50.307502 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-zhh8w-config-5qwq9" Nov 28 13:50:50 crc kubenswrapper[4857]: I1128 13:50:50.400534 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ncvtj\" (UniqueName: \"kubernetes.io/projected/5aab28bb-b2c7-40ce-8588-29481509355a-kube-api-access-ncvtj\") pod \"5aab28bb-b2c7-40ce-8588-29481509355a\" (UID: \"5aab28bb-b2c7-40ce-8588-29481509355a\") " Nov 28 13:50:50 crc kubenswrapper[4857]: I1128 13:50:50.400598 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5aab28bb-b2c7-40ce-8588-29481509355a-scripts\") pod \"5aab28bb-b2c7-40ce-8588-29481509355a\" (UID: \"5aab28bb-b2c7-40ce-8588-29481509355a\") " Nov 28 13:50:50 crc kubenswrapper[4857]: I1128 13:50:50.400621 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5aab28bb-b2c7-40ce-8588-29481509355a-additional-scripts\") pod \"5aab28bb-b2c7-40ce-8588-29481509355a\" (UID: \"5aab28bb-b2c7-40ce-8588-29481509355a\") " Nov 28 13:50:50 crc kubenswrapper[4857]: I1128 13:50:50.400690 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5aab28bb-b2c7-40ce-8588-29481509355a-var-run-ovn\") pod \"5aab28bb-b2c7-40ce-8588-29481509355a\" (UID: \"5aab28bb-b2c7-40ce-8588-29481509355a\") " Nov 28 13:50:50 crc kubenswrapper[4857]: I1128 13:50:50.400798 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5aab28bb-b2c7-40ce-8588-29481509355a-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "5aab28bb-b2c7-40ce-8588-29481509355a" (UID: "5aab28bb-b2c7-40ce-8588-29481509355a"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:50:50 crc kubenswrapper[4857]: I1128 13:50:50.400847 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5aab28bb-b2c7-40ce-8588-29481509355a-var-log-ovn\") pod \"5aab28bb-b2c7-40ce-8588-29481509355a\" (UID: \"5aab28bb-b2c7-40ce-8588-29481509355a\") " Nov 28 13:50:50 crc kubenswrapper[4857]: I1128 13:50:50.400878 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5aab28bb-b2c7-40ce-8588-29481509355a-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "5aab28bb-b2c7-40ce-8588-29481509355a" (UID: "5aab28bb-b2c7-40ce-8588-29481509355a"). InnerVolumeSpecName "var-log-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:50:50 crc kubenswrapper[4857]: I1128 13:50:50.400902 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5aab28bb-b2c7-40ce-8588-29481509355a-var-run\") pod \"5aab28bb-b2c7-40ce-8588-29481509355a\" (UID: \"5aab28bb-b2c7-40ce-8588-29481509355a\") " Nov 28 13:50:50 crc kubenswrapper[4857]: I1128 13:50:50.400956 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5aab28bb-b2c7-40ce-8588-29481509355a-var-run" (OuterVolumeSpecName: "var-run") pod "5aab28bb-b2c7-40ce-8588-29481509355a" (UID: "5aab28bb-b2c7-40ce-8588-29481509355a"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:50:50 crc kubenswrapper[4857]: I1128 13:50:50.401254 4857 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5aab28bb-b2c7-40ce-8588-29481509355a-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:50 crc kubenswrapper[4857]: I1128 13:50:50.401272 4857 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5aab28bb-b2c7-40ce-8588-29481509355a-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:50 crc kubenswrapper[4857]: I1128 13:50:50.401281 4857 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5aab28bb-b2c7-40ce-8588-29481509355a-var-run\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:50 crc kubenswrapper[4857]: I1128 13:50:50.401803 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5aab28bb-b2c7-40ce-8588-29481509355a-scripts" (OuterVolumeSpecName: "scripts") pod "5aab28bb-b2c7-40ce-8588-29481509355a" (UID: "5aab28bb-b2c7-40ce-8588-29481509355a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:50:50 crc kubenswrapper[4857]: I1128 13:50:50.402249 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5aab28bb-b2c7-40ce-8588-29481509355a-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "5aab28bb-b2c7-40ce-8588-29481509355a" (UID: "5aab28bb-b2c7-40ce-8588-29481509355a"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:50:50 crc kubenswrapper[4857]: I1128 13:50:50.406020 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5aab28bb-b2c7-40ce-8588-29481509355a-kube-api-access-ncvtj" (OuterVolumeSpecName: "kube-api-access-ncvtj") pod "5aab28bb-b2c7-40ce-8588-29481509355a" (UID: "5aab28bb-b2c7-40ce-8588-29481509355a"). InnerVolumeSpecName "kube-api-access-ncvtj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:50:50 crc kubenswrapper[4857]: I1128 13:50:50.503287 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ncvtj\" (UniqueName: \"kubernetes.io/projected/5aab28bb-b2c7-40ce-8588-29481509355a-kube-api-access-ncvtj\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:50 crc kubenswrapper[4857]: I1128 13:50:50.503324 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5aab28bb-b2c7-40ce-8588-29481509355a-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:50 crc kubenswrapper[4857]: I1128 13:50:50.503333 4857 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/5aab28bb-b2c7-40ce-8588-29481509355a-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:50 crc kubenswrapper[4857]: I1128 13:50:50.991181 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7dacf187-3671-4114-a93e-e2296c8c20b2","Type":"ContainerStarted","Data":"2761311c0d0657f7895e8425cee039b5e9d1f6b44d147413193ce1a637e0d206"} Nov 28 13:50:50 crc kubenswrapper[4857]: I1128 13:50:50.991565 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7dacf187-3671-4114-a93e-e2296c8c20b2","Type":"ContainerStarted","Data":"043905c220a58f2cc3f72c7c5ff10d14437639fd54c68815c19eb821d3f8691b"} Nov 28 13:50:50 crc kubenswrapper[4857]: I1128 13:50:50.992717 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-zhh8w-config-5qwq9" event={"ID":"5aab28bb-b2c7-40ce-8588-29481509355a","Type":"ContainerDied","Data":"0c3ee094e72943f77fdd05a6f38e209d7f81dd82b73f8ce0b67059878a6adedb"} Nov 28 13:50:50 crc kubenswrapper[4857]: I1128 13:50:50.992741 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0c3ee094e72943f77fdd05a6f38e209d7f81dd82b73f8ce0b67059878a6adedb" Nov 28 13:50:50 crc kubenswrapper[4857]: I1128 13:50:50.992784 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-zhh8w-config-5qwq9" Nov 28 13:50:51 crc kubenswrapper[4857]: I1128 13:50:51.377480 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-zhh8w-config-5qwq9"] Nov 28 13:50:51 crc kubenswrapper[4857]: I1128 13:50:51.385463 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-zhh8w-config-5qwq9"] Nov 28 13:50:52 crc kubenswrapper[4857]: I1128 13:50:52.007348 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7dacf187-3671-4114-a93e-e2296c8c20b2","Type":"ContainerStarted","Data":"bd6d4921953db76ad5068a581723271ca2d557ab2915b227c4ec3a2e35dbb714"} Nov 28 13:50:52 crc kubenswrapper[4857]: I1128 13:50:52.239150 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5aab28bb-b2c7-40ce-8588-29481509355a" path="/var/lib/kubelet/pods/5aab28bb-b2c7-40ce-8588-29481509355a/volumes" Nov 28 13:50:53 crc kubenswrapper[4857]: I1128 13:50:53.017959 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7dacf187-3671-4114-a93e-e2296c8c20b2","Type":"ContainerStarted","Data":"fca2aa8f676e96e45c93a697eeabe06bb4bf6351b5998989a5d94cd1c765da97"} Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.024541 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.036311 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7dacf187-3671-4114-a93e-e2296c8c20b2","Type":"ContainerStarted","Data":"75a66b9748e2f49b7fe56ad9c99da91918be5cc9c7c5b50c82a8f29587c6dd41"} Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.036349 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7dacf187-3671-4114-a93e-e2296c8c20b2","Type":"ContainerStarted","Data":"9636087b646b01f79b8ba6470a873b59e807cd17b6f1e033005a6e5655b75269"} Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.036359 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7dacf187-3671-4114-a93e-e2296c8c20b2","Type":"ContainerStarted","Data":"f27c04a27d6f3fb5f92f4acda7c42185f0f649ff426b2f0c0fc82c87eb5c2df2"} Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.036369 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7dacf187-3671-4114-a93e-e2296c8c20b2","Type":"ContainerStarted","Data":"1dba2da3e5dc861fc5c8a1daae2fd1574ec3c6ba40ffdb0cbfcb19d46be889c1"} Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.036378 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7dacf187-3671-4114-a93e-e2296c8c20b2","Type":"ContainerStarted","Data":"797af21b1a27903599b036f4c694bee114318cb17d785c9ef036ff8854701e9f"} Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.218191 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.399503 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-csvmd"] Nov 28 13:50:54 crc kubenswrapper[4857]: E1128 13:50:54.399920 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5aab28bb-b2c7-40ce-8588-29481509355a" containerName="ovn-config" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.399964 4857 
state_mem.go:107] "Deleted CPUSet assignment" podUID="5aab28bb-b2c7-40ce-8588-29481509355a" containerName="ovn-config" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.400163 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="5aab28bb-b2c7-40ce-8588-29481509355a" containerName="ovn-config" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.400904 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-csvmd" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.408471 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-csvmd"] Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.475069 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-s5qtj"] Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.480102 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-s5qtj" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.488038 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-s5qtj"] Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.571865 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2df15232-d07b-49ae-99b9-60bb31ad3ff3-operator-scripts\") pod \"cinder-db-create-csvmd\" (UID: \"2df15232-d07b-49ae-99b9-60bb31ad3ff3\") " pod="openstack/cinder-db-create-csvmd" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.571994 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/967609ba-f165-4bfa-b13d-d23154c329e7-operator-scripts\") pod \"barbican-db-create-s5qtj\" (UID: \"967609ba-f165-4bfa-b13d-d23154c329e7\") " pod="openstack/barbican-db-create-s5qtj" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.572054 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cv7z4\" (UniqueName: \"kubernetes.io/projected/967609ba-f165-4bfa-b13d-d23154c329e7-kube-api-access-cv7z4\") pod \"barbican-db-create-s5qtj\" (UID: \"967609ba-f165-4bfa-b13d-d23154c329e7\") " pod="openstack/barbican-db-create-s5qtj" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.572080 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbwr9\" (UniqueName: \"kubernetes.io/projected/2df15232-d07b-49ae-99b9-60bb31ad3ff3-kube-api-access-bbwr9\") pod \"cinder-db-create-csvmd\" (UID: \"2df15232-d07b-49ae-99b9-60bb31ad3ff3\") " pod="openstack/cinder-db-create-csvmd" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.599334 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-f448-account-create-update-976jr"] Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.601119 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-f448-account-create-update-976jr" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.606330 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.615247 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-f448-account-create-update-976jr"] Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.673560 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/967609ba-f165-4bfa-b13d-d23154c329e7-operator-scripts\") pod \"barbican-db-create-s5qtj\" (UID: \"967609ba-f165-4bfa-b13d-d23154c329e7\") " pod="openstack/barbican-db-create-s5qtj" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.674016 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cv7z4\" (UniqueName: \"kubernetes.io/projected/967609ba-f165-4bfa-b13d-d23154c329e7-kube-api-access-cv7z4\") pod \"barbican-db-create-s5qtj\" (UID: \"967609ba-f165-4bfa-b13d-d23154c329e7\") " pod="openstack/barbican-db-create-s5qtj" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.674344 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/967609ba-f165-4bfa-b13d-d23154c329e7-operator-scripts\") pod \"barbican-db-create-s5qtj\" (UID: \"967609ba-f165-4bfa-b13d-d23154c329e7\") " pod="openstack/barbican-db-create-s5qtj" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.674571 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbwr9\" (UniqueName: \"kubernetes.io/projected/2df15232-d07b-49ae-99b9-60bb31ad3ff3-kube-api-access-bbwr9\") pod \"cinder-db-create-csvmd\" (UID: \"2df15232-d07b-49ae-99b9-60bb31ad3ff3\") " pod="openstack/cinder-db-create-csvmd" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.674893 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2df15232-d07b-49ae-99b9-60bb31ad3ff3-operator-scripts\") pod \"cinder-db-create-csvmd\" (UID: \"2df15232-d07b-49ae-99b9-60bb31ad3ff3\") " pod="openstack/cinder-db-create-csvmd" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.675575 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2df15232-d07b-49ae-99b9-60bb31ad3ff3-operator-scripts\") pod \"cinder-db-create-csvmd\" (UID: \"2df15232-d07b-49ae-99b9-60bb31ad3ff3\") " pod="openstack/cinder-db-create-csvmd" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.691269 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-8199-account-create-update-7ssjr"] Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.693005 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-8199-account-create-update-7ssjr" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.695526 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.702643 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-8ngsm"] Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.704437 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-8ngsm" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.709988 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-8199-account-create-update-7ssjr"] Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.720649 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cv7z4\" (UniqueName: \"kubernetes.io/projected/967609ba-f165-4bfa-b13d-d23154c329e7-kube-api-access-cv7z4\") pod \"barbican-db-create-s5qtj\" (UID: \"967609ba-f165-4bfa-b13d-d23154c329e7\") " pod="openstack/barbican-db-create-s5qtj" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.723019 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-8ngsm"] Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.727237 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbwr9\" (UniqueName: \"kubernetes.io/projected/2df15232-d07b-49ae-99b9-60bb31ad3ff3-kube-api-access-bbwr9\") pod \"cinder-db-create-csvmd\" (UID: \"2df15232-d07b-49ae-99b9-60bb31ad3ff3\") " pod="openstack/cinder-db-create-csvmd" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.776126 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xcs7x\" (UniqueName: \"kubernetes.io/projected/e49aa4db-e19c-4a91-9faa-d1f86f8aaa8f-kube-api-access-xcs7x\") pod \"barbican-8199-account-create-update-7ssjr\" (UID: \"e49aa4db-e19c-4a91-9faa-d1f86f8aaa8f\") " pod="openstack/barbican-8199-account-create-update-7ssjr" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.776182 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4c66fec5-8c9d-44f0-8a86-8dd74ea565b2-operator-scripts\") pod \"cinder-f448-account-create-update-976jr\" (UID: \"4c66fec5-8c9d-44f0-8a86-8dd74ea565b2\") " pod="openstack/cinder-f448-account-create-update-976jr" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.776274 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e49aa4db-e19c-4a91-9faa-d1f86f8aaa8f-operator-scripts\") pod \"barbican-8199-account-create-update-7ssjr\" (UID: \"e49aa4db-e19c-4a91-9faa-d1f86f8aaa8f\") " pod="openstack/barbican-8199-account-create-update-7ssjr" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.776293 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m8kns\" (UniqueName: \"kubernetes.io/projected/4c66fec5-8c9d-44f0-8a86-8dd74ea565b2-kube-api-access-m8kns\") pod \"cinder-f448-account-create-update-976jr\" (UID: \"4c66fec5-8c9d-44f0-8a86-8dd74ea565b2\") " pod="openstack/cinder-f448-account-create-update-976jr" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.778631 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-55l57"] Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.779674 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-55l57" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.785238 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.785457 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.786076 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.786213 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-49rtd" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.792310 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-55l57"] Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.795452 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-s5qtj" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.877545 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kq6vn\" (UniqueName: \"kubernetes.io/projected/1237773a-dc19-47c6-90cc-eb0de954d9b4-kube-api-access-kq6vn\") pod \"neutron-db-create-8ngsm\" (UID: \"1237773a-dc19-47c6-90cc-eb0de954d9b4\") " pod="openstack/neutron-db-create-8ngsm" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.877585 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0502ed7-3dd0-48b6-ba25-44dae8b21aef-config-data\") pod \"keystone-db-sync-55l57\" (UID: \"d0502ed7-3dd0-48b6-ba25-44dae8b21aef\") " pod="openstack/keystone-db-sync-55l57" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.877621 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e49aa4db-e19c-4a91-9faa-d1f86f8aaa8f-operator-scripts\") pod \"barbican-8199-account-create-update-7ssjr\" (UID: \"e49aa4db-e19c-4a91-9faa-d1f86f8aaa8f\") " pod="openstack/barbican-8199-account-create-update-7ssjr" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.877643 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m8kns\" (UniqueName: \"kubernetes.io/projected/4c66fec5-8c9d-44f0-8a86-8dd74ea565b2-kube-api-access-m8kns\") pod \"cinder-f448-account-create-update-976jr\" (UID: \"4c66fec5-8c9d-44f0-8a86-8dd74ea565b2\") " pod="openstack/cinder-f448-account-create-update-976jr" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.877765 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0502ed7-3dd0-48b6-ba25-44dae8b21aef-combined-ca-bundle\") pod \"keystone-db-sync-55l57\" (UID: \"d0502ed7-3dd0-48b6-ba25-44dae8b21aef\") " pod="openstack/keystone-db-sync-55l57" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.877807 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6lbpl\" (UniqueName: \"kubernetes.io/projected/d0502ed7-3dd0-48b6-ba25-44dae8b21aef-kube-api-access-6lbpl\") pod \"keystone-db-sync-55l57\" (UID: \"d0502ed7-3dd0-48b6-ba25-44dae8b21aef\") " pod="openstack/keystone-db-sync-55l57" Nov 28 13:50:54 crc kubenswrapper[4857]: 
I1128 13:50:54.877993 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xcs7x\" (UniqueName: \"kubernetes.io/projected/e49aa4db-e19c-4a91-9faa-d1f86f8aaa8f-kube-api-access-xcs7x\") pod \"barbican-8199-account-create-update-7ssjr\" (UID: \"e49aa4db-e19c-4a91-9faa-d1f86f8aaa8f\") " pod="openstack/barbican-8199-account-create-update-7ssjr" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.878103 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4c66fec5-8c9d-44f0-8a86-8dd74ea565b2-operator-scripts\") pod \"cinder-f448-account-create-update-976jr\" (UID: \"4c66fec5-8c9d-44f0-8a86-8dd74ea565b2\") " pod="openstack/cinder-f448-account-create-update-976jr" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.878172 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1237773a-dc19-47c6-90cc-eb0de954d9b4-operator-scripts\") pod \"neutron-db-create-8ngsm\" (UID: \"1237773a-dc19-47c6-90cc-eb0de954d9b4\") " pod="openstack/neutron-db-create-8ngsm" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.878408 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e49aa4db-e19c-4a91-9faa-d1f86f8aaa8f-operator-scripts\") pod \"barbican-8199-account-create-update-7ssjr\" (UID: \"e49aa4db-e19c-4a91-9faa-d1f86f8aaa8f\") " pod="openstack/barbican-8199-account-create-update-7ssjr" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.878837 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4c66fec5-8c9d-44f0-8a86-8dd74ea565b2-operator-scripts\") pod \"cinder-f448-account-create-update-976jr\" (UID: \"4c66fec5-8c9d-44f0-8a86-8dd74ea565b2\") " pod="openstack/cinder-f448-account-create-update-976jr" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.897383 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xcs7x\" (UniqueName: \"kubernetes.io/projected/e49aa4db-e19c-4a91-9faa-d1f86f8aaa8f-kube-api-access-xcs7x\") pod \"barbican-8199-account-create-update-7ssjr\" (UID: \"e49aa4db-e19c-4a91-9faa-d1f86f8aaa8f\") " pod="openstack/barbican-8199-account-create-update-7ssjr" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.901252 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m8kns\" (UniqueName: \"kubernetes.io/projected/4c66fec5-8c9d-44f0-8a86-8dd74ea565b2-kube-api-access-m8kns\") pod \"cinder-f448-account-create-update-976jr\" (UID: \"4c66fec5-8c9d-44f0-8a86-8dd74ea565b2\") " pod="openstack/cinder-f448-account-create-update-976jr" Nov 28 13:50:54 crc kubenswrapper[4857]: I1128 13:50:54.927588 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-f448-account-create-update-976jr" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:54.980612 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kq6vn\" (UniqueName: \"kubernetes.io/projected/1237773a-dc19-47c6-90cc-eb0de954d9b4-kube-api-access-kq6vn\") pod \"neutron-db-create-8ngsm\" (UID: \"1237773a-dc19-47c6-90cc-eb0de954d9b4\") " pod="openstack/neutron-db-create-8ngsm" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:54.980739 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0502ed7-3dd0-48b6-ba25-44dae8b21aef-config-data\") pod \"keystone-db-sync-55l57\" (UID: \"d0502ed7-3dd0-48b6-ba25-44dae8b21aef\") " pod="openstack/keystone-db-sync-55l57" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:54.980844 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0502ed7-3dd0-48b6-ba25-44dae8b21aef-combined-ca-bundle\") pod \"keystone-db-sync-55l57\" (UID: \"d0502ed7-3dd0-48b6-ba25-44dae8b21aef\") " pod="openstack/keystone-db-sync-55l57" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:54.980870 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6lbpl\" (UniqueName: \"kubernetes.io/projected/d0502ed7-3dd0-48b6-ba25-44dae8b21aef-kube-api-access-6lbpl\") pod \"keystone-db-sync-55l57\" (UID: \"d0502ed7-3dd0-48b6-ba25-44dae8b21aef\") " pod="openstack/keystone-db-sync-55l57" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:54.981021 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1237773a-dc19-47c6-90cc-eb0de954d9b4-operator-scripts\") pod \"neutron-db-create-8ngsm\" (UID: \"1237773a-dc19-47c6-90cc-eb0de954d9b4\") " pod="openstack/neutron-db-create-8ngsm" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:54.985199 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0502ed7-3dd0-48b6-ba25-44dae8b21aef-combined-ca-bundle\") pod \"keystone-db-sync-55l57\" (UID: \"d0502ed7-3dd0-48b6-ba25-44dae8b21aef\") " pod="openstack/keystone-db-sync-55l57" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:54.996283 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0502ed7-3dd0-48b6-ba25-44dae8b21aef-config-data\") pod \"keystone-db-sync-55l57\" (UID: \"d0502ed7-3dd0-48b6-ba25-44dae8b21aef\") " pod="openstack/keystone-db-sync-55l57" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:54.997199 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1237773a-dc19-47c6-90cc-eb0de954d9b4-operator-scripts\") pod \"neutron-db-create-8ngsm\" (UID: \"1237773a-dc19-47c6-90cc-eb0de954d9b4\") " pod="openstack/neutron-db-create-8ngsm" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.012230 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kq6vn\" (UniqueName: \"kubernetes.io/projected/1237773a-dc19-47c6-90cc-eb0de954d9b4-kube-api-access-kq6vn\") pod \"neutron-db-create-8ngsm\" (UID: \"1237773a-dc19-47c6-90cc-eb0de954d9b4\") " pod="openstack/neutron-db-create-8ngsm" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.013422 
4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6lbpl\" (UniqueName: \"kubernetes.io/projected/d0502ed7-3dd0-48b6-ba25-44dae8b21aef-kube-api-access-6lbpl\") pod \"keystone-db-sync-55l57\" (UID: \"d0502ed7-3dd0-48b6-ba25-44dae8b21aef\") " pod="openstack/keystone-db-sync-55l57" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.017189 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-0f31-account-create-update-zmqth"] Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.018363 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-0f31-account-create-update-zmqth" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.021173 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.034548 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-csvmd" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.060641 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-0f31-account-create-update-zmqth"] Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.083288 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7dacf187-3671-4114-a93e-e2296c8c20b2","Type":"ContainerStarted","Data":"cf76c7b61e8171e795785fc16f94d5afd912c93529cbd3ce652f846abc4be50f"} Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.095632 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-8199-account-create-update-7ssjr" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.157097 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=39.706797473 podStartE2EDuration="46.15707828s" podCreationTimestamp="2025-11-28 13:50:09 +0000 UTC" firstStartedPulling="2025-11-28 13:50:46.146298432 +0000 UTC m=+1296.270239869" lastFinishedPulling="2025-11-28 13:50:52.596579229 +0000 UTC m=+1302.720520676" observedRunningTime="2025-11-28 13:50:55.148642453 +0000 UTC m=+1305.272583890" watchObservedRunningTime="2025-11-28 13:50:55.15707828 +0000 UTC m=+1305.281019717" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.186842 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-8ngsm" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.187902 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-95ndr\" (UniqueName: \"kubernetes.io/projected/ead17b20-d23f-4794-9f28-4a536c60c48c-kube-api-access-95ndr\") pod \"neutron-0f31-account-create-update-zmqth\" (UID: \"ead17b20-d23f-4794-9f28-4a536c60c48c\") " pod="openstack/neutron-0f31-account-create-update-zmqth" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.187999 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ead17b20-d23f-4794-9f28-4a536c60c48c-operator-scripts\") pod \"neutron-0f31-account-create-update-zmqth\" (UID: \"ead17b20-d23f-4794-9f28-4a536c60c48c\") " pod="openstack/neutron-0f31-account-create-update-zmqth" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.190041 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-55l57" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.289852 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ead17b20-d23f-4794-9f28-4a536c60c48c-operator-scripts\") pod \"neutron-0f31-account-create-update-zmqth\" (UID: \"ead17b20-d23f-4794-9f28-4a536c60c48c\") " pod="openstack/neutron-0f31-account-create-update-zmqth" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.290006 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-95ndr\" (UniqueName: \"kubernetes.io/projected/ead17b20-d23f-4794-9f28-4a536c60c48c-kube-api-access-95ndr\") pod \"neutron-0f31-account-create-update-zmqth\" (UID: \"ead17b20-d23f-4794-9f28-4a536c60c48c\") " pod="openstack/neutron-0f31-account-create-update-zmqth" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.290784 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ead17b20-d23f-4794-9f28-4a536c60c48c-operator-scripts\") pod \"neutron-0f31-account-create-update-zmqth\" (UID: \"ead17b20-d23f-4794-9f28-4a536c60c48c\") " pod="openstack/neutron-0f31-account-create-update-zmqth" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.292292 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-s5qtj"] Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.318662 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-95ndr\" (UniqueName: \"kubernetes.io/projected/ead17b20-d23f-4794-9f28-4a536c60c48c-kube-api-access-95ndr\") pod \"neutron-0f31-account-create-update-zmqth\" (UID: \"ead17b20-d23f-4794-9f28-4a536c60c48c\") " pod="openstack/neutron-0f31-account-create-update-zmqth" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.359831 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-0f31-account-create-update-zmqth" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.445060 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-rv95b"] Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.446721 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-rv95b" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.450346 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.458651 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-rv95b"] Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.594858 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/61e58ae5-9323-422f-ae62-a88146ae3beb-dns-swift-storage-0\") pod \"dnsmasq-dns-5c79d794d7-rv95b\" (UID: \"61e58ae5-9323-422f-ae62-a88146ae3beb\") " pod="openstack/dnsmasq-dns-5c79d794d7-rv95b" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.595210 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/61e58ae5-9323-422f-ae62-a88146ae3beb-dns-svc\") pod \"dnsmasq-dns-5c79d794d7-rv95b\" (UID: \"61e58ae5-9323-422f-ae62-a88146ae3beb\") " pod="openstack/dnsmasq-dns-5c79d794d7-rv95b" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.595243 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61e58ae5-9323-422f-ae62-a88146ae3beb-config\") pod \"dnsmasq-dns-5c79d794d7-rv95b\" (UID: \"61e58ae5-9323-422f-ae62-a88146ae3beb\") " pod="openstack/dnsmasq-dns-5c79d794d7-rv95b" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.595277 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/61e58ae5-9323-422f-ae62-a88146ae3beb-ovsdbserver-nb\") pod \"dnsmasq-dns-5c79d794d7-rv95b\" (UID: \"61e58ae5-9323-422f-ae62-a88146ae3beb\") " pod="openstack/dnsmasq-dns-5c79d794d7-rv95b" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.595416 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/61e58ae5-9323-422f-ae62-a88146ae3beb-ovsdbserver-sb\") pod \"dnsmasq-dns-5c79d794d7-rv95b\" (UID: \"61e58ae5-9323-422f-ae62-a88146ae3beb\") " pod="openstack/dnsmasq-dns-5c79d794d7-rv95b" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.595645 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xpk5x\" (UniqueName: \"kubernetes.io/projected/61e58ae5-9323-422f-ae62-a88146ae3beb-kube-api-access-xpk5x\") pod \"dnsmasq-dns-5c79d794d7-rv95b\" (UID: \"61e58ae5-9323-422f-ae62-a88146ae3beb\") " pod="openstack/dnsmasq-dns-5c79d794d7-rv95b" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.696874 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61e58ae5-9323-422f-ae62-a88146ae3beb-config\") pod \"dnsmasq-dns-5c79d794d7-rv95b\" (UID: \"61e58ae5-9323-422f-ae62-a88146ae3beb\") " pod="openstack/dnsmasq-dns-5c79d794d7-rv95b" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.696974 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/61e58ae5-9323-422f-ae62-a88146ae3beb-ovsdbserver-nb\") pod \"dnsmasq-dns-5c79d794d7-rv95b\" (UID: 
\"61e58ae5-9323-422f-ae62-a88146ae3beb\") " pod="openstack/dnsmasq-dns-5c79d794d7-rv95b" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.697014 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/61e58ae5-9323-422f-ae62-a88146ae3beb-ovsdbserver-sb\") pod \"dnsmasq-dns-5c79d794d7-rv95b\" (UID: \"61e58ae5-9323-422f-ae62-a88146ae3beb\") " pod="openstack/dnsmasq-dns-5c79d794d7-rv95b" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.698042 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/61e58ae5-9323-422f-ae62-a88146ae3beb-ovsdbserver-nb\") pod \"dnsmasq-dns-5c79d794d7-rv95b\" (UID: \"61e58ae5-9323-422f-ae62-a88146ae3beb\") " pod="openstack/dnsmasq-dns-5c79d794d7-rv95b" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.698153 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/61e58ae5-9323-422f-ae62-a88146ae3beb-ovsdbserver-sb\") pod \"dnsmasq-dns-5c79d794d7-rv95b\" (UID: \"61e58ae5-9323-422f-ae62-a88146ae3beb\") " pod="openstack/dnsmasq-dns-5c79d794d7-rv95b" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.698209 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61e58ae5-9323-422f-ae62-a88146ae3beb-config\") pod \"dnsmasq-dns-5c79d794d7-rv95b\" (UID: \"61e58ae5-9323-422f-ae62-a88146ae3beb\") " pod="openstack/dnsmasq-dns-5c79d794d7-rv95b" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.699086 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xpk5x\" (UniqueName: \"kubernetes.io/projected/61e58ae5-9323-422f-ae62-a88146ae3beb-kube-api-access-xpk5x\") pod \"dnsmasq-dns-5c79d794d7-rv95b\" (UID: \"61e58ae5-9323-422f-ae62-a88146ae3beb\") " pod="openstack/dnsmasq-dns-5c79d794d7-rv95b" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.699273 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/61e58ae5-9323-422f-ae62-a88146ae3beb-dns-swift-storage-0\") pod \"dnsmasq-dns-5c79d794d7-rv95b\" (UID: \"61e58ae5-9323-422f-ae62-a88146ae3beb\") " pod="openstack/dnsmasq-dns-5c79d794d7-rv95b" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.699310 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/61e58ae5-9323-422f-ae62-a88146ae3beb-dns-svc\") pod \"dnsmasq-dns-5c79d794d7-rv95b\" (UID: \"61e58ae5-9323-422f-ae62-a88146ae3beb\") " pod="openstack/dnsmasq-dns-5c79d794d7-rv95b" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.700068 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/61e58ae5-9323-422f-ae62-a88146ae3beb-dns-swift-storage-0\") pod \"dnsmasq-dns-5c79d794d7-rv95b\" (UID: \"61e58ae5-9323-422f-ae62-a88146ae3beb\") " pod="openstack/dnsmasq-dns-5c79d794d7-rv95b" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.700467 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/61e58ae5-9323-422f-ae62-a88146ae3beb-dns-svc\") pod \"dnsmasq-dns-5c79d794d7-rv95b\" (UID: \"61e58ae5-9323-422f-ae62-a88146ae3beb\") " pod="openstack/dnsmasq-dns-5c79d794d7-rv95b" Nov 28 
13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.724496 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xpk5x\" (UniqueName: \"kubernetes.io/projected/61e58ae5-9323-422f-ae62-a88146ae3beb-kube-api-access-xpk5x\") pod \"dnsmasq-dns-5c79d794d7-rv95b\" (UID: \"61e58ae5-9323-422f-ae62-a88146ae3beb\") " pod="openstack/dnsmasq-dns-5c79d794d7-rv95b" Nov 28 13:50:55 crc kubenswrapper[4857]: I1128 13:50:55.815503 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-rv95b" Nov 28 13:50:56 crc kubenswrapper[4857]: I1128 13:50:56.084222 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-8199-account-create-update-7ssjr"] Nov 28 13:50:56 crc kubenswrapper[4857]: I1128 13:50:56.125674 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-55l57"] Nov 28 13:50:56 crc kubenswrapper[4857]: I1128 13:50:56.135409 4857 generic.go:334] "Generic (PLEG): container finished" podID="967609ba-f165-4bfa-b13d-d23154c329e7" containerID="8308e427b7faf625a092145ecb316284c50212e2707d538795b44f416b0d86a3" exitCode=0 Nov 28 13:50:56 crc kubenswrapper[4857]: I1128 13:50:56.135502 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-s5qtj" event={"ID":"967609ba-f165-4bfa-b13d-d23154c329e7","Type":"ContainerDied","Data":"8308e427b7faf625a092145ecb316284c50212e2707d538795b44f416b0d86a3"} Nov 28 13:50:56 crc kubenswrapper[4857]: I1128 13:50:56.135529 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-s5qtj" event={"ID":"967609ba-f165-4bfa-b13d-d23154c329e7","Type":"ContainerStarted","Data":"5aea58e6102817516d8d96df45e6a3c451f89fb4e8ec4c19055f0fb342171b37"} Nov 28 13:50:56 crc kubenswrapper[4857]: I1128 13:50:56.150606 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-8199-account-create-update-7ssjr" event={"ID":"e49aa4db-e19c-4a91-9faa-d1f86f8aaa8f","Type":"ContainerStarted","Data":"bf55e26660029126cb8b3bc70c827ba48dc522c62836570f24eaa4a0a4051c63"} Nov 28 13:50:56 crc kubenswrapper[4857]: I1128 13:50:56.256810 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-rv95b"] Nov 28 13:50:56 crc kubenswrapper[4857]: I1128 13:50:56.299446 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-0f31-account-create-update-zmqth"] Nov 28 13:50:56 crc kubenswrapper[4857]: I1128 13:50:56.308864 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-8ngsm"] Nov 28 13:50:56 crc kubenswrapper[4857]: I1128 13:50:56.389015 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-csvmd"] Nov 28 13:50:56 crc kubenswrapper[4857]: I1128 13:50:56.396336 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-f448-account-create-update-976jr"] Nov 28 13:50:56 crc kubenswrapper[4857]: W1128 13:50:56.408708 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4c66fec5_8c9d_44f0_8a86_8dd74ea565b2.slice/crio-f592eb06323ed6abff2076f2bf956fd98ce5bbb29b289eaef35d0c9fddc53e27 WatchSource:0}: Error finding container f592eb06323ed6abff2076f2bf956fd98ce5bbb29b289eaef35d0c9fddc53e27: Status 404 returned error can't find the container with id f592eb06323ed6abff2076f2bf956fd98ce5bbb29b289eaef35d0c9fddc53e27 Nov 28 13:50:57 crc kubenswrapper[4857]: I1128 13:50:57.177734 4857 
generic.go:334] "Generic (PLEG): container finished" podID="61e58ae5-9323-422f-ae62-a88146ae3beb" containerID="9e1c7df89c44e6547ef9b5da5406b57747caa7630294b5760a04f4cd26391b71" exitCode=0 Nov 28 13:50:57 crc kubenswrapper[4857]: I1128 13:50:57.177806 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-rv95b" event={"ID":"61e58ae5-9323-422f-ae62-a88146ae3beb","Type":"ContainerDied","Data":"9e1c7df89c44e6547ef9b5da5406b57747caa7630294b5760a04f4cd26391b71"} Nov 28 13:50:57 crc kubenswrapper[4857]: I1128 13:50:57.177836 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-rv95b" event={"ID":"61e58ae5-9323-422f-ae62-a88146ae3beb","Type":"ContainerStarted","Data":"eeb6d6d5383bd9e5dd6eeba7f122b9e73d1d16c62826ef42e397bdbaba99ec2f"} Nov 28 13:50:57 crc kubenswrapper[4857]: I1128 13:50:57.181623 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-55l57" event={"ID":"d0502ed7-3dd0-48b6-ba25-44dae8b21aef","Type":"ContainerStarted","Data":"ce5bbe5f1d9aef1e01817a3d215d02b17205a5930d945e9f735778dc4a90027c"} Nov 28 13:50:57 crc kubenswrapper[4857]: I1128 13:50:57.184028 4857 generic.go:334] "Generic (PLEG): container finished" podID="2df15232-d07b-49ae-99b9-60bb31ad3ff3" containerID="9fab81523ebe7c639a203d355f9824824a56a7ef9ae0b6418036872dfca60c23" exitCode=0 Nov 28 13:50:57 crc kubenswrapper[4857]: I1128 13:50:57.184119 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-csvmd" event={"ID":"2df15232-d07b-49ae-99b9-60bb31ad3ff3","Type":"ContainerDied","Data":"9fab81523ebe7c639a203d355f9824824a56a7ef9ae0b6418036872dfca60c23"} Nov 28 13:50:57 crc kubenswrapper[4857]: I1128 13:50:57.184220 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-csvmd" event={"ID":"2df15232-d07b-49ae-99b9-60bb31ad3ff3","Type":"ContainerStarted","Data":"a926b90ef3dc2e51fc512015eddf71423a619dfff1f0330fa350fe95921ff3eb"} Nov 28 13:50:57 crc kubenswrapper[4857]: I1128 13:50:57.200269 4857 generic.go:334] "Generic (PLEG): container finished" podID="4c66fec5-8c9d-44f0-8a86-8dd74ea565b2" containerID="627f4ebe6bb78ae8ff32c46dccaaae1575f38a3fc98a89b60a2ba53081e516aa" exitCode=0 Nov 28 13:50:57 crc kubenswrapper[4857]: I1128 13:50:57.200383 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-f448-account-create-update-976jr" event={"ID":"4c66fec5-8c9d-44f0-8a86-8dd74ea565b2","Type":"ContainerDied","Data":"627f4ebe6bb78ae8ff32c46dccaaae1575f38a3fc98a89b60a2ba53081e516aa"} Nov 28 13:50:57 crc kubenswrapper[4857]: I1128 13:50:57.200417 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-f448-account-create-update-976jr" event={"ID":"4c66fec5-8c9d-44f0-8a86-8dd74ea565b2","Type":"ContainerStarted","Data":"f592eb06323ed6abff2076f2bf956fd98ce5bbb29b289eaef35d0c9fddc53e27"} Nov 28 13:50:57 crc kubenswrapper[4857]: I1128 13:50:57.207612 4857 generic.go:334] "Generic (PLEG): container finished" podID="e49aa4db-e19c-4a91-9faa-d1f86f8aaa8f" containerID="f69cd95a48cf6d256599abb9dd70bf305cf5ea79a69a64ffbfe2a65b72da84f1" exitCode=0 Nov 28 13:50:57 crc kubenswrapper[4857]: I1128 13:50:57.207698 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-8199-account-create-update-7ssjr" event={"ID":"e49aa4db-e19c-4a91-9faa-d1f86f8aaa8f","Type":"ContainerDied","Data":"f69cd95a48cf6d256599abb9dd70bf305cf5ea79a69a64ffbfe2a65b72da84f1"} Nov 28 13:50:57 crc kubenswrapper[4857]: I1128 13:50:57.209336 4857 
generic.go:334] "Generic (PLEG): container finished" podID="1237773a-dc19-47c6-90cc-eb0de954d9b4" containerID="4deb4acfd0651a7814aadc87831b4c8900ce8d2bd9486f8f6a0ff81b7dd4ddc1" exitCode=0 Nov 28 13:50:57 crc kubenswrapper[4857]: I1128 13:50:57.209410 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-8ngsm" event={"ID":"1237773a-dc19-47c6-90cc-eb0de954d9b4","Type":"ContainerDied","Data":"4deb4acfd0651a7814aadc87831b4c8900ce8d2bd9486f8f6a0ff81b7dd4ddc1"} Nov 28 13:50:57 crc kubenswrapper[4857]: I1128 13:50:57.209432 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-8ngsm" event={"ID":"1237773a-dc19-47c6-90cc-eb0de954d9b4","Type":"ContainerStarted","Data":"b7cd3401bad7151c929a70fa4d247a62241e7b450d9c3e2f64a843866c7cf1b5"} Nov 28 13:50:57 crc kubenswrapper[4857]: I1128 13:50:57.210631 4857 generic.go:334] "Generic (PLEG): container finished" podID="ead17b20-d23f-4794-9f28-4a536c60c48c" containerID="efbe8acece666741759d5f9a21f2800249919a91366fee8526441705dee2a5dc" exitCode=0 Nov 28 13:50:57 crc kubenswrapper[4857]: I1128 13:50:57.210779 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-0f31-account-create-update-zmqth" event={"ID":"ead17b20-d23f-4794-9f28-4a536c60c48c","Type":"ContainerDied","Data":"efbe8acece666741759d5f9a21f2800249919a91366fee8526441705dee2a5dc"} Nov 28 13:50:57 crc kubenswrapper[4857]: I1128 13:50:57.210818 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-0f31-account-create-update-zmqth" event={"ID":"ead17b20-d23f-4794-9f28-4a536c60c48c","Type":"ContainerStarted","Data":"2e7e346d4eb14db42d5b8110bad8a5529098e8dce3d8363c114fc83cb03c1fbd"} Nov 28 13:50:57 crc kubenswrapper[4857]: I1128 13:50:57.570472 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-s5qtj" Nov 28 13:50:57 crc kubenswrapper[4857]: I1128 13:50:57.643375 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cv7z4\" (UniqueName: \"kubernetes.io/projected/967609ba-f165-4bfa-b13d-d23154c329e7-kube-api-access-cv7z4\") pod \"967609ba-f165-4bfa-b13d-d23154c329e7\" (UID: \"967609ba-f165-4bfa-b13d-d23154c329e7\") " Nov 28 13:50:57 crc kubenswrapper[4857]: I1128 13:50:57.643831 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/967609ba-f165-4bfa-b13d-d23154c329e7-operator-scripts\") pod \"967609ba-f165-4bfa-b13d-d23154c329e7\" (UID: \"967609ba-f165-4bfa-b13d-d23154c329e7\") " Nov 28 13:50:57 crc kubenswrapper[4857]: I1128 13:50:57.644677 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/967609ba-f165-4bfa-b13d-d23154c329e7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "967609ba-f165-4bfa-b13d-d23154c329e7" (UID: "967609ba-f165-4bfa-b13d-d23154c329e7"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:50:57 crc kubenswrapper[4857]: I1128 13:50:57.649647 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/967609ba-f165-4bfa-b13d-d23154c329e7-kube-api-access-cv7z4" (OuterVolumeSpecName: "kube-api-access-cv7z4") pod "967609ba-f165-4bfa-b13d-d23154c329e7" (UID: "967609ba-f165-4bfa-b13d-d23154c329e7"). InnerVolumeSpecName "kube-api-access-cv7z4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:50:57 crc kubenswrapper[4857]: I1128 13:50:57.746876 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cv7z4\" (UniqueName: \"kubernetes.io/projected/967609ba-f165-4bfa-b13d-d23154c329e7-kube-api-access-cv7z4\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:57 crc kubenswrapper[4857]: I1128 13:50:57.746918 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/967609ba-f165-4bfa-b13d-d23154c329e7-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:58 crc kubenswrapper[4857]: I1128 13:50:58.222895 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-s5qtj" event={"ID":"967609ba-f165-4bfa-b13d-d23154c329e7","Type":"ContainerDied","Data":"5aea58e6102817516d8d96df45e6a3c451f89fb4e8ec4c19055f0fb342171b37"} Nov 28 13:50:58 crc kubenswrapper[4857]: I1128 13:50:58.222939 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5aea58e6102817516d8d96df45e6a3c451f89fb4e8ec4c19055f0fb342171b37" Nov 28 13:50:58 crc kubenswrapper[4857]: I1128 13:50:58.222970 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-s5qtj" Nov 28 13:50:58 crc kubenswrapper[4857]: I1128 13:50:58.225100 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-rv95b" event={"ID":"61e58ae5-9323-422f-ae62-a88146ae3beb","Type":"ContainerStarted","Data":"b5d127bdd61fecc173b260c4f1511da50b4e298e9d3039f6515ad85b7a11a048"} Nov 28 13:50:58 crc kubenswrapper[4857]: I1128 13:50:58.226335 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c79d794d7-rv95b" Nov 28 13:50:58 crc kubenswrapper[4857]: I1128 13:50:58.257760 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5c79d794d7-rv95b" podStartSLOduration=3.257723001 podStartE2EDuration="3.257723001s" podCreationTimestamp="2025-11-28 13:50:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:50:58.255272065 +0000 UTC m=+1308.379213502" watchObservedRunningTime="2025-11-28 13:50:58.257723001 +0000 UTC m=+1308.381664438" Nov 28 13:50:58 crc kubenswrapper[4857]: I1128 13:50:58.610409 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-8199-account-create-update-7ssjr" Nov 28 13:50:58 crc kubenswrapper[4857]: I1128 13:50:58.667643 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e49aa4db-e19c-4a91-9faa-d1f86f8aaa8f-operator-scripts\") pod \"e49aa4db-e19c-4a91-9faa-d1f86f8aaa8f\" (UID: \"e49aa4db-e19c-4a91-9faa-d1f86f8aaa8f\") " Nov 28 13:50:58 crc kubenswrapper[4857]: I1128 13:50:58.667772 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcs7x\" (UniqueName: \"kubernetes.io/projected/e49aa4db-e19c-4a91-9faa-d1f86f8aaa8f-kube-api-access-xcs7x\") pod \"e49aa4db-e19c-4a91-9faa-d1f86f8aaa8f\" (UID: \"e49aa4db-e19c-4a91-9faa-d1f86f8aaa8f\") " Nov 28 13:50:58 crc kubenswrapper[4857]: I1128 13:50:58.668568 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e49aa4db-e19c-4a91-9faa-d1f86f8aaa8f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e49aa4db-e19c-4a91-9faa-d1f86f8aaa8f" (UID: "e49aa4db-e19c-4a91-9faa-d1f86f8aaa8f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:50:58 crc kubenswrapper[4857]: I1128 13:50:58.691896 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e49aa4db-e19c-4a91-9faa-d1f86f8aaa8f-kube-api-access-xcs7x" (OuterVolumeSpecName: "kube-api-access-xcs7x") pod "e49aa4db-e19c-4a91-9faa-d1f86f8aaa8f" (UID: "e49aa4db-e19c-4a91-9faa-d1f86f8aaa8f"). InnerVolumeSpecName "kube-api-access-xcs7x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:50:58 crc kubenswrapper[4857]: I1128 13:50:58.770995 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e49aa4db-e19c-4a91-9faa-d1f86f8aaa8f-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:58 crc kubenswrapper[4857]: I1128 13:50:58.771034 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcs7x\" (UniqueName: \"kubernetes.io/projected/e49aa4db-e19c-4a91-9faa-d1f86f8aaa8f-kube-api-access-xcs7x\") on node \"crc\" DevicePath \"\"" Nov 28 13:50:59 crc kubenswrapper[4857]: I1128 13:50:59.235450 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-8199-account-create-update-7ssjr" event={"ID":"e49aa4db-e19c-4a91-9faa-d1f86f8aaa8f","Type":"ContainerDied","Data":"bf55e26660029126cb8b3bc70c827ba48dc522c62836570f24eaa4a0a4051c63"} Nov 28 13:50:59 crc kubenswrapper[4857]: I1128 13:50:59.235482 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bf55e26660029126cb8b3bc70c827ba48dc522c62836570f24eaa4a0a4051c63" Nov 28 13:50:59 crc kubenswrapper[4857]: I1128 13:50:59.235530 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-8199-account-create-update-7ssjr" Nov 28 13:50:59 crc kubenswrapper[4857]: I1128 13:50:59.247113 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-f8mpb" event={"ID":"55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed","Type":"ContainerStarted","Data":"72bfcc6e31ffdfc856e26b2344ae2a527b77914dd30b292c69535d00d195afeb"} Nov 28 13:50:59 crc kubenswrapper[4857]: I1128 13:50:59.265731 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-f8mpb" podStartSLOduration=7.601609829 podStartE2EDuration="42.265710887s" podCreationTimestamp="2025-11-28 13:50:17 +0000 UTC" firstStartedPulling="2025-11-28 13:50:23.089787299 +0000 UTC m=+1273.213728736" lastFinishedPulling="2025-11-28 13:50:57.753888357 +0000 UTC m=+1307.877829794" observedRunningTime="2025-11-28 13:50:59.259782048 +0000 UTC m=+1309.383723495" watchObservedRunningTime="2025-11-28 13:50:59.265710887 +0000 UTC m=+1309.389652324" Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.134501 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-8ngsm" Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.143263 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-f448-account-create-update-976jr" Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.157380 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-0f31-account-create-update-zmqth" Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.188788 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-csvmd" Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.229652 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1237773a-dc19-47c6-90cc-eb0de954d9b4-operator-scripts\") pod \"1237773a-dc19-47c6-90cc-eb0de954d9b4\" (UID: \"1237773a-dc19-47c6-90cc-eb0de954d9b4\") " Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.229766 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-95ndr\" (UniqueName: \"kubernetes.io/projected/ead17b20-d23f-4794-9f28-4a536c60c48c-kube-api-access-95ndr\") pod \"ead17b20-d23f-4794-9f28-4a536c60c48c\" (UID: \"ead17b20-d23f-4794-9f28-4a536c60c48c\") " Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.229907 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ead17b20-d23f-4794-9f28-4a536c60c48c-operator-scripts\") pod \"ead17b20-d23f-4794-9f28-4a536c60c48c\" (UID: \"ead17b20-d23f-4794-9f28-4a536c60c48c\") " Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.230019 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m8kns\" (UniqueName: \"kubernetes.io/projected/4c66fec5-8c9d-44f0-8a86-8dd74ea565b2-kube-api-access-m8kns\") pod \"4c66fec5-8c9d-44f0-8a86-8dd74ea565b2\" (UID: \"4c66fec5-8c9d-44f0-8a86-8dd74ea565b2\") " Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.230067 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4c66fec5-8c9d-44f0-8a86-8dd74ea565b2-operator-scripts\") pod \"4c66fec5-8c9d-44f0-8a86-8dd74ea565b2\" (UID: 
\"4c66fec5-8c9d-44f0-8a86-8dd74ea565b2\") " Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.230089 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kq6vn\" (UniqueName: \"kubernetes.io/projected/1237773a-dc19-47c6-90cc-eb0de954d9b4-kube-api-access-kq6vn\") pod \"1237773a-dc19-47c6-90cc-eb0de954d9b4\" (UID: \"1237773a-dc19-47c6-90cc-eb0de954d9b4\") " Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.234369 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ead17b20-d23f-4794-9f28-4a536c60c48c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ead17b20-d23f-4794-9f28-4a536c60c48c" (UID: "ead17b20-d23f-4794-9f28-4a536c60c48c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.234691 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1237773a-dc19-47c6-90cc-eb0de954d9b4-kube-api-access-kq6vn" (OuterVolumeSpecName: "kube-api-access-kq6vn") pod "1237773a-dc19-47c6-90cc-eb0de954d9b4" (UID: "1237773a-dc19-47c6-90cc-eb0de954d9b4"). InnerVolumeSpecName "kube-api-access-kq6vn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.234778 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1237773a-dc19-47c6-90cc-eb0de954d9b4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1237773a-dc19-47c6-90cc-eb0de954d9b4" (UID: "1237773a-dc19-47c6-90cc-eb0de954d9b4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.235605 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c66fec5-8c9d-44f0-8a86-8dd74ea565b2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4c66fec5-8c9d-44f0-8a86-8dd74ea565b2" (UID: "4c66fec5-8c9d-44f0-8a86-8dd74ea565b2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.242542 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c66fec5-8c9d-44f0-8a86-8dd74ea565b2-kube-api-access-m8kns" (OuterVolumeSpecName: "kube-api-access-m8kns") pod "4c66fec5-8c9d-44f0-8a86-8dd74ea565b2" (UID: "4c66fec5-8c9d-44f0-8a86-8dd74ea565b2"). InnerVolumeSpecName "kube-api-access-m8kns". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.244161 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ead17b20-d23f-4794-9f28-4a536c60c48c-kube-api-access-95ndr" (OuterVolumeSpecName: "kube-api-access-95ndr") pod "ead17b20-d23f-4794-9f28-4a536c60c48c" (UID: "ead17b20-d23f-4794-9f28-4a536c60c48c"). InnerVolumeSpecName "kube-api-access-95ndr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.283637 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-f448-account-create-update-976jr" event={"ID":"4c66fec5-8c9d-44f0-8a86-8dd74ea565b2","Type":"ContainerDied","Data":"f592eb06323ed6abff2076f2bf956fd98ce5bbb29b289eaef35d0c9fddc53e27"} Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.283697 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f592eb06323ed6abff2076f2bf956fd98ce5bbb29b289eaef35d0c9fddc53e27" Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.284027 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-f448-account-create-update-976jr" Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.287352 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-8ngsm" event={"ID":"1237773a-dc19-47c6-90cc-eb0de954d9b4","Type":"ContainerDied","Data":"b7cd3401bad7151c929a70fa4d247a62241e7b450d9c3e2f64a843866c7cf1b5"} Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.287381 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b7cd3401bad7151c929a70fa4d247a62241e7b450d9c3e2f64a843866c7cf1b5" Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.287431 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-8ngsm" Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.290846 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-0f31-account-create-update-zmqth" event={"ID":"ead17b20-d23f-4794-9f28-4a536c60c48c","Type":"ContainerDied","Data":"2e7e346d4eb14db42d5b8110bad8a5529098e8dce3d8363c114fc83cb03c1fbd"} Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.290872 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2e7e346d4eb14db42d5b8110bad8a5529098e8dce3d8363c114fc83cb03c1fbd" Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.290896 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-0f31-account-create-update-zmqth" Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.294538 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-55l57" event={"ID":"d0502ed7-3dd0-48b6-ba25-44dae8b21aef","Type":"ContainerStarted","Data":"a0d911c80b3a48f7fb005ed2c796f1f446d2c4a6a5bdf08ff5f72bbc9aaec0f8"} Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.296274 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-csvmd" event={"ID":"2df15232-d07b-49ae-99b9-60bb31ad3ff3","Type":"ContainerDied","Data":"a926b90ef3dc2e51fc512015eddf71423a619dfff1f0330fa350fe95921ff3eb"} Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.296321 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a926b90ef3dc2e51fc512015eddf71423a619dfff1f0330fa350fe95921ff3eb" Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.296444 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-csvmd" Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.315004 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-55l57" podStartSLOduration=2.466412137 podStartE2EDuration="8.314976986s" podCreationTimestamp="2025-11-28 13:50:54 +0000 UTC" firstStartedPulling="2025-11-28 13:50:56.150169054 +0000 UTC m=+1306.274110491" lastFinishedPulling="2025-11-28 13:51:01.998733903 +0000 UTC m=+1312.122675340" observedRunningTime="2025-11-28 13:51:02.308528103 +0000 UTC m=+1312.432469550" watchObservedRunningTime="2025-11-28 13:51:02.314976986 +0000 UTC m=+1312.438918443" Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.331766 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bbwr9\" (UniqueName: \"kubernetes.io/projected/2df15232-d07b-49ae-99b9-60bb31ad3ff3-kube-api-access-bbwr9\") pod \"2df15232-d07b-49ae-99b9-60bb31ad3ff3\" (UID: \"2df15232-d07b-49ae-99b9-60bb31ad3ff3\") " Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.332074 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2df15232-d07b-49ae-99b9-60bb31ad3ff3-operator-scripts\") pod \"2df15232-d07b-49ae-99b9-60bb31ad3ff3\" (UID: \"2df15232-d07b-49ae-99b9-60bb31ad3ff3\") " Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.332457 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1237773a-dc19-47c6-90cc-eb0de954d9b4-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.332522 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-95ndr\" (UniqueName: \"kubernetes.io/projected/ead17b20-d23f-4794-9f28-4a536c60c48c-kube-api-access-95ndr\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.332585 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ead17b20-d23f-4794-9f28-4a536c60c48c-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.332640 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m8kns\" (UniqueName: \"kubernetes.io/projected/4c66fec5-8c9d-44f0-8a86-8dd74ea565b2-kube-api-access-m8kns\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.332696 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4c66fec5-8c9d-44f0-8a86-8dd74ea565b2-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.332967 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kq6vn\" (UniqueName: \"kubernetes.io/projected/1237773a-dc19-47c6-90cc-eb0de954d9b4-kube-api-access-kq6vn\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.332675 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2df15232-d07b-49ae-99b9-60bb31ad3ff3-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2df15232-d07b-49ae-99b9-60bb31ad3ff3" (UID: "2df15232-d07b-49ae-99b9-60bb31ad3ff3"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.335425 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2df15232-d07b-49ae-99b9-60bb31ad3ff3-kube-api-access-bbwr9" (OuterVolumeSpecName: "kube-api-access-bbwr9") pod "2df15232-d07b-49ae-99b9-60bb31ad3ff3" (UID: "2df15232-d07b-49ae-99b9-60bb31ad3ff3"). InnerVolumeSpecName "kube-api-access-bbwr9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.435037 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bbwr9\" (UniqueName: \"kubernetes.io/projected/2df15232-d07b-49ae-99b9-60bb31ad3ff3-kube-api-access-bbwr9\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:02 crc kubenswrapper[4857]: I1128 13:51:02.435083 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2df15232-d07b-49ae-99b9-60bb31ad3ff3-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:05 crc kubenswrapper[4857]: I1128 13:51:05.323143 4857 generic.go:334] "Generic (PLEG): container finished" podID="d0502ed7-3dd0-48b6-ba25-44dae8b21aef" containerID="a0d911c80b3a48f7fb005ed2c796f1f446d2c4a6a5bdf08ff5f72bbc9aaec0f8" exitCode=0 Nov 28 13:51:05 crc kubenswrapper[4857]: I1128 13:51:05.323236 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-55l57" event={"ID":"d0502ed7-3dd0-48b6-ba25-44dae8b21aef","Type":"ContainerDied","Data":"a0d911c80b3a48f7fb005ed2c796f1f446d2c4a6a5bdf08ff5f72bbc9aaec0f8"} Nov 28 13:51:05 crc kubenswrapper[4857]: I1128 13:51:05.817732 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5c79d794d7-rv95b" Nov 28 13:51:05 crc kubenswrapper[4857]: I1128 13:51:05.872128 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-52vgm"] Nov 28 13:51:05 crc kubenswrapper[4857]: I1128 13:51:05.872365 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-b8fbc5445-52vgm" podUID="03eee55b-b660-45e7-91db-4491a89910ad" containerName="dnsmasq-dns" containerID="cri-o://42acf378391cd4c6cf088ef3bd14951905d0f62836c6ba0285422f9e6ac10b74" gracePeriod=10 Nov 28 13:51:06 crc kubenswrapper[4857]: I1128 13:51:06.351367 4857 generic.go:334] "Generic (PLEG): container finished" podID="03eee55b-b660-45e7-91db-4491a89910ad" containerID="42acf378391cd4c6cf088ef3bd14951905d0f62836c6ba0285422f9e6ac10b74" exitCode=0 Nov 28 13:51:06 crc kubenswrapper[4857]: I1128 13:51:06.351562 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-52vgm" event={"ID":"03eee55b-b660-45e7-91db-4491a89910ad","Type":"ContainerDied","Data":"42acf378391cd4c6cf088ef3bd14951905d0f62836c6ba0285422f9e6ac10b74"} Nov 28 13:51:06 crc kubenswrapper[4857]: I1128 13:51:06.456089 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-52vgm" Nov 28 13:51:06 crc kubenswrapper[4857]: I1128 13:51:06.599479 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mdhzb\" (UniqueName: \"kubernetes.io/projected/03eee55b-b660-45e7-91db-4491a89910ad-kube-api-access-mdhzb\") pod \"03eee55b-b660-45e7-91db-4491a89910ad\" (UID: \"03eee55b-b660-45e7-91db-4491a89910ad\") " Nov 28 13:51:06 crc kubenswrapper[4857]: I1128 13:51:06.600577 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/03eee55b-b660-45e7-91db-4491a89910ad-ovsdbserver-sb\") pod \"03eee55b-b660-45e7-91db-4491a89910ad\" (UID: \"03eee55b-b660-45e7-91db-4491a89910ad\") " Nov 28 13:51:06 crc kubenswrapper[4857]: I1128 13:51:06.600622 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/03eee55b-b660-45e7-91db-4491a89910ad-dns-svc\") pod \"03eee55b-b660-45e7-91db-4491a89910ad\" (UID: \"03eee55b-b660-45e7-91db-4491a89910ad\") " Nov 28 13:51:06 crc kubenswrapper[4857]: I1128 13:51:06.600645 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/03eee55b-b660-45e7-91db-4491a89910ad-config\") pod \"03eee55b-b660-45e7-91db-4491a89910ad\" (UID: \"03eee55b-b660-45e7-91db-4491a89910ad\") " Nov 28 13:51:06 crc kubenswrapper[4857]: I1128 13:51:06.600736 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/03eee55b-b660-45e7-91db-4491a89910ad-ovsdbserver-nb\") pod \"03eee55b-b660-45e7-91db-4491a89910ad\" (UID: \"03eee55b-b660-45e7-91db-4491a89910ad\") " Nov 28 13:51:06 crc kubenswrapper[4857]: I1128 13:51:06.606357 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/03eee55b-b660-45e7-91db-4491a89910ad-kube-api-access-mdhzb" (OuterVolumeSpecName: "kube-api-access-mdhzb") pod "03eee55b-b660-45e7-91db-4491a89910ad" (UID: "03eee55b-b660-45e7-91db-4491a89910ad"). InnerVolumeSpecName "kube-api-access-mdhzb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:51:06 crc kubenswrapper[4857]: I1128 13:51:06.647508 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-55l57" Nov 28 13:51:06 crc kubenswrapper[4857]: I1128 13:51:06.651240 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/03eee55b-b660-45e7-91db-4491a89910ad-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "03eee55b-b660-45e7-91db-4491a89910ad" (UID: "03eee55b-b660-45e7-91db-4491a89910ad"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:51:06 crc kubenswrapper[4857]: I1128 13:51:06.653479 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/03eee55b-b660-45e7-91db-4491a89910ad-config" (OuterVolumeSpecName: "config") pod "03eee55b-b660-45e7-91db-4491a89910ad" (UID: "03eee55b-b660-45e7-91db-4491a89910ad"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:51:06 crc kubenswrapper[4857]: I1128 13:51:06.715393 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/03eee55b-b660-45e7-91db-4491a89910ad-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "03eee55b-b660-45e7-91db-4491a89910ad" (UID: "03eee55b-b660-45e7-91db-4491a89910ad"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:51:06 crc kubenswrapper[4857]: I1128 13:51:06.716007 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0502ed7-3dd0-48b6-ba25-44dae8b21aef-config-data\") pod \"d0502ed7-3dd0-48b6-ba25-44dae8b21aef\" (UID: \"d0502ed7-3dd0-48b6-ba25-44dae8b21aef\") " Nov 28 13:51:06 crc kubenswrapper[4857]: I1128 13:51:06.716041 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6lbpl\" (UniqueName: \"kubernetes.io/projected/d0502ed7-3dd0-48b6-ba25-44dae8b21aef-kube-api-access-6lbpl\") pod \"d0502ed7-3dd0-48b6-ba25-44dae8b21aef\" (UID: \"d0502ed7-3dd0-48b6-ba25-44dae8b21aef\") " Nov 28 13:51:06 crc kubenswrapper[4857]: I1128 13:51:06.716164 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0502ed7-3dd0-48b6-ba25-44dae8b21aef-combined-ca-bundle\") pod \"d0502ed7-3dd0-48b6-ba25-44dae8b21aef\" (UID: \"d0502ed7-3dd0-48b6-ba25-44dae8b21aef\") " Nov 28 13:51:06 crc kubenswrapper[4857]: I1128 13:51:06.716529 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mdhzb\" (UniqueName: \"kubernetes.io/projected/03eee55b-b660-45e7-91db-4491a89910ad-kube-api-access-mdhzb\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:06 crc kubenswrapper[4857]: I1128 13:51:06.716547 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/03eee55b-b660-45e7-91db-4491a89910ad-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:06 crc kubenswrapper[4857]: I1128 13:51:06.716555 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/03eee55b-b660-45e7-91db-4491a89910ad-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:06 crc kubenswrapper[4857]: I1128 13:51:06.716565 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/03eee55b-b660-45e7-91db-4491a89910ad-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:06 crc kubenswrapper[4857]: I1128 13:51:06.717022 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/03eee55b-b660-45e7-91db-4491a89910ad-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "03eee55b-b660-45e7-91db-4491a89910ad" (UID: "03eee55b-b660-45e7-91db-4491a89910ad"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:51:06 crc kubenswrapper[4857]: I1128 13:51:06.719838 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0502ed7-3dd0-48b6-ba25-44dae8b21aef-kube-api-access-6lbpl" (OuterVolumeSpecName: "kube-api-access-6lbpl") pod "d0502ed7-3dd0-48b6-ba25-44dae8b21aef" (UID: "d0502ed7-3dd0-48b6-ba25-44dae8b21aef"). InnerVolumeSpecName "kube-api-access-6lbpl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:51:06 crc kubenswrapper[4857]: I1128 13:51:06.741548 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0502ed7-3dd0-48b6-ba25-44dae8b21aef-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d0502ed7-3dd0-48b6-ba25-44dae8b21aef" (UID: "d0502ed7-3dd0-48b6-ba25-44dae8b21aef"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:51:06 crc kubenswrapper[4857]: I1128 13:51:06.760465 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0502ed7-3dd0-48b6-ba25-44dae8b21aef-config-data" (OuterVolumeSpecName: "config-data") pod "d0502ed7-3dd0-48b6-ba25-44dae8b21aef" (UID: "d0502ed7-3dd0-48b6-ba25-44dae8b21aef"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:51:06 crc kubenswrapper[4857]: I1128 13:51:06.820580 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/03eee55b-b660-45e7-91db-4491a89910ad-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:06 crc kubenswrapper[4857]: I1128 13:51:06.820820 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0502ed7-3dd0-48b6-ba25-44dae8b21aef-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:06 crc kubenswrapper[4857]: I1128 13:51:06.820836 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0502ed7-3dd0-48b6-ba25-44dae8b21aef-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:06 crc kubenswrapper[4857]: I1128 13:51:06.820859 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6lbpl\" (UniqueName: \"kubernetes.io/projected/d0502ed7-3dd0-48b6-ba25-44dae8b21aef-kube-api-access-6lbpl\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.364846 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-55l57" event={"ID":"d0502ed7-3dd0-48b6-ba25-44dae8b21aef","Type":"ContainerDied","Data":"ce5bbe5f1d9aef1e01817a3d215d02b17205a5930d945e9f735778dc4a90027c"} Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.364886 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-55l57" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.364902 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ce5bbe5f1d9aef1e01817a3d215d02b17205a5930d945e9f735778dc4a90027c" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.370000 4857 generic.go:334] "Generic (PLEG): container finished" podID="55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed" containerID="72bfcc6e31ffdfc856e26b2344ae2a527b77914dd30b292c69535d00d195afeb" exitCode=0 Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.370086 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-f8mpb" event={"ID":"55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed","Type":"ContainerDied","Data":"72bfcc6e31ffdfc856e26b2344ae2a527b77914dd30b292c69535d00d195afeb"} Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.374906 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-52vgm" event={"ID":"03eee55b-b660-45e7-91db-4491a89910ad","Type":"ContainerDied","Data":"8721d9c897df39ee23bffb1d3c58e7ebf746fed236910d50fd3130807eb10252"} Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.374969 4857 scope.go:117] "RemoveContainer" containerID="42acf378391cd4c6cf088ef3bd14951905d0f62836c6ba0285422f9e6ac10b74" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.375008 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-52vgm" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.405503 4857 scope.go:117] "RemoveContainer" containerID="bed62605c70428e6de44941ccf888faecc30e99f6c13e0d0cef8fbd5f61f11da" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.412105 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-52vgm"] Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.422677 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-52vgm"] Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.503079 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5b868669f-qpg6v"] Nov 28 13:51:07 crc kubenswrapper[4857]: E1128 13:51:07.503430 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ead17b20-d23f-4794-9f28-4a536c60c48c" containerName="mariadb-account-create-update" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.503453 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="ead17b20-d23f-4794-9f28-4a536c60c48c" containerName="mariadb-account-create-update" Nov 28 13:51:07 crc kubenswrapper[4857]: E1128 13:51:07.503473 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1237773a-dc19-47c6-90cc-eb0de954d9b4" containerName="mariadb-database-create" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.503485 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="1237773a-dc19-47c6-90cc-eb0de954d9b4" containerName="mariadb-database-create" Nov 28 13:51:07 crc kubenswrapper[4857]: E1128 13:51:07.503501 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0502ed7-3dd0-48b6-ba25-44dae8b21aef" containerName="keystone-db-sync" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.503509 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0502ed7-3dd0-48b6-ba25-44dae8b21aef" containerName="keystone-db-sync" Nov 28 13:51:07 crc kubenswrapper[4857]: E1128 13:51:07.503525 4857 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="4c66fec5-8c9d-44f0-8a86-8dd74ea565b2" containerName="mariadb-account-create-update" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.503532 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c66fec5-8c9d-44f0-8a86-8dd74ea565b2" containerName="mariadb-account-create-update" Nov 28 13:51:07 crc kubenswrapper[4857]: E1128 13:51:07.503545 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="967609ba-f165-4bfa-b13d-d23154c329e7" containerName="mariadb-database-create" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.503552 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="967609ba-f165-4bfa-b13d-d23154c329e7" containerName="mariadb-database-create" Nov 28 13:51:07 crc kubenswrapper[4857]: E1128 13:51:07.503576 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e49aa4db-e19c-4a91-9faa-d1f86f8aaa8f" containerName="mariadb-account-create-update" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.503583 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="e49aa4db-e19c-4a91-9faa-d1f86f8aaa8f" containerName="mariadb-account-create-update" Nov 28 13:51:07 crc kubenswrapper[4857]: E1128 13:51:07.503598 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2df15232-d07b-49ae-99b9-60bb31ad3ff3" containerName="mariadb-database-create" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.503605 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="2df15232-d07b-49ae-99b9-60bb31ad3ff3" containerName="mariadb-database-create" Nov 28 13:51:07 crc kubenswrapper[4857]: E1128 13:51:07.503615 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03eee55b-b660-45e7-91db-4491a89910ad" containerName="dnsmasq-dns" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.503622 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="03eee55b-b660-45e7-91db-4491a89910ad" containerName="dnsmasq-dns" Nov 28 13:51:07 crc kubenswrapper[4857]: E1128 13:51:07.503635 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03eee55b-b660-45e7-91db-4491a89910ad" containerName="init" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.503641 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="03eee55b-b660-45e7-91db-4491a89910ad" containerName="init" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.503830 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="03eee55b-b660-45e7-91db-4491a89910ad" containerName="dnsmasq-dns" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.503854 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="e49aa4db-e19c-4a91-9faa-d1f86f8aaa8f" containerName="mariadb-account-create-update" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.503868 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="ead17b20-d23f-4794-9f28-4a536c60c48c" containerName="mariadb-account-create-update" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.503883 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="1237773a-dc19-47c6-90cc-eb0de954d9b4" containerName="mariadb-database-create" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.503892 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="967609ba-f165-4bfa-b13d-d23154c329e7" containerName="mariadb-database-create" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.503902 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="2df15232-d07b-49ae-99b9-60bb31ad3ff3" 
containerName="mariadb-database-create" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.503916 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0502ed7-3dd0-48b6-ba25-44dae8b21aef" containerName="keystone-db-sync" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.503929 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c66fec5-8c9d-44f0-8a86-8dd74ea565b2" containerName="mariadb-account-create-update" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.505169 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b868669f-qpg6v" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.525587 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b868669f-qpg6v"] Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.584092 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-92t2k"] Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.585127 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-92t2k" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.592713 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.592801 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.593083 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-49rtd" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.593149 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.593236 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.610432 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-92t2k"] Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.632692 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4fd8fd83-9147-4bbd-80f4-1eb001e2673b-dns-svc\") pod \"dnsmasq-dns-5b868669f-qpg6v\" (UID: \"4fd8fd83-9147-4bbd-80f4-1eb001e2673b\") " pod="openstack/dnsmasq-dns-5b868669f-qpg6v" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.632758 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7cv2l\" (UniqueName: \"kubernetes.io/projected/4fd8fd83-9147-4bbd-80f4-1eb001e2673b-kube-api-access-7cv2l\") pod \"dnsmasq-dns-5b868669f-qpg6v\" (UID: \"4fd8fd83-9147-4bbd-80f4-1eb001e2673b\") " pod="openstack/dnsmasq-dns-5b868669f-qpg6v" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.632805 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4fd8fd83-9147-4bbd-80f4-1eb001e2673b-dns-swift-storage-0\") pod \"dnsmasq-dns-5b868669f-qpg6v\" (UID: \"4fd8fd83-9147-4bbd-80f4-1eb001e2673b\") " pod="openstack/dnsmasq-dns-5b868669f-qpg6v" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.632830 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/4fd8fd83-9147-4bbd-80f4-1eb001e2673b-config\") pod \"dnsmasq-dns-5b868669f-qpg6v\" (UID: \"4fd8fd83-9147-4bbd-80f4-1eb001e2673b\") " pod="openstack/dnsmasq-dns-5b868669f-qpg6v" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.632853 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4fd8fd83-9147-4bbd-80f4-1eb001e2673b-ovsdbserver-nb\") pod \"dnsmasq-dns-5b868669f-qpg6v\" (UID: \"4fd8fd83-9147-4bbd-80f4-1eb001e2673b\") " pod="openstack/dnsmasq-dns-5b868669f-qpg6v" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.632893 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4fd8fd83-9147-4bbd-80f4-1eb001e2673b-ovsdbserver-sb\") pod \"dnsmasq-dns-5b868669f-qpg6v\" (UID: \"4fd8fd83-9147-4bbd-80f4-1eb001e2673b\") " pod="openstack/dnsmasq-dns-5b868669f-qpg6v" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.734882 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c8cf548-437e-4662-89ea-46923e0611dd-scripts\") pod \"keystone-bootstrap-92t2k\" (UID: \"0c8cf548-437e-4662-89ea-46923e0611dd\") " pod="openstack/keystone-bootstrap-92t2k" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.735179 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4fd8fd83-9147-4bbd-80f4-1eb001e2673b-dns-svc\") pod \"dnsmasq-dns-5b868669f-qpg6v\" (UID: \"4fd8fd83-9147-4bbd-80f4-1eb001e2673b\") " pod="openstack/dnsmasq-dns-5b868669f-qpg6v" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.735265 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gw5hr\" (UniqueName: \"kubernetes.io/projected/0c8cf548-437e-4662-89ea-46923e0611dd-kube-api-access-gw5hr\") pod \"keystone-bootstrap-92t2k\" (UID: \"0c8cf548-437e-4662-89ea-46923e0611dd\") " pod="openstack/keystone-bootstrap-92t2k" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.735356 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7cv2l\" (UniqueName: \"kubernetes.io/projected/4fd8fd83-9147-4bbd-80f4-1eb001e2673b-kube-api-access-7cv2l\") pod \"dnsmasq-dns-5b868669f-qpg6v\" (UID: \"4fd8fd83-9147-4bbd-80f4-1eb001e2673b\") " pod="openstack/dnsmasq-dns-5b868669f-qpg6v" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.735428 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c8cf548-437e-4662-89ea-46923e0611dd-combined-ca-bundle\") pod \"keystone-bootstrap-92t2k\" (UID: \"0c8cf548-437e-4662-89ea-46923e0611dd\") " pod="openstack/keystone-bootstrap-92t2k" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.735480 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4fd8fd83-9147-4bbd-80f4-1eb001e2673b-dns-swift-storage-0\") pod \"dnsmasq-dns-5b868669f-qpg6v\" (UID: \"4fd8fd83-9147-4bbd-80f4-1eb001e2673b\") " pod="openstack/dnsmasq-dns-5b868669f-qpg6v" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.735501 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/0c8cf548-437e-4662-89ea-46923e0611dd-config-data\") pod \"keystone-bootstrap-92t2k\" (UID: \"0c8cf548-437e-4662-89ea-46923e0611dd\") " pod="openstack/keystone-bootstrap-92t2k" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.735540 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fd8fd83-9147-4bbd-80f4-1eb001e2673b-config\") pod \"dnsmasq-dns-5b868669f-qpg6v\" (UID: \"4fd8fd83-9147-4bbd-80f4-1eb001e2673b\") " pod="openstack/dnsmasq-dns-5b868669f-qpg6v" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.735670 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4fd8fd83-9147-4bbd-80f4-1eb001e2673b-ovsdbserver-nb\") pod \"dnsmasq-dns-5b868669f-qpg6v\" (UID: \"4fd8fd83-9147-4bbd-80f4-1eb001e2673b\") " pod="openstack/dnsmasq-dns-5b868669f-qpg6v" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.735725 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4fd8fd83-9147-4bbd-80f4-1eb001e2673b-ovsdbserver-sb\") pod \"dnsmasq-dns-5b868669f-qpg6v\" (UID: \"4fd8fd83-9147-4bbd-80f4-1eb001e2673b\") " pod="openstack/dnsmasq-dns-5b868669f-qpg6v" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.735750 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/0c8cf548-437e-4662-89ea-46923e0611dd-credential-keys\") pod \"keystone-bootstrap-92t2k\" (UID: \"0c8cf548-437e-4662-89ea-46923e0611dd\") " pod="openstack/keystone-bootstrap-92t2k" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.735846 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0c8cf548-437e-4662-89ea-46923e0611dd-fernet-keys\") pod \"keystone-bootstrap-92t2k\" (UID: \"0c8cf548-437e-4662-89ea-46923e0611dd\") " pod="openstack/keystone-bootstrap-92t2k" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.736460 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fd8fd83-9147-4bbd-80f4-1eb001e2673b-config\") pod \"dnsmasq-dns-5b868669f-qpg6v\" (UID: \"4fd8fd83-9147-4bbd-80f4-1eb001e2673b\") " pod="openstack/dnsmasq-dns-5b868669f-qpg6v" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.736471 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4fd8fd83-9147-4bbd-80f4-1eb001e2673b-dns-svc\") pod \"dnsmasq-dns-5b868669f-qpg6v\" (UID: \"4fd8fd83-9147-4bbd-80f4-1eb001e2673b\") " pod="openstack/dnsmasq-dns-5b868669f-qpg6v" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.736530 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4fd8fd83-9147-4bbd-80f4-1eb001e2673b-dns-swift-storage-0\") pod \"dnsmasq-dns-5b868669f-qpg6v\" (UID: \"4fd8fd83-9147-4bbd-80f4-1eb001e2673b\") " pod="openstack/dnsmasq-dns-5b868669f-qpg6v" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.736732 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4fd8fd83-9147-4bbd-80f4-1eb001e2673b-ovsdbserver-nb\") pod 
\"dnsmasq-dns-5b868669f-qpg6v\" (UID: \"4fd8fd83-9147-4bbd-80f4-1eb001e2673b\") " pod="openstack/dnsmasq-dns-5b868669f-qpg6v" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.736863 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4fd8fd83-9147-4bbd-80f4-1eb001e2673b-ovsdbserver-sb\") pod \"dnsmasq-dns-5b868669f-qpg6v\" (UID: \"4fd8fd83-9147-4bbd-80f4-1eb001e2673b\") " pod="openstack/dnsmasq-dns-5b868669f-qpg6v" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.749348 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-f9rw6"] Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.750687 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-f9rw6" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.752464 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.752482 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.761177 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-f9rw6"] Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.799346 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-d8fnj" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.803093 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7cv2l\" (UniqueName: \"kubernetes.io/projected/4fd8fd83-9147-4bbd-80f4-1eb001e2673b-kube-api-access-7cv2l\") pod \"dnsmasq-dns-5b868669f-qpg6v\" (UID: \"4fd8fd83-9147-4bbd-80f4-1eb001e2673b\") " pod="openstack/dnsmasq-dns-5b868669f-qpg6v" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.821610 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b868669f-qpg6v" Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.822477 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.824389 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.828028 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.837383 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.838325 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0c8cf548-437e-4662-89ea-46923e0611dd-fernet-keys\") pod \"keystone-bootstrap-92t2k\" (UID: \"0c8cf548-437e-4662-89ea-46923e0611dd\") " pod="openstack/keystone-bootstrap-92t2k"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.838362 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/aebc3704-6a31-4813-8826-622ffb7f6934-etc-machine-id\") pod \"cinder-db-sync-f9rw6\" (UID: \"aebc3704-6a31-4813-8826-622ffb7f6934\") " pod="openstack/cinder-db-sync-f9rw6"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.838383 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m64mr\" (UniqueName: \"kubernetes.io/projected/aebc3704-6a31-4813-8826-622ffb7f6934-kube-api-access-m64mr\") pod \"cinder-db-sync-f9rw6\" (UID: \"aebc3704-6a31-4813-8826-622ffb7f6934\") " pod="openstack/cinder-db-sync-f9rw6"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.838415 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c8cf548-437e-4662-89ea-46923e0611dd-scripts\") pod \"keystone-bootstrap-92t2k\" (UID: \"0c8cf548-437e-4662-89ea-46923e0611dd\") " pod="openstack/keystone-bootstrap-92t2k"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.838454 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aebc3704-6a31-4813-8826-622ffb7f6934-scripts\") pod \"cinder-db-sync-f9rw6\" (UID: \"aebc3704-6a31-4813-8826-622ffb7f6934\") " pod="openstack/cinder-db-sync-f9rw6"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.838474 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aebc3704-6a31-4813-8826-622ffb7f6934-config-data\") pod \"cinder-db-sync-f9rw6\" (UID: \"aebc3704-6a31-4813-8826-622ffb7f6934\") " pod="openstack/cinder-db-sync-f9rw6"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.838497 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gw5hr\" (UniqueName: \"kubernetes.io/projected/0c8cf548-437e-4662-89ea-46923e0611dd-kube-api-access-gw5hr\") pod \"keystone-bootstrap-92t2k\" (UID: \"0c8cf548-437e-4662-89ea-46923e0611dd\") " pod="openstack/keystone-bootstrap-92t2k"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.838535 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aebc3704-6a31-4813-8826-622ffb7f6934-combined-ca-bundle\") pod \"cinder-db-sync-f9rw6\" (UID: \"aebc3704-6a31-4813-8826-622ffb7f6934\") " pod="openstack/cinder-db-sync-f9rw6"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.838557 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c8cf548-437e-4662-89ea-46923e0611dd-combined-ca-bundle\") pod \"keystone-bootstrap-92t2k\" (UID: \"0c8cf548-437e-4662-89ea-46923e0611dd\") " pod="openstack/keystone-bootstrap-92t2k"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.838580 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c8cf548-437e-4662-89ea-46923e0611dd-config-data\") pod \"keystone-bootstrap-92t2k\" (UID: \"0c8cf548-437e-4662-89ea-46923e0611dd\") " pod="openstack/keystone-bootstrap-92t2k"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.838606 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/aebc3704-6a31-4813-8826-622ffb7f6934-db-sync-config-data\") pod \"cinder-db-sync-f9rw6\" (UID: \"aebc3704-6a31-4813-8826-622ffb7f6934\") " pod="openstack/cinder-db-sync-f9rw6"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.838628 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/0c8cf548-437e-4662-89ea-46923e0611dd-credential-keys\") pod \"keystone-bootstrap-92t2k\" (UID: \"0c8cf548-437e-4662-89ea-46923e0611dd\") " pod="openstack/keystone-bootstrap-92t2k"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.841698 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/0c8cf548-437e-4662-89ea-46923e0611dd-credential-keys\") pod \"keystone-bootstrap-92t2k\" (UID: \"0c8cf548-437e-4662-89ea-46923e0611dd\") " pod="openstack/keystone-bootstrap-92t2k"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.845107 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c8cf548-437e-4662-89ea-46923e0611dd-combined-ca-bundle\") pod \"keystone-bootstrap-92t2k\" (UID: \"0c8cf548-437e-4662-89ea-46923e0611dd\") " pod="openstack/keystone-bootstrap-92t2k"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.848592 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0c8cf548-437e-4662-89ea-46923e0611dd-fernet-keys\") pod \"keystone-bootstrap-92t2k\" (UID: \"0c8cf548-437e-4662-89ea-46923e0611dd\") " pod="openstack/keystone-bootstrap-92t2k"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.853681 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c8cf548-437e-4662-89ea-46923e0611dd-config-data\") pod \"keystone-bootstrap-92t2k\" (UID: \"0c8cf548-437e-4662-89ea-46923e0611dd\") " pod="openstack/keystone-bootstrap-92t2k"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.859582 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c8cf548-437e-4662-89ea-46923e0611dd-scripts\") pod \"keystone-bootstrap-92t2k\" (UID: \"0c8cf548-437e-4662-89ea-46923e0611dd\") " pod="openstack/keystone-bootstrap-92t2k"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.884717 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.907498 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gw5hr\" (UniqueName: \"kubernetes.io/projected/0c8cf548-437e-4662-89ea-46923e0611dd-kube-api-access-gw5hr\") pod \"keystone-bootstrap-92t2k\" (UID: \"0c8cf548-437e-4662-89ea-46923e0611dd\") " pod="openstack/keystone-bootstrap-92t2k"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.908672 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-92t2k"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.930109 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-sb67r"]
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.931237 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-sb67r"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.936887 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.937329 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-bhzgs"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.937452 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.939760 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b948893-d309-41ca-987c-287ee0b12ef2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9b948893-d309-41ca-987c-287ee0b12ef2\") " pod="openstack/ceilometer-0"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.939808 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b948893-d309-41ca-987c-287ee0b12ef2-config-data\") pod \"ceilometer-0\" (UID: \"9b948893-d309-41ca-987c-287ee0b12ef2\") " pod="openstack/ceilometer-0"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.939834 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/aebc3704-6a31-4813-8826-622ffb7f6934-etc-machine-id\") pod \"cinder-db-sync-f9rw6\" (UID: \"aebc3704-6a31-4813-8826-622ffb7f6934\") " pod="openstack/cinder-db-sync-f9rw6"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.939848 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9b948893-d309-41ca-987c-287ee0b12ef2-log-httpd\") pod \"ceilometer-0\" (UID: \"9b948893-d309-41ca-987c-287ee0b12ef2\") " pod="openstack/ceilometer-0"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.939866 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m64mr\" (UniqueName: \"kubernetes.io/projected/aebc3704-6a31-4813-8826-622ffb7f6934-kube-api-access-m64mr\") pod \"cinder-db-sync-f9rw6\" (UID: \"aebc3704-6a31-4813-8826-622ffb7f6934\") " pod="openstack/cinder-db-sync-f9rw6"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.939912 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aebc3704-6a31-4813-8826-622ffb7f6934-scripts\") pod \"cinder-db-sync-f9rw6\" (UID: \"aebc3704-6a31-4813-8826-622ffb7f6934\") " pod="openstack/cinder-db-sync-f9rw6"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.939930 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pr9fn\" (UniqueName: \"kubernetes.io/projected/9b948893-d309-41ca-987c-287ee0b12ef2-kube-api-access-pr9fn\") pod \"ceilometer-0\" (UID: \"9b948893-d309-41ca-987c-287ee0b12ef2\") " pod="openstack/ceilometer-0"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.940098 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aebc3704-6a31-4813-8826-622ffb7f6934-config-data\") pod \"cinder-db-sync-f9rw6\" (UID: \"aebc3704-6a31-4813-8826-622ffb7f6934\") " pod="openstack/cinder-db-sync-f9rw6"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.940143 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b948893-d309-41ca-987c-287ee0b12ef2-scripts\") pod \"ceilometer-0\" (UID: \"9b948893-d309-41ca-987c-287ee0b12ef2\") " pod="openstack/ceilometer-0"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.940172 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aebc3704-6a31-4813-8826-622ffb7f6934-combined-ca-bundle\") pod \"cinder-db-sync-f9rw6\" (UID: \"aebc3704-6a31-4813-8826-622ffb7f6934\") " pod="openstack/cinder-db-sync-f9rw6"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.940195 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9b948893-d309-41ca-987c-287ee0b12ef2-run-httpd\") pod \"ceilometer-0\" (UID: \"9b948893-d309-41ca-987c-287ee0b12ef2\") " pod="openstack/ceilometer-0"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.940213 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9b948893-d309-41ca-987c-287ee0b12ef2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9b948893-d309-41ca-987c-287ee0b12ef2\") " pod="openstack/ceilometer-0"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.940238 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/aebc3704-6a31-4813-8826-622ffb7f6934-db-sync-config-data\") pod \"cinder-db-sync-f9rw6\" (UID: \"aebc3704-6a31-4813-8826-622ffb7f6934\") " pod="openstack/cinder-db-sync-f9rw6"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.940861 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/aebc3704-6a31-4813-8826-622ffb7f6934-etc-machine-id\") pod \"cinder-db-sync-f9rw6\" (UID: \"aebc3704-6a31-4813-8826-622ffb7f6934\") " pod="openstack/cinder-db-sync-f9rw6"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.943805 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aebc3704-6a31-4813-8826-622ffb7f6934-scripts\") pod \"cinder-db-sync-f9rw6\" (UID: \"aebc3704-6a31-4813-8826-622ffb7f6934\") " pod="openstack/cinder-db-sync-f9rw6"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.951125 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/aebc3704-6a31-4813-8826-622ffb7f6934-db-sync-config-data\") pod \"cinder-db-sync-f9rw6\" (UID: \"aebc3704-6a31-4813-8826-622ffb7f6934\") " pod="openstack/cinder-db-sync-f9rw6"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.952107 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aebc3704-6a31-4813-8826-622ffb7f6934-config-data\") pod \"cinder-db-sync-f9rw6\" (UID: \"aebc3704-6a31-4813-8826-622ffb7f6934\") " pod="openstack/cinder-db-sync-f9rw6"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.952729 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aebc3704-6a31-4813-8826-622ffb7f6934-combined-ca-bundle\") pod \"cinder-db-sync-f9rw6\" (UID: \"aebc3704-6a31-4813-8826-622ffb7f6934\") " pod="openstack/cinder-db-sync-f9rw6"
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.967306 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-sb67r"]
Nov 28 13:51:07 crc kubenswrapper[4857]: I1128 13:51:07.967690 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m64mr\" (UniqueName: \"kubernetes.io/projected/aebc3704-6a31-4813-8826-622ffb7f6934-kube-api-access-m64mr\") pod \"cinder-db-sync-f9rw6\" (UID: \"aebc3704-6a31-4813-8826-622ffb7f6934\") " pod="openstack/cinder-db-sync-f9rw6"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.016367 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-hw6nk"]
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.017490 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-hw6nk"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.026308 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.029869 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-kx7nh"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.030082 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.045242 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/06f8c64b-0075-49d3-a2ae-0ecc1d03232a-config\") pod \"neutron-db-sync-sb67r\" (UID: \"06f8c64b-0075-49d3-a2ae-0ecc1d03232a\") " pod="openstack/neutron-db-sync-sb67r"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.045321 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9b948893-d309-41ca-987c-287ee0b12ef2-log-httpd\") pod \"ceilometer-0\" (UID: \"9b948893-d309-41ca-987c-287ee0b12ef2\") " pod="openstack/ceilometer-0"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.045395 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06f8c64b-0075-49d3-a2ae-0ecc1d03232a-combined-ca-bundle\") pod \"neutron-db-sync-sb67r\" (UID: \"06f8c64b-0075-49d3-a2ae-0ecc1d03232a\") " pod="openstack/neutron-db-sync-sb67r"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.045493 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pr9fn\" (UniqueName: \"kubernetes.io/projected/9b948893-d309-41ca-987c-287ee0b12ef2-kube-api-access-pr9fn\") pod \"ceilometer-0\" (UID: \"9b948893-d309-41ca-987c-287ee0b12ef2\") " pod="openstack/ceilometer-0"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.045564 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b948893-d309-41ca-987c-287ee0b12ef2-scripts\") pod \"ceilometer-0\" (UID: \"9b948893-d309-41ca-987c-287ee0b12ef2\") " pod="openstack/ceilometer-0"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.045646 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9b948893-d309-41ca-987c-287ee0b12ef2-run-httpd\") pod \"ceilometer-0\" (UID: \"9b948893-d309-41ca-987c-287ee0b12ef2\") " pod="openstack/ceilometer-0"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.045695 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9b948893-d309-41ca-987c-287ee0b12ef2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9b948893-d309-41ca-987c-287ee0b12ef2\") " pod="openstack/ceilometer-0"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.045731 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-64gpp\" (UniqueName: \"kubernetes.io/projected/06f8c64b-0075-49d3-a2ae-0ecc1d03232a-kube-api-access-64gpp\") pod \"neutron-db-sync-sb67r\" (UID: \"06f8c64b-0075-49d3-a2ae-0ecc1d03232a\") " pod="openstack/neutron-db-sync-sb67r"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.045811 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b948893-d309-41ca-987c-287ee0b12ef2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9b948893-d309-41ca-987c-287ee0b12ef2\") " pod="openstack/ceilometer-0"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.045890 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b948893-d309-41ca-987c-287ee0b12ef2-config-data\") pod \"ceilometer-0\" (UID: \"9b948893-d309-41ca-987c-287ee0b12ef2\") " pod="openstack/ceilometer-0"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.048702 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9b948893-d309-41ca-987c-287ee0b12ef2-log-httpd\") pod \"ceilometer-0\" (UID: \"9b948893-d309-41ca-987c-287ee0b12ef2\") " pod="openstack/ceilometer-0"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.065801 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9b948893-d309-41ca-987c-287ee0b12ef2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"9b948893-d309-41ca-987c-287ee0b12ef2\") " pod="openstack/ceilometer-0"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.066189 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9b948893-d309-41ca-987c-287ee0b12ef2-run-httpd\") pod \"ceilometer-0\" (UID: \"9b948893-d309-41ca-987c-287ee0b12ef2\") " pod="openstack/ceilometer-0"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.066236 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b868669f-qpg6v"]
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.081980 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b948893-d309-41ca-987c-287ee0b12ef2-scripts\") pod \"ceilometer-0\" (UID: \"9b948893-d309-41ca-987c-287ee0b12ef2\") " pod="openstack/ceilometer-0"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.086395 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b948893-d309-41ca-987c-287ee0b12ef2-config-data\") pod \"ceilometer-0\" (UID: \"9b948893-d309-41ca-987c-287ee0b12ef2\") " pod="openstack/ceilometer-0"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.088018 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pr9fn\" (UniqueName: \"kubernetes.io/projected/9b948893-d309-41ca-987c-287ee0b12ef2-kube-api-access-pr9fn\") pod \"ceilometer-0\" (UID: \"9b948893-d309-41ca-987c-287ee0b12ef2\") " pod="openstack/ceilometer-0"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.091856 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b948893-d309-41ca-987c-287ee0b12ef2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"9b948893-d309-41ca-987c-287ee0b12ef2\") " pod="openstack/ceilometer-0"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.103532 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-g4slf"]
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.111082 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-g4slf"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.114709 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.115395 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-pqw8m"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.139292 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-hw6nk"]
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.147767 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5b67972-d610-4d3c-96e3-b8a1593f196a-combined-ca-bundle\") pod \"placement-db-sync-hw6nk\" (UID: \"b5b67972-d610-4d3c-96e3-b8a1593f196a\") " pod="openstack/placement-db-sync-hw6nk"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.150069 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5b67972-d610-4d3c-96e3-b8a1593f196a-config-data\") pod \"placement-db-sync-hw6nk\" (UID: \"b5b67972-d610-4d3c-96e3-b8a1593f196a\") " pod="openstack/placement-db-sync-hw6nk"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.150114 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/06f8c64b-0075-49d3-a2ae-0ecc1d03232a-config\") pod \"neutron-db-sync-sb67r\" (UID: \"06f8c64b-0075-49d3-a2ae-0ecc1d03232a\") " pod="openstack/neutron-db-sync-sb67r"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.150342 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06f8c64b-0075-49d3-a2ae-0ecc1d03232a-combined-ca-bundle\") pod \"neutron-db-sync-sb67r\" (UID: \"06f8c64b-0075-49d3-a2ae-0ecc1d03232a\") " pod="openstack/neutron-db-sync-sb67r"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.150371 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-659wg\" (UniqueName: \"kubernetes.io/projected/b5b67972-d610-4d3c-96e3-b8a1593f196a-kube-api-access-659wg\") pod \"placement-db-sync-hw6nk\" (UID: \"b5b67972-d610-4d3c-96e3-b8a1593f196a\") " pod="openstack/placement-db-sync-hw6nk"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.150416 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b5b67972-d610-4d3c-96e3-b8a1593f196a-logs\") pod \"placement-db-sync-hw6nk\" (UID: \"b5b67972-d610-4d3c-96e3-b8a1593f196a\") " pod="openstack/placement-db-sync-hw6nk"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.150450 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b5b67972-d610-4d3c-96e3-b8a1593f196a-scripts\") pod \"placement-db-sync-hw6nk\" (UID: \"b5b67972-d610-4d3c-96e3-b8a1593f196a\") " pod="openstack/placement-db-sync-hw6nk"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.150606 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9199b78-2d93-4877-95b8-ed8457716a3f-combined-ca-bundle\") pod \"barbican-db-sync-g4slf\" (UID: \"d9199b78-2d93-4877-95b8-ed8457716a3f\") " pod="openstack/barbican-db-sync-g4slf"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.150859 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qbgxz\" (UniqueName: \"kubernetes.io/projected/d9199b78-2d93-4877-95b8-ed8457716a3f-kube-api-access-qbgxz\") pod \"barbican-db-sync-g4slf\" (UID: \"d9199b78-2d93-4877-95b8-ed8457716a3f\") " pod="openstack/barbican-db-sync-g4slf"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.150939 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-64gpp\" (UniqueName: \"kubernetes.io/projected/06f8c64b-0075-49d3-a2ae-0ecc1d03232a-kube-api-access-64gpp\") pod \"neutron-db-sync-sb67r\" (UID: \"06f8c64b-0075-49d3-a2ae-0ecc1d03232a\") " pod="openstack/neutron-db-sync-sb67r"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.151102 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d9199b78-2d93-4877-95b8-ed8457716a3f-db-sync-config-data\") pod \"barbican-db-sync-g4slf\" (UID: \"d9199b78-2d93-4877-95b8-ed8457716a3f\") " pod="openstack/barbican-db-sync-g4slf"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.156301 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-g4slf"]
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.160801 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06f8c64b-0075-49d3-a2ae-0ecc1d03232a-combined-ca-bundle\") pod \"neutron-db-sync-sb67r\" (UID: \"06f8c64b-0075-49d3-a2ae-0ecc1d03232a\") " pod="openstack/neutron-db-sync-sb67r"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.181868 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/06f8c64b-0075-49d3-a2ae-0ecc1d03232a-config\") pod \"neutron-db-sync-sb67r\" (UID: \"06f8c64b-0075-49d3-a2ae-0ecc1d03232a\") " pod="openstack/neutron-db-sync-sb67r"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.183694 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-dx5fp"]
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.185419 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf78879c9-dx5fp"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.195320 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-dx5fp"]
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.196782 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-64gpp\" (UniqueName: \"kubernetes.io/projected/06f8c64b-0075-49d3-a2ae-0ecc1d03232a-kube-api-access-64gpp\") pod \"neutron-db-sync-sb67r\" (UID: \"06f8c64b-0075-49d3-a2ae-0ecc1d03232a\") " pod="openstack/neutron-db-sync-sb67r"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.234832 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-f9rw6"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.244434 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="03eee55b-b660-45e7-91db-4491a89910ad" path="/var/lib/kubelet/pods/03eee55b-b660-45e7-91db-4491a89910ad/volumes"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.269523 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.270691 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-659wg\" (UniqueName: \"kubernetes.io/projected/b5b67972-d610-4d3c-96e3-b8a1593f196a-kube-api-access-659wg\") pod \"placement-db-sync-hw6nk\" (UID: \"b5b67972-d610-4d3c-96e3-b8a1593f196a\") " pod="openstack/placement-db-sync-hw6nk"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.270721 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b5b67972-d610-4d3c-96e3-b8a1593f196a-logs\") pod \"placement-db-sync-hw6nk\" (UID: \"b5b67972-d610-4d3c-96e3-b8a1593f196a\") " pod="openstack/placement-db-sync-hw6nk"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.270749 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b5b67972-d610-4d3c-96e3-b8a1593f196a-scripts\") pod \"placement-db-sync-hw6nk\" (UID: \"b5b67972-d610-4d3c-96e3-b8a1593f196a\") " pod="openstack/placement-db-sync-hw6nk"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.270781 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/38632ea8-8761-4f9c-ab8a-29540bf608b1-dns-svc\") pod \"dnsmasq-dns-cf78879c9-dx5fp\" (UID: \"38632ea8-8761-4f9c-ab8a-29540bf608b1\") " pod="openstack/dnsmasq-dns-cf78879c9-dx5fp"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.270805 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9199b78-2d93-4877-95b8-ed8457716a3f-combined-ca-bundle\") pod \"barbican-db-sync-g4slf\" (UID: \"d9199b78-2d93-4877-95b8-ed8457716a3f\") " pod="openstack/barbican-db-sync-g4slf"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.270841 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/38632ea8-8761-4f9c-ab8a-29540bf608b1-ovsdbserver-sb\") pod \"dnsmasq-dns-cf78879c9-dx5fp\" (UID: \"38632ea8-8761-4f9c-ab8a-29540bf608b1\") " pod="openstack/dnsmasq-dns-cf78879c9-dx5fp"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.270869 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qbgxz\" (UniqueName: \"kubernetes.io/projected/d9199b78-2d93-4877-95b8-ed8457716a3f-kube-api-access-qbgxz\") pod \"barbican-db-sync-g4slf\" (UID: \"d9199b78-2d93-4877-95b8-ed8457716a3f\") " pod="openstack/barbican-db-sync-g4slf"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.270900 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/38632ea8-8761-4f9c-ab8a-29540bf608b1-dns-swift-storage-0\") pod \"dnsmasq-dns-cf78879c9-dx5fp\" (UID: \"38632ea8-8761-4f9c-ab8a-29540bf608b1\") " pod="openstack/dnsmasq-dns-cf78879c9-dx5fp"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.270921 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wp7v5\" (UniqueName: \"kubernetes.io/projected/38632ea8-8761-4f9c-ab8a-29540bf608b1-kube-api-access-wp7v5\") pod \"dnsmasq-dns-cf78879c9-dx5fp\" (UID: \"38632ea8-8761-4f9c-ab8a-29540bf608b1\") " pod="openstack/dnsmasq-dns-cf78879c9-dx5fp"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.270959 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/38632ea8-8761-4f9c-ab8a-29540bf608b1-config\") pod \"dnsmasq-dns-cf78879c9-dx5fp\" (UID: \"38632ea8-8761-4f9c-ab8a-29540bf608b1\") " pod="openstack/dnsmasq-dns-cf78879c9-dx5fp"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.270986 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d9199b78-2d93-4877-95b8-ed8457716a3f-db-sync-config-data\") pod \"barbican-db-sync-g4slf\" (UID: \"d9199b78-2d93-4877-95b8-ed8457716a3f\") " pod="openstack/barbican-db-sync-g4slf"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.271018 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5b67972-d610-4d3c-96e3-b8a1593f196a-combined-ca-bundle\") pod \"placement-db-sync-hw6nk\" (UID: \"b5b67972-d610-4d3c-96e3-b8a1593f196a\") " pod="openstack/placement-db-sync-hw6nk"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.271083 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5b67972-d610-4d3c-96e3-b8a1593f196a-config-data\") pod \"placement-db-sync-hw6nk\" (UID: \"b5b67972-d610-4d3c-96e3-b8a1593f196a\") " pod="openstack/placement-db-sync-hw6nk"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.271118 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/38632ea8-8761-4f9c-ab8a-29540bf608b1-ovsdbserver-nb\") pod \"dnsmasq-dns-cf78879c9-dx5fp\" (UID: \"38632ea8-8761-4f9c-ab8a-29540bf608b1\") " pod="openstack/dnsmasq-dns-cf78879c9-dx5fp"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.271890 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b5b67972-d610-4d3c-96e3-b8a1593f196a-logs\") pod \"placement-db-sync-hw6nk\" (UID: \"b5b67972-d610-4d3c-96e3-b8a1593f196a\") " pod="openstack/placement-db-sync-hw6nk"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.275598 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d9199b78-2d93-4877-95b8-ed8457716a3f-db-sync-config-data\") pod \"barbican-db-sync-g4slf\" (UID: \"d9199b78-2d93-4877-95b8-ed8457716a3f\") " pod="openstack/barbican-db-sync-g4slf"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.277819 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b5b67972-d610-4d3c-96e3-b8a1593f196a-scripts\") pod \"placement-db-sync-hw6nk\" (UID: \"b5b67972-d610-4d3c-96e3-b8a1593f196a\") " pod="openstack/placement-db-sync-hw6nk"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.283523 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9199b78-2d93-4877-95b8-ed8457716a3f-combined-ca-bundle\") pod \"barbican-db-sync-g4slf\" (UID: \"d9199b78-2d93-4877-95b8-ed8457716a3f\") " pod="openstack/barbican-db-sync-g4slf"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.286237 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-659wg\" (UniqueName: \"kubernetes.io/projected/b5b67972-d610-4d3c-96e3-b8a1593f196a-kube-api-access-659wg\") pod \"placement-db-sync-hw6nk\" (UID: \"b5b67972-d610-4d3c-96e3-b8a1593f196a\") " pod="openstack/placement-db-sync-hw6nk"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.288039 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5b67972-d610-4d3c-96e3-b8a1593f196a-config-data\") pod \"placement-db-sync-hw6nk\" (UID: \"b5b67972-d610-4d3c-96e3-b8a1593f196a\") " pod="openstack/placement-db-sync-hw6nk"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.290972 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5b67972-d610-4d3c-96e3-b8a1593f196a-combined-ca-bundle\") pod \"placement-db-sync-hw6nk\" (UID: \"b5b67972-d610-4d3c-96e3-b8a1593f196a\") " pod="openstack/placement-db-sync-hw6nk"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.315465 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qbgxz\" (UniqueName: \"kubernetes.io/projected/d9199b78-2d93-4877-95b8-ed8457716a3f-kube-api-access-qbgxz\") pod \"barbican-db-sync-g4slf\" (UID: \"d9199b78-2d93-4877-95b8-ed8457716a3f\") " pod="openstack/barbican-db-sync-g4slf"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.348397 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-sb67r"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.387684 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/38632ea8-8761-4f9c-ab8a-29540bf608b1-ovsdbserver-sb\") pod \"dnsmasq-dns-cf78879c9-dx5fp\" (UID: \"38632ea8-8761-4f9c-ab8a-29540bf608b1\") " pod="openstack/dnsmasq-dns-cf78879c9-dx5fp"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.387811 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/38632ea8-8761-4f9c-ab8a-29540bf608b1-dns-swift-storage-0\") pod \"dnsmasq-dns-cf78879c9-dx5fp\" (UID: \"38632ea8-8761-4f9c-ab8a-29540bf608b1\") " pod="openstack/dnsmasq-dns-cf78879c9-dx5fp"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.387841 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wp7v5\" (UniqueName: \"kubernetes.io/projected/38632ea8-8761-4f9c-ab8a-29540bf608b1-kube-api-access-wp7v5\") pod \"dnsmasq-dns-cf78879c9-dx5fp\" (UID: \"38632ea8-8761-4f9c-ab8a-29540bf608b1\") " pod="openstack/dnsmasq-dns-cf78879c9-dx5fp"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.387865 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/38632ea8-8761-4f9c-ab8a-29540bf608b1-config\") pod \"dnsmasq-dns-cf78879c9-dx5fp\" (UID: \"38632ea8-8761-4f9c-ab8a-29540bf608b1\") " pod="openstack/dnsmasq-dns-cf78879c9-dx5fp"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.388108 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/38632ea8-8761-4f9c-ab8a-29540bf608b1-ovsdbserver-nb\") pod \"dnsmasq-dns-cf78879c9-dx5fp\" (UID: \"38632ea8-8761-4f9c-ab8a-29540bf608b1\") " pod="openstack/dnsmasq-dns-cf78879c9-dx5fp"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.389169 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/38632ea8-8761-4f9c-ab8a-29540bf608b1-ovsdbserver-sb\") pod \"dnsmasq-dns-cf78879c9-dx5fp\" (UID: \"38632ea8-8761-4f9c-ab8a-29540bf608b1\") " pod="openstack/dnsmasq-dns-cf78879c9-dx5fp"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.389730 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/38632ea8-8761-4f9c-ab8a-29540bf608b1-config\") pod \"dnsmasq-dns-cf78879c9-dx5fp\" (UID: \"38632ea8-8761-4f9c-ab8a-29540bf608b1\") " pod="openstack/dnsmasq-dns-cf78879c9-dx5fp"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.391169 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/38632ea8-8761-4f9c-ab8a-29540bf608b1-ovsdbserver-nb\") pod \"dnsmasq-dns-cf78879c9-dx5fp\" (UID: \"38632ea8-8761-4f9c-ab8a-29540bf608b1\") " pod="openstack/dnsmasq-dns-cf78879c9-dx5fp"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.391691 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/38632ea8-8761-4f9c-ab8a-29540bf608b1-dns-swift-storage-0\") pod \"dnsmasq-dns-cf78879c9-dx5fp\" (UID: \"38632ea8-8761-4f9c-ab8a-29540bf608b1\") " pod="openstack/dnsmasq-dns-cf78879c9-dx5fp"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.398544 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/38632ea8-8761-4f9c-ab8a-29540bf608b1-dns-svc\") pod \"dnsmasq-dns-cf78879c9-dx5fp\" (UID: \"38632ea8-8761-4f9c-ab8a-29540bf608b1\") " pod="openstack/dnsmasq-dns-cf78879c9-dx5fp"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.402747 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/38632ea8-8761-4f9c-ab8a-29540bf608b1-dns-svc\") pod \"dnsmasq-dns-cf78879c9-dx5fp\" (UID: \"38632ea8-8761-4f9c-ab8a-29540bf608b1\") " pod="openstack/dnsmasq-dns-cf78879c9-dx5fp"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.425750 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wp7v5\" (UniqueName: \"kubernetes.io/projected/38632ea8-8761-4f9c-ab8a-29540bf608b1-kube-api-access-wp7v5\") pod \"dnsmasq-dns-cf78879c9-dx5fp\" (UID: \"38632ea8-8761-4f9c-ab8a-29540bf608b1\") " pod="openstack/dnsmasq-dns-cf78879c9-dx5fp"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.531699 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-hw6nk"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.567849 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b868669f-qpg6v"]
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.570362 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-g4slf"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.610854 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf78879c9-dx5fp"
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.817960 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-92t2k"]
Nov 28 13:51:08 crc kubenswrapper[4857]: I1128 13:51:08.935814 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-f9rw6"]
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.017511 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.281607 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-sb67r"]
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.345602 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-dx5fp"]
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.408144 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-f8mpb"
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.460079 4857 generic.go:334] "Generic (PLEG): container finished" podID="4fd8fd83-9147-4bbd-80f4-1eb001e2673b" containerID="c1a82624a73c6b39c0714b102d769e4eb4c4f1e398ca36aba8d63b74f7e45120" exitCode=0
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.460169 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b868669f-qpg6v" event={"ID":"4fd8fd83-9147-4bbd-80f4-1eb001e2673b","Type":"ContainerDied","Data":"c1a82624a73c6b39c0714b102d769e4eb4c4f1e398ca36aba8d63b74f7e45120"}
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.460197 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b868669f-qpg6v" event={"ID":"4fd8fd83-9147-4bbd-80f4-1eb001e2673b","Type":"ContainerStarted","Data":"4723d92a5cff83c24a63924037be0671f86e6be917ad119264f1539ec104b7b7"}
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.468878 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9b948893-d309-41ca-987c-287ee0b12ef2","Type":"ContainerStarted","Data":"206e4d3af24f3f25b2d8df96d6b2cfb6b40b3fd31c43b1ffdd0ae90252851500"}
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.469759 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-hw6nk"]
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.472865 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-92t2k" event={"ID":"0c8cf548-437e-4662-89ea-46923e0611dd","Type":"ContainerStarted","Data":"ee86c723b1757e0a276683b36f03550d38ea8483e2efb0ba7d065c4d40bbb6ba"}
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.477022 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-sb67r" event={"ID":"06f8c64b-0075-49d3-a2ae-0ecc1d03232a","Type":"ContainerStarted","Data":"3c953f2686f9b8e0e44dd33a85a9195be946924bf9dec1ca5dd90bff98f88750"}
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.483774 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-f9rw6" event={"ID":"aebc3704-6a31-4813-8826-622ffb7f6934","Type":"ContainerStarted","Data":"71bb2690ad07b8094b06711485d61ff101d924539bab8b0fa405a7467ff637b0"}
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.487759 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-g4slf"]
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.490736 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-f8mpb"
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.491048 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-f8mpb" event={"ID":"55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed","Type":"ContainerDied","Data":"3be0492c1c532768e1493cbb4c8674b305df1092cd1463bcccec56a5278d8b5a"}
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.491105 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3be0492c1c532768e1493cbb4c8674b305df1092cd1463bcccec56a5278d8b5a"
Nov 28 13:51:09 crc kubenswrapper[4857]: W1128 13:51:09.494186 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb5b67972_d610_4d3c_96e3_b8a1593f196a.slice/crio-f623d672907463ea8a92878394f5bec9ceeb80535f553f1a73713a088ecceb59 WatchSource:0}: Error finding container f623d672907463ea8a92878394f5bec9ceeb80535f553f1a73713a088ecceb59: Status 404 returned error can't find the container with id f623d672907463ea8a92878394f5bec9ceeb80535f553f1a73713a088ecceb59
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.503152 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf78879c9-dx5fp" event={"ID":"38632ea8-8761-4f9c-ab8a-29540bf608b1","Type":"ContainerStarted","Data":"d3e830df254efe4b11d3f56bfcf8f03118f9a31de4f5e79d0e1bbf3585ea7eee"}
Nov 28 13:51:09 crc kubenswrapper[4857]: W1128 13:51:09.510696 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd9199b78_2d93_4877_95b8_ed8457716a3f.slice/crio-8c7adae17053612bc7f8224008a84020ea260e11d92574780d1353ddb958a930 WatchSource:0}: Error finding container 8c7adae17053612bc7f8224008a84020ea260e11d92574780d1353ddb958a930: Status 404 returned error can't find the container with id 8c7adae17053612bc7f8224008a84020ea260e11d92574780d1353ddb958a930
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.533450 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed-db-sync-config-data\") pod \"55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed\" (UID: \"55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed\") "
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.533549 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2ccmh\" (UniqueName: \"kubernetes.io/projected/55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed-kube-api-access-2ccmh\") pod \"55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed\" (UID: \"55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed\") "
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.533596 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed-combined-ca-bundle\") pod \"55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed\" (UID: \"55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed\") "
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.533614 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed-config-data\") pod \"55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed\" (UID: \"55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed\") "
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.537536 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed" (UID: "55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.538774 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed-kube-api-access-2ccmh" (OuterVolumeSpecName: "kube-api-access-2ccmh") pod "55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed" (UID: "55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed"). InnerVolumeSpecName "kube-api-access-2ccmh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.565053 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed" (UID: "55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.604988 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed-config-data" (OuterVolumeSpecName: "config-data") pod "55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed" (UID: "55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.635355 4857 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.635401 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2ccmh\" (UniqueName: \"kubernetes.io/projected/55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed-kube-api-access-2ccmh\") on node \"crc\" DevicePath \"\""
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.635417 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.635428 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.821908 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b868669f-qpg6v"
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.945585 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4fd8fd83-9147-4bbd-80f4-1eb001e2673b-dns-svc\") pod \"4fd8fd83-9147-4bbd-80f4-1eb001e2673b\" (UID: \"4fd8fd83-9147-4bbd-80f4-1eb001e2673b\") "
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.945701 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4fd8fd83-9147-4bbd-80f4-1eb001e2673b-dns-swift-storage-0\") pod \"4fd8fd83-9147-4bbd-80f4-1eb001e2673b\" (UID: \"4fd8fd83-9147-4bbd-80f4-1eb001e2673b\") "
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.945781 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fd8fd83-9147-4bbd-80f4-1eb001e2673b-config\") pod \"4fd8fd83-9147-4bbd-80f4-1eb001e2673b\" (UID: \"4fd8fd83-9147-4bbd-80f4-1eb001e2673b\") "
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.945820 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4fd8fd83-9147-4bbd-80f4-1eb001e2673b-ovsdbserver-nb\") pod \"4fd8fd83-9147-4bbd-80f4-1eb001e2673b\" (UID: \"4fd8fd83-9147-4bbd-80f4-1eb001e2673b\") "
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.945844 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7cv2l\" (UniqueName: \"kubernetes.io/projected/4fd8fd83-9147-4bbd-80f4-1eb001e2673b-kube-api-access-7cv2l\") pod \"4fd8fd83-9147-4bbd-80f4-1eb001e2673b\" (UID: \"4fd8fd83-9147-4bbd-80f4-1eb001e2673b\") "
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.945896 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4fd8fd83-9147-4bbd-80f4-1eb001e2673b-ovsdbserver-sb\") pod \"4fd8fd83-9147-4bbd-80f4-1eb001e2673b\" (UID: \"4fd8fd83-9147-4bbd-80f4-1eb001e2673b\") "
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.963331 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4fd8fd83-9147-4bbd-80f4-1eb001e2673b-kube-api-access-7cv2l" (OuterVolumeSpecName: "kube-api-access-7cv2l") pod "4fd8fd83-9147-4bbd-80f4-1eb001e2673b" (UID: "4fd8fd83-9147-4bbd-80f4-1eb001e2673b"). InnerVolumeSpecName "kube-api-access-7cv2l". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.969709 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fd8fd83-9147-4bbd-80f4-1eb001e2673b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "4fd8fd83-9147-4bbd-80f4-1eb001e2673b" (UID: "4fd8fd83-9147-4bbd-80f4-1eb001e2673b"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.975658 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fd8fd83-9147-4bbd-80f4-1eb001e2673b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "4fd8fd83-9147-4bbd-80f4-1eb001e2673b" (UID: "4fd8fd83-9147-4bbd-80f4-1eb001e2673b"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.977617 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fd8fd83-9147-4bbd-80f4-1eb001e2673b-config" (OuterVolumeSpecName: "config") pod "4fd8fd83-9147-4bbd-80f4-1eb001e2673b" (UID: "4fd8fd83-9147-4bbd-80f4-1eb001e2673b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.978165 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fd8fd83-9147-4bbd-80f4-1eb001e2673b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4fd8fd83-9147-4bbd-80f4-1eb001e2673b" (UID: "4fd8fd83-9147-4bbd-80f4-1eb001e2673b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:51:09 crc kubenswrapper[4857]: I1128 13:51:09.996695 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fd8fd83-9147-4bbd-80f4-1eb001e2673b-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "4fd8fd83-9147-4bbd-80f4-1eb001e2673b" (UID: "4fd8fd83-9147-4bbd-80f4-1eb001e2673b"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:51:10 crc kubenswrapper[4857]: I1128 13:51:10.052096 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4fd8fd83-9147-4bbd-80f4-1eb001e2673b-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 28 13:51:10 crc kubenswrapper[4857]: I1128 13:51:10.052140 4857 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4fd8fd83-9147-4bbd-80f4-1eb001e2673b-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Nov 28 13:51:10 crc kubenswrapper[4857]: I1128 13:51:10.052156 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fd8fd83-9147-4bbd-80f4-1eb001e2673b-config\") on node \"crc\" DevicePath \"\""
Nov 28 13:51:10 crc kubenswrapper[4857]: I1128 13:51:10.052167 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4fd8fd83-9147-4bbd-80f4-1eb001e2673b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 28 13:51:10 crc kubenswrapper[4857]: I1128 13:51:10.052179 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7cv2l\" (UniqueName: \"kubernetes.io/projected/4fd8fd83-9147-4bbd-80f4-1eb001e2673b-kube-api-access-7cv2l\") on node \"crc\" DevicePath \"\""
Nov 28 13:51:10 crc kubenswrapper[4857]: I1128 13:51:10.052191 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4fd8fd83-9147-4bbd-80f4-1eb001e2673b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 28 13:51:10 crc kubenswrapper[4857]: I1128 13:51:10.532746 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-hw6nk" event={"ID":"b5b67972-d610-4d3c-96e3-b8a1593f196a","Type":"ContainerStarted","Data":"f623d672907463ea8a92878394f5bec9ceeb80535f553f1a73713a088ecceb59"}
Nov 28 13:51:10 crc kubenswrapper[4857]: I1128 13:51:10.536092 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-92t2k" event={"ID":"0c8cf548-437e-4662-89ea-46923e0611dd","Type":"ContainerStarted","Data":"71ec8c59a35a35a64c7fc0c905ef1b6853ea3563d5aeb022b63a5ab844099b86"}
Nov 28 13:51:10 crc kubenswrapper[4857]: I1128 13:51:10.540350 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-sb67r" event={"ID":"06f8c64b-0075-49d3-a2ae-0ecc1d03232a","Type":"ContainerStarted","Data":"a608443173e09bdc122142915f62f799bb04de43dba7d386430145ce82247138"}
Nov 28 13:51:10 crc kubenswrapper[4857]: I1128 13:51:10.547603 4857 generic.go:334] "Generic (PLEG): container finished" podID="38632ea8-8761-4f9c-ab8a-29540bf608b1" containerID="527bfa1ed3f6ad3726351b418bc38849cf6d8e2c370f11a65523db317ef69f69" exitCode=0
Nov 28 13:51:10 crc kubenswrapper[4857]: I1128 13:51:10.548722 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf78879c9-dx5fp" event={"ID":"38632ea8-8761-4f9c-ab8a-29540bf608b1","Type":"ContainerDied","Data":"527bfa1ed3f6ad3726351b418bc38849cf6d8e2c370f11a65523db317ef69f69"}
Nov 28 13:51:10 crc kubenswrapper[4857]: I1128 13:51:10.551480 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-g4slf" event={"ID":"d9199b78-2d93-4877-95b8-ed8457716a3f","Type":"ContainerStarted","Data":"8c7adae17053612bc7f8224008a84020ea260e11d92574780d1353ddb958a930"}
Nov 28 13:51:10 crc kubenswrapper[4857]: I1128 13:51:10.558902 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-92t2k" podStartSLOduration=3.55887769 podStartE2EDuration="3.55887769s" podCreationTimestamp="2025-11-28 13:51:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:51:10.555617362 +0000 UTC m=+1320.679558799" watchObservedRunningTime="2025-11-28 13:51:10.55887769 +0000 UTC m=+1320.682819127"
Nov 28 13:51:10 crc kubenswrapper[4857]: I1128 13:51:10.567741 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b868669f-qpg6v" event={"ID":"4fd8fd83-9147-4bbd-80f4-1eb001e2673b","Type":"ContainerDied","Data":"4723d92a5cff83c24a63924037be0671f86e6be917ad119264f1539ec104b7b7"}
Nov 28 13:51:10 crc kubenswrapper[4857]: I1128 13:51:10.567798 4857 scope.go:117] "RemoveContainer" containerID="c1a82624a73c6b39c0714b102d769e4eb4c4f1e398ca36aba8d63b74f7e45120"
Nov 28 13:51:10 crc kubenswrapper[4857]: I1128 13:51:10.567994 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b868669f-qpg6v"
Nov 28 13:51:10 crc kubenswrapper[4857]: I1128 13:51:10.746737 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-sb67r" podStartSLOduration=3.746719297 podStartE2EDuration="3.746719297s" podCreationTimestamp="2025-11-28 13:51:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:51:10.675556431 +0000 UTC m=+1320.799497858" watchObservedRunningTime="2025-11-28 13:51:10.746719297 +0000 UTC m=+1320.870660734"
Nov 28 13:51:10 crc kubenswrapper[4857]: I1128 13:51:10.890374 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b868669f-qpg6v"]
Nov 28 13:51:10 crc kubenswrapper[4857]: I1128 13:51:10.936019 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5b868669f-qpg6v"]
Nov 28 13:51:10 crc kubenswrapper[4857]: I1128 13:51:10.959979 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-dx5fp"]
Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.095121 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.173464 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-ngbtv"]
Nov 28 13:51:11 crc kubenswrapper[4857]: E1128 13:51:11.174265 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fd8fd83-9147-4bbd-80f4-1eb001e2673b" containerName="init"
Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.174283 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fd8fd83-9147-4bbd-80f4-1eb001e2673b" containerName="init"
Nov 28 13:51:11 crc kubenswrapper[4857]: E1128 13:51:11.174303 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed" containerName="glance-db-sync"
Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.174312 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed" containerName="glance-db-sync"
Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.174597 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fd8fd83-9147-4bbd-80f4-1eb001e2673b" containerName="init"
Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.174617 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed" containerName="glance-db-sync"
Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.176138 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-ngbtv"
Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.228600 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-ngbtv"]
Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.345800 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1536ae25-51ba-49cc-af6f-7c7b9ceff289-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-ngbtv\" (UID: \"1536ae25-51ba-49cc-af6f-7c7b9ceff289\") " pod="openstack/dnsmasq-dns-56df8fb6b7-ngbtv"
Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.345846 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1536ae25-51ba-49cc-af6f-7c7b9ceff289-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-ngbtv\" (UID: \"1536ae25-51ba-49cc-af6f-7c7b9ceff289\") " pod="openstack/dnsmasq-dns-56df8fb6b7-ngbtv"
Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.345873 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1536ae25-51ba-49cc-af6f-7c7b9ceff289-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-ngbtv\" (UID: \"1536ae25-51ba-49cc-af6f-7c7b9ceff289\") " pod="openstack/dnsmasq-dns-56df8fb6b7-ngbtv"
Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.345892 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1536ae25-51ba-49cc-af6f-7c7b9ceff289-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-ngbtv\" (UID: \"1536ae25-51ba-49cc-af6f-7c7b9ceff289\") " pod="openstack/dnsmasq-dns-56df8fb6b7-ngbtv"
Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.346002 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t57sm\" (UniqueName: \"kubernetes.io/projected/1536ae25-51ba-49cc-af6f-7c7b9ceff289-kube-api-access-t57sm\") pod \"dnsmasq-dns-56df8fb6b7-ngbtv\" (UID: \"1536ae25-51ba-49cc-af6f-7c7b9ceff289\") " pod="openstack/dnsmasq-dns-56df8fb6b7-ngbtv"
Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.346083 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1536ae25-51ba-49cc-af6f-7c7b9ceff289-config\") pod \"dnsmasq-dns-56df8fb6b7-ngbtv\" (UID: \"1536ae25-51ba-49cc-af6f-7c7b9ceff289\") " pod="openstack/dnsmasq-dns-56df8fb6b7-ngbtv"
Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.450644 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1536ae25-51ba-49cc-af6f-7c7b9ceff289-config\") pod \"dnsmasq-dns-56df8fb6b7-ngbtv\" (UID: \"1536ae25-51ba-49cc-af6f-7c7b9ceff289\") " pod="openstack/dnsmasq-dns-56df8fb6b7-ngbtv"
Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.450697 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1536ae25-51ba-49cc-af6f-7c7b9ceff289-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-ngbtv\" (UID: \"1536ae25-51ba-49cc-af6f-7c7b9ceff289\") " pod="openstack/dnsmasq-dns-56df8fb6b7-ngbtv"
Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.450718 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1536ae25-51ba-49cc-af6f-7c7b9ceff289-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-ngbtv\" (UID: \"1536ae25-51ba-49cc-af6f-7c7b9ceff289\") " pod="openstack/dnsmasq-dns-56df8fb6b7-ngbtv"
Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.450736 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1536ae25-51ba-49cc-af6f-7c7b9ceff289-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-ngbtv\" (UID: \"1536ae25-51ba-49cc-af6f-7c7b9ceff289\") " pod="openstack/dnsmasq-dns-56df8fb6b7-ngbtv"
Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.450754 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1536ae25-51ba-49cc-af6f-7c7b9ceff289-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-ngbtv\" (UID: \"1536ae25-51ba-49cc-af6f-7c7b9ceff289\") " pod="openstack/dnsmasq-dns-56df8fb6b7-ngbtv"
Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.450822 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t57sm\" (UniqueName: \"kubernetes.io/projected/1536ae25-51ba-49cc-af6f-7c7b9ceff289-kube-api-access-t57sm\") pod \"dnsmasq-dns-56df8fb6b7-ngbtv\" (UID: \"1536ae25-51ba-49cc-af6f-7c7b9ceff289\") " pod="openstack/dnsmasq-dns-56df8fb6b7-ngbtv"
Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.451781 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1536ae25-51ba-49cc-af6f-7c7b9ceff289-config\") pod \"dnsmasq-dns-56df8fb6b7-ngbtv\" (UID: \"1536ae25-51ba-49cc-af6f-7c7b9ceff289\") " pod="openstack/dnsmasq-dns-56df8fb6b7-ngbtv"
Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.452323 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1536ae25-51ba-49cc-af6f-7c7b9ceff289-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-ngbtv\" (UID: \"1536ae25-51ba-49cc-af6f-7c7b9ceff289\") " pod="openstack/dnsmasq-dns-56df8fb6b7-ngbtv"
Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.452813 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1536ae25-51ba-49cc-af6f-7c7b9ceff289-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-ngbtv\" (UID: \"1536ae25-51ba-49cc-af6f-7c7b9ceff289\") " pod="openstack/dnsmasq-dns-56df8fb6b7-ngbtv"
Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.453398 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1536ae25-51ba-49cc-af6f-7c7b9ceff289-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-ngbtv\" (UID: \"1536ae25-51ba-49cc-af6f-7c7b9ceff289\") " pod="openstack/dnsmasq-dns-56df8fb6b7-ngbtv"
Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.453883 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1536ae25-51ba-49cc-af6f-7c7b9ceff289-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-ngbtv\" (UID: \"1536ae25-51ba-49cc-af6f-7c7b9ceff289\") " pod="openstack/dnsmasq-dns-56df8fb6b7-ngbtv"
Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.494677 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t57sm\" (UniqueName:
\"kubernetes.io/projected/1536ae25-51ba-49cc-af6f-7c7b9ceff289-kube-api-access-t57sm\") pod \"dnsmasq-dns-56df8fb6b7-ngbtv\" (UID: \"1536ae25-51ba-49cc-af6f-7c7b9ceff289\") " pod="openstack/dnsmasq-dns-56df8fb6b7-ngbtv" Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.576812 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-ngbtv" Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.585062 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-cf78879c9-dx5fp" podUID="38632ea8-8761-4f9c-ab8a-29540bf608b1" containerName="dnsmasq-dns" containerID="cri-o://e6c86874570692a2b8c011dc39509626713cba771845bedc90b0e3898cdbba06" gracePeriod=10 Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.585073 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf78879c9-dx5fp" event={"ID":"38632ea8-8761-4f9c-ab8a-29540bf608b1","Type":"ContainerStarted","Data":"e6c86874570692a2b8c011dc39509626713cba771845bedc90b0e3898cdbba06"} Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.585527 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-cf78879c9-dx5fp" Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.631353 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-cf78879c9-dx5fp" podStartSLOduration=4.63133534 podStartE2EDuration="4.63133534s" podCreationTimestamp="2025-11-28 13:51:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:51:11.608992089 +0000 UTC m=+1321.732933526" watchObservedRunningTime="2025-11-28 13:51:11.63133534 +0000 UTC m=+1321.755276767" Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.765745 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.767910 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.775963 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-x48r5" Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.776470 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.777746 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.794745 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.961662 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4742114d-6a22-4e30-b6c9-31a7701763d0-logs\") pod \"glance-default-external-api-0\" (UID: \"4742114d-6a22-4e30-b6c9-31a7701763d0\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.962023 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4742114d-6a22-4e30-b6c9-31a7701763d0-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4742114d-6a22-4e30-b6c9-31a7701763d0\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.962060 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"4742114d-6a22-4e30-b6c9-31a7701763d0\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.962091 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4742114d-6a22-4e30-b6c9-31a7701763d0-scripts\") pod \"glance-default-external-api-0\" (UID: \"4742114d-6a22-4e30-b6c9-31a7701763d0\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.962121 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-88dd9\" (UniqueName: \"kubernetes.io/projected/4742114d-6a22-4e30-b6c9-31a7701763d0-kube-api-access-88dd9\") pod \"glance-default-external-api-0\" (UID: \"4742114d-6a22-4e30-b6c9-31a7701763d0\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.962147 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4742114d-6a22-4e30-b6c9-31a7701763d0-config-data\") pod \"glance-default-external-api-0\" (UID: \"4742114d-6a22-4e30-b6c9-31a7701763d0\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:11 crc kubenswrapper[4857]: I1128 13:51:11.962176 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4742114d-6a22-4e30-b6c9-31a7701763d0-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4742114d-6a22-4e30-b6c9-31a7701763d0\") " 
pod="openstack/glance-default-external-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.064078 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4742114d-6a22-4e30-b6c9-31a7701763d0-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4742114d-6a22-4e30-b6c9-31a7701763d0\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.064182 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4742114d-6a22-4e30-b6c9-31a7701763d0-logs\") pod \"glance-default-external-api-0\" (UID: \"4742114d-6a22-4e30-b6c9-31a7701763d0\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.064226 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4742114d-6a22-4e30-b6c9-31a7701763d0-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4742114d-6a22-4e30-b6c9-31a7701763d0\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.064380 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"4742114d-6a22-4e30-b6c9-31a7701763d0\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.064437 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4742114d-6a22-4e30-b6c9-31a7701763d0-scripts\") pod \"glance-default-external-api-0\" (UID: \"4742114d-6a22-4e30-b6c9-31a7701763d0\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.064485 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-88dd9\" (UniqueName: \"kubernetes.io/projected/4742114d-6a22-4e30-b6c9-31a7701763d0-kube-api-access-88dd9\") pod \"glance-default-external-api-0\" (UID: \"4742114d-6a22-4e30-b6c9-31a7701763d0\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.064537 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4742114d-6a22-4e30-b6c9-31a7701763d0-config-data\") pod \"glance-default-external-api-0\" (UID: \"4742114d-6a22-4e30-b6c9-31a7701763d0\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.064900 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4742114d-6a22-4e30-b6c9-31a7701763d0-logs\") pod \"glance-default-external-api-0\" (UID: \"4742114d-6a22-4e30-b6c9-31a7701763d0\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.064919 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"4742114d-6a22-4e30-b6c9-31a7701763d0\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-external-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.065154 
4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4742114d-6a22-4e30-b6c9-31a7701763d0-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4742114d-6a22-4e30-b6c9-31a7701763d0\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.072264 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4742114d-6a22-4e30-b6c9-31a7701763d0-scripts\") pod \"glance-default-external-api-0\" (UID: \"4742114d-6a22-4e30-b6c9-31a7701763d0\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.085610 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4742114d-6a22-4e30-b6c9-31a7701763d0-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4742114d-6a22-4e30-b6c9-31a7701763d0\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.087144 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4742114d-6a22-4e30-b6c9-31a7701763d0-config-data\") pod \"glance-default-external-api-0\" (UID: \"4742114d-6a22-4e30-b6c9-31a7701763d0\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.105123 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-88dd9\" (UniqueName: \"kubernetes.io/projected/4742114d-6a22-4e30-b6c9-31a7701763d0-kube-api-access-88dd9\") pod \"glance-default-external-api-0\" (UID: \"4742114d-6a22-4e30-b6c9-31a7701763d0\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.110365 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"4742114d-6a22-4e30-b6c9-31a7701763d0\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.162092 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-ngbtv"] Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.245616 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4fd8fd83-9147-4bbd-80f4-1eb001e2673b" path="/var/lib/kubelet/pods/4fd8fd83-9147-4bbd-80f4-1eb001e2673b/volumes" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.403625 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.408009 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.409423 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.412157 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.418031 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.463118 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf78879c9-dx5fp" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.582143 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/38632ea8-8761-4f9c-ab8a-29540bf608b1-ovsdbserver-nb\") pod \"38632ea8-8761-4f9c-ab8a-29540bf608b1\" (UID: \"38632ea8-8761-4f9c-ab8a-29540bf608b1\") " Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.582366 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/38632ea8-8761-4f9c-ab8a-29540bf608b1-dns-svc\") pod \"38632ea8-8761-4f9c-ab8a-29540bf608b1\" (UID: \"38632ea8-8761-4f9c-ab8a-29540bf608b1\") " Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.582389 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/38632ea8-8761-4f9c-ab8a-29540bf608b1-dns-swift-storage-0\") pod \"38632ea8-8761-4f9c-ab8a-29540bf608b1\" (UID: \"38632ea8-8761-4f9c-ab8a-29540bf608b1\") " Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.582546 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/38632ea8-8761-4f9c-ab8a-29540bf608b1-ovsdbserver-sb\") pod \"38632ea8-8761-4f9c-ab8a-29540bf608b1\" (UID: \"38632ea8-8761-4f9c-ab8a-29540bf608b1\") " Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.582588 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/38632ea8-8761-4f9c-ab8a-29540bf608b1-config\") pod \"38632ea8-8761-4f9c-ab8a-29540bf608b1\" (UID: \"38632ea8-8761-4f9c-ab8a-29540bf608b1\") " Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.582611 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wp7v5\" (UniqueName: \"kubernetes.io/projected/38632ea8-8761-4f9c-ab8a-29540bf608b1-kube-api-access-wp7v5\") pod \"38632ea8-8761-4f9c-ab8a-29540bf608b1\" (UID: \"38632ea8-8761-4f9c-ab8a-29540bf608b1\") " Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.582918 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/703d70c6-c4a2-4ea3-a547-6da82c40a8aa-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"703d70c6-c4a2-4ea3-a547-6da82c40a8aa\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.582965 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w2dd6\" (UniqueName: \"kubernetes.io/projected/703d70c6-c4a2-4ea3-a547-6da82c40a8aa-kube-api-access-w2dd6\") pod \"glance-default-internal-api-0\" (UID: \"703d70c6-c4a2-4ea3-a547-6da82c40a8aa\") " pod="openstack/glance-default-internal-api-0" Nov 28 
13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.582994 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/703d70c6-c4a2-4ea3-a547-6da82c40a8aa-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"703d70c6-c4a2-4ea3-a547-6da82c40a8aa\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.583482 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/703d70c6-c4a2-4ea3-a547-6da82c40a8aa-config-data\") pod \"glance-default-internal-api-0\" (UID: \"703d70c6-c4a2-4ea3-a547-6da82c40a8aa\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.583503 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/703d70c6-c4a2-4ea3-a547-6da82c40a8aa-scripts\") pod \"glance-default-internal-api-0\" (UID: \"703d70c6-c4a2-4ea3-a547-6da82c40a8aa\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.583553 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/703d70c6-c4a2-4ea3-a547-6da82c40a8aa-logs\") pod \"glance-default-internal-api-0\" (UID: \"703d70c6-c4a2-4ea3-a547-6da82c40a8aa\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.583587 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"703d70c6-c4a2-4ea3-a547-6da82c40a8aa\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.588321 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38632ea8-8761-4f9c-ab8a-29540bf608b1-kube-api-access-wp7v5" (OuterVolumeSpecName: "kube-api-access-wp7v5") pod "38632ea8-8761-4f9c-ab8a-29540bf608b1" (UID: "38632ea8-8761-4f9c-ab8a-29540bf608b1"). InnerVolumeSpecName "kube-api-access-wp7v5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.610771 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-ngbtv" event={"ID":"1536ae25-51ba-49cc-af6f-7c7b9ceff289","Type":"ContainerStarted","Data":"7d2bd8e29d3a7fa4f57343292d152ea8bddb7c80c8eb3acb6f72875fcb9b851d"} Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.610843 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-ngbtv" event={"ID":"1536ae25-51ba-49cc-af6f-7c7b9ceff289","Type":"ContainerStarted","Data":"422f7639f75cb11ba3e72c65f5d4ca9421c7fc1318e945c04382cd02ec50598d"} Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.616227 4857 generic.go:334] "Generic (PLEG): container finished" podID="38632ea8-8761-4f9c-ab8a-29540bf608b1" containerID="e6c86874570692a2b8c011dc39509626713cba771845bedc90b0e3898cdbba06" exitCode=0 Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.616268 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf78879c9-dx5fp" event={"ID":"38632ea8-8761-4f9c-ab8a-29540bf608b1","Type":"ContainerDied","Data":"e6c86874570692a2b8c011dc39509626713cba771845bedc90b0e3898cdbba06"} Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.616292 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf78879c9-dx5fp" event={"ID":"38632ea8-8761-4f9c-ab8a-29540bf608b1","Type":"ContainerDied","Data":"d3e830df254efe4b11d3f56bfcf8f03118f9a31de4f5e79d0e1bbf3585ea7eee"} Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.616308 4857 scope.go:117] "RemoveContainer" containerID="e6c86874570692a2b8c011dc39509626713cba771845bedc90b0e3898cdbba06" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.616434 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf78879c9-dx5fp" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.669413 4857 scope.go:117] "RemoveContainer" containerID="527bfa1ed3f6ad3726351b418bc38849cf6d8e2c370f11a65523db317ef69f69" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.684780 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/38632ea8-8761-4f9c-ab8a-29540bf608b1-config" (OuterVolumeSpecName: "config") pod "38632ea8-8761-4f9c-ab8a-29540bf608b1" (UID: "38632ea8-8761-4f9c-ab8a-29540bf608b1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.687930 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/38632ea8-8761-4f9c-ab8a-29540bf608b1-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "38632ea8-8761-4f9c-ab8a-29540bf608b1" (UID: "38632ea8-8761-4f9c-ab8a-29540bf608b1"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.688352 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/703d70c6-c4a2-4ea3-a547-6da82c40a8aa-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"703d70c6-c4a2-4ea3-a547-6da82c40a8aa\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.688457 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w2dd6\" (UniqueName: \"kubernetes.io/projected/703d70c6-c4a2-4ea3-a547-6da82c40a8aa-kube-api-access-w2dd6\") pod \"glance-default-internal-api-0\" (UID: \"703d70c6-c4a2-4ea3-a547-6da82c40a8aa\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.688612 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/703d70c6-c4a2-4ea3-a547-6da82c40a8aa-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"703d70c6-c4a2-4ea3-a547-6da82c40a8aa\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.690008 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/703d70c6-c4a2-4ea3-a547-6da82c40a8aa-config-data\") pod \"glance-default-internal-api-0\" (UID: \"703d70c6-c4a2-4ea3-a547-6da82c40a8aa\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.690112 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/703d70c6-c4a2-4ea3-a547-6da82c40a8aa-scripts\") pod \"glance-default-internal-api-0\" (UID: \"703d70c6-c4a2-4ea3-a547-6da82c40a8aa\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.690222 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/703d70c6-c4a2-4ea3-a547-6da82c40a8aa-logs\") pod \"glance-default-internal-api-0\" (UID: \"703d70c6-c4a2-4ea3-a547-6da82c40a8aa\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.690588 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"703d70c6-c4a2-4ea3-a547-6da82c40a8aa\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.688825 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/703d70c6-c4a2-4ea3-a547-6da82c40a8aa-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"703d70c6-c4a2-4ea3-a547-6da82c40a8aa\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.690808 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/703d70c6-c4a2-4ea3-a547-6da82c40a8aa-logs\") pod \"glance-default-internal-api-0\" (UID: \"703d70c6-c4a2-4ea3-a547-6da82c40a8aa\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.691002 4857 
reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/38632ea8-8761-4f9c-ab8a-29540bf608b1-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.691041 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/38632ea8-8761-4f9c-ab8a-29540bf608b1-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.691057 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wp7v5\" (UniqueName: \"kubernetes.io/projected/38632ea8-8761-4f9c-ab8a-29540bf608b1-kube-api-access-wp7v5\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.691268 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"703d70c6-c4a2-4ea3-a547-6da82c40a8aa\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/glance-default-internal-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.693217 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/703d70c6-c4a2-4ea3-a547-6da82c40a8aa-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"703d70c6-c4a2-4ea3-a547-6da82c40a8aa\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.694069 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/703d70c6-c4a2-4ea3-a547-6da82c40a8aa-config-data\") pod \"glance-default-internal-api-0\" (UID: \"703d70c6-c4a2-4ea3-a547-6da82c40a8aa\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.695905 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/703d70c6-c4a2-4ea3-a547-6da82c40a8aa-scripts\") pod \"glance-default-internal-api-0\" (UID: \"703d70c6-c4a2-4ea3-a547-6da82c40a8aa\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.698935 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/38632ea8-8761-4f9c-ab8a-29540bf608b1-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "38632ea8-8761-4f9c-ab8a-29540bf608b1" (UID: "38632ea8-8761-4f9c-ab8a-29540bf608b1"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.709625 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w2dd6\" (UniqueName: \"kubernetes.io/projected/703d70c6-c4a2-4ea3-a547-6da82c40a8aa-kube-api-access-w2dd6\") pod \"glance-default-internal-api-0\" (UID: \"703d70c6-c4a2-4ea3-a547-6da82c40a8aa\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.717232 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/38632ea8-8761-4f9c-ab8a-29540bf608b1-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "38632ea8-8761-4f9c-ab8a-29540bf608b1" (UID: "38632ea8-8761-4f9c-ab8a-29540bf608b1"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.724200 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/38632ea8-8761-4f9c-ab8a-29540bf608b1-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "38632ea8-8761-4f9c-ab8a-29540bf608b1" (UID: "38632ea8-8761-4f9c-ab8a-29540bf608b1"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.730584 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"703d70c6-c4a2-4ea3-a547-6da82c40a8aa\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.750705 4857 scope.go:117] "RemoveContainer" containerID="e6c86874570692a2b8c011dc39509626713cba771845bedc90b0e3898cdbba06" Nov 28 13:51:12 crc kubenswrapper[4857]: E1128 13:51:12.751327 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e6c86874570692a2b8c011dc39509626713cba771845bedc90b0e3898cdbba06\": container with ID starting with e6c86874570692a2b8c011dc39509626713cba771845bedc90b0e3898cdbba06 not found: ID does not exist" containerID="e6c86874570692a2b8c011dc39509626713cba771845bedc90b0e3898cdbba06" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.751361 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e6c86874570692a2b8c011dc39509626713cba771845bedc90b0e3898cdbba06"} err="failed to get container status \"e6c86874570692a2b8c011dc39509626713cba771845bedc90b0e3898cdbba06\": rpc error: code = NotFound desc = could not find container \"e6c86874570692a2b8c011dc39509626713cba771845bedc90b0e3898cdbba06\": container with ID starting with e6c86874570692a2b8c011dc39509626713cba771845bedc90b0e3898cdbba06 not found: ID does not exist" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.751382 4857 scope.go:117] "RemoveContainer" containerID="527bfa1ed3f6ad3726351b418bc38849cf6d8e2c370f11a65523db317ef69f69" Nov 28 13:51:12 crc kubenswrapper[4857]: E1128 13:51:12.751609 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"527bfa1ed3f6ad3726351b418bc38849cf6d8e2c370f11a65523db317ef69f69\": container with ID starting with 527bfa1ed3f6ad3726351b418bc38849cf6d8e2c370f11a65523db317ef69f69 not found: ID does not exist" containerID="527bfa1ed3f6ad3726351b418bc38849cf6d8e2c370f11a65523db317ef69f69" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.751630 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"527bfa1ed3f6ad3726351b418bc38849cf6d8e2c370f11a65523db317ef69f69"} err="failed to get container status \"527bfa1ed3f6ad3726351b418bc38849cf6d8e2c370f11a65523db317ef69f69\": rpc error: code = NotFound desc = could not find container \"527bfa1ed3f6ad3726351b418bc38849cf6d8e2c370f11a65523db317ef69f69\": container with ID starting with 527bfa1ed3f6ad3726351b418bc38849cf6d8e2c370f11a65523db317ef69f69 not found: ID does not exist" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.787433 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.793058 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/38632ea8-8761-4f9c-ab8a-29540bf608b1-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.793120 4857 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/38632ea8-8761-4f9c-ab8a-29540bf608b1-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:12 crc kubenswrapper[4857]: I1128 13:51:12.793135 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/38632ea8-8761-4f9c-ab8a-29540bf608b1-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:13 crc kubenswrapper[4857]: I1128 13:51:13.016413 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-dx5fp"] Nov 28 13:51:13 crc kubenswrapper[4857]: I1128 13:51:13.039900 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-dx5fp"] Nov 28 13:51:13 crc kubenswrapper[4857]: I1128 13:51:13.100734 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 13:51:13 crc kubenswrapper[4857]: W1128 13:51:13.113661 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4742114d_6a22_4e30_b6c9_31a7701763d0.slice/crio-8fa66d44052f36b03e2cbaf89214d8016057c0b84901e1dca4afb599c56e0f70 WatchSource:0}: Error finding container 8fa66d44052f36b03e2cbaf89214d8016057c0b84901e1dca4afb599c56e0f70: Status 404 returned error can't find the container with id 8fa66d44052f36b03e2cbaf89214d8016057c0b84901e1dca4afb599c56e0f70 Nov 28 13:51:13 crc kubenswrapper[4857]: W1128 13:51:13.448959 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod703d70c6_c4a2_4ea3_a547_6da82c40a8aa.slice/crio-3991f29e214f395bc91ce3aa26eff523543fb5f3e08b6258471afcced639e300 WatchSource:0}: Error finding container 3991f29e214f395bc91ce3aa26eff523543fb5f3e08b6258471afcced639e300: Status 404 returned error can't find the container with id 3991f29e214f395bc91ce3aa26eff523543fb5f3e08b6258471afcced639e300 Nov 28 13:51:13 crc kubenswrapper[4857]: I1128 13:51:13.451764 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 13:51:13 crc kubenswrapper[4857]: I1128 13:51:13.668872 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4742114d-6a22-4e30-b6c9-31a7701763d0","Type":"ContainerStarted","Data":"8fa66d44052f36b03e2cbaf89214d8016057c0b84901e1dca4afb599c56e0f70"} Nov 28 13:51:13 crc kubenswrapper[4857]: I1128 13:51:13.673728 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"703d70c6-c4a2-4ea3-a547-6da82c40a8aa","Type":"ContainerStarted","Data":"3991f29e214f395bc91ce3aa26eff523543fb5f3e08b6258471afcced639e300"} Nov 28 13:51:13 crc kubenswrapper[4857]: I1128 13:51:13.677665 4857 generic.go:334] "Generic (PLEG): container finished" podID="1536ae25-51ba-49cc-af6f-7c7b9ceff289" containerID="7d2bd8e29d3a7fa4f57343292d152ea8bddb7c80c8eb3acb6f72875fcb9b851d" exitCode=0 Nov 28 13:51:13 crc kubenswrapper[4857]: 
I1128 13:51:13.677741 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-ngbtv" event={"ID":"1536ae25-51ba-49cc-af6f-7c7b9ceff289","Type":"ContainerDied","Data":"7d2bd8e29d3a7fa4f57343292d152ea8bddb7c80c8eb3acb6f72875fcb9b851d"} Nov 28 13:51:13 crc kubenswrapper[4857]: I1128 13:51:13.677786 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-ngbtv" event={"ID":"1536ae25-51ba-49cc-af6f-7c7b9ceff289","Type":"ContainerStarted","Data":"e3de1d602e550769f65ce7b67f332e8faf3bf620ee41df192da5a83fde0c9cea"} Nov 28 13:51:13 crc kubenswrapper[4857]: I1128 13:51:13.678211 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-56df8fb6b7-ngbtv" Nov 28 13:51:13 crc kubenswrapper[4857]: I1128 13:51:13.697274 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-56df8fb6b7-ngbtv" podStartSLOduration=2.697252887 podStartE2EDuration="2.697252887s" podCreationTimestamp="2025-11-28 13:51:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:51:13.696709573 +0000 UTC m=+1323.820651010" watchObservedRunningTime="2025-11-28 13:51:13.697252887 +0000 UTC m=+1323.821194324" Nov 28 13:51:14 crc kubenswrapper[4857]: I1128 13:51:14.242339 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="38632ea8-8761-4f9c-ab8a-29540bf608b1" path="/var/lib/kubelet/pods/38632ea8-8761-4f9c-ab8a-29540bf608b1/volumes" Nov 28 13:51:14 crc kubenswrapper[4857]: I1128 13:51:14.694576 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4742114d-6a22-4e30-b6c9-31a7701763d0","Type":"ContainerStarted","Data":"8deabdae6c53d6b230084ecce45bec4133dfe7c363177e2148924d6b10c65fec"} Nov 28 13:51:14 crc kubenswrapper[4857]: I1128 13:51:14.698022 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"703d70c6-c4a2-4ea3-a547-6da82c40a8aa","Type":"ContainerStarted","Data":"aaf0cf395550c3bf42091c1f232580395dd0dd3ac224c69bb89c4ebe23684a87"} Nov 28 13:51:15 crc kubenswrapper[4857]: I1128 13:51:15.707758 4857 generic.go:334] "Generic (PLEG): container finished" podID="0c8cf548-437e-4662-89ea-46923e0611dd" containerID="71ec8c59a35a35a64c7fc0c905ef1b6853ea3563d5aeb022b63a5ab844099b86" exitCode=0 Nov 28 13:51:15 crc kubenswrapper[4857]: I1128 13:51:15.707843 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-92t2k" event={"ID":"0c8cf548-437e-4662-89ea-46923e0611dd","Type":"ContainerDied","Data":"71ec8c59a35a35a64c7fc0c905ef1b6853ea3563d5aeb022b63a5ab844099b86"} Nov 28 13:51:18 crc kubenswrapper[4857]: I1128 13:51:18.413639 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 13:51:18 crc kubenswrapper[4857]: I1128 13:51:18.487833 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 13:51:18 crc kubenswrapper[4857]: E1128 13:51:18.599623 4857 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4fd8fd83_9147_4bbd_80f4_1eb001e2673b.slice/crio-4723d92a5cff83c24a63924037be0671f86e6be917ad119264f1539ec104b7b7\": RecentStats: unable to find data in memory cache], 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4fd8fd83_9147_4bbd_80f4_1eb001e2673b.slice\": RecentStats: unable to find data in memory cache]" Nov 28 13:51:21 crc kubenswrapper[4857]: I1128 13:51:21.579123 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-56df8fb6b7-ngbtv" Nov 28 13:51:21 crc kubenswrapper[4857]: I1128 13:51:21.679554 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-rv95b"] Nov 28 13:51:21 crc kubenswrapper[4857]: I1128 13:51:21.683822 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5c79d794d7-rv95b" podUID="61e58ae5-9323-422f-ae62-a88146ae3beb" containerName="dnsmasq-dns" containerID="cri-o://b5d127bdd61fecc173b260c4f1511da50b4e298e9d3039f6515ad85b7a11a048" gracePeriod=10 Nov 28 13:51:23 crc kubenswrapper[4857]: I1128 13:51:22.783804 4857 generic.go:334] "Generic (PLEG): container finished" podID="61e58ae5-9323-422f-ae62-a88146ae3beb" containerID="b5d127bdd61fecc173b260c4f1511da50b4e298e9d3039f6515ad85b7a11a048" exitCode=0 Nov 28 13:51:23 crc kubenswrapper[4857]: I1128 13:51:22.783997 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-rv95b" event={"ID":"61e58ae5-9323-422f-ae62-a88146ae3beb","Type":"ContainerDied","Data":"b5d127bdd61fecc173b260c4f1511da50b4e298e9d3039f6515ad85b7a11a048"} Nov 28 13:51:25 crc kubenswrapper[4857]: I1128 13:51:25.816297 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5c79d794d7-rv95b" podUID="61e58ae5-9323-422f-ae62-a88146ae3beb" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.136:5353: connect: connection refused" Nov 28 13:51:28 crc kubenswrapper[4857]: E1128 13:51:28.842670 4857 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4fd8fd83_9147_4bbd_80f4_1eb001e2673b.slice/crio-4723d92a5cff83c24a63924037be0671f86e6be917ad119264f1539ec104b7b7\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4fd8fd83_9147_4bbd_80f4_1eb001e2673b.slice\": RecentStats: unable to find data in memory cache]" Nov 28 13:51:29 crc kubenswrapper[4857]: I1128 13:51:29.210911 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-92t2k" Nov 28 13:51:29 crc kubenswrapper[4857]: I1128 13:51:29.342316 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/0c8cf548-437e-4662-89ea-46923e0611dd-credential-keys\") pod \"0c8cf548-437e-4662-89ea-46923e0611dd\" (UID: \"0c8cf548-437e-4662-89ea-46923e0611dd\") " Nov 28 13:51:29 crc kubenswrapper[4857]: I1128 13:51:29.342464 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c8cf548-437e-4662-89ea-46923e0611dd-config-data\") pod \"0c8cf548-437e-4662-89ea-46923e0611dd\" (UID: \"0c8cf548-437e-4662-89ea-46923e0611dd\") " Nov 28 13:51:29 crc kubenswrapper[4857]: I1128 13:51:29.342508 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c8cf548-437e-4662-89ea-46923e0611dd-scripts\") pod \"0c8cf548-437e-4662-89ea-46923e0611dd\" (UID: \"0c8cf548-437e-4662-89ea-46923e0611dd\") " Nov 28 13:51:29 crc kubenswrapper[4857]: I1128 13:51:29.342578 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gw5hr\" (UniqueName: \"kubernetes.io/projected/0c8cf548-437e-4662-89ea-46923e0611dd-kube-api-access-gw5hr\") pod \"0c8cf548-437e-4662-89ea-46923e0611dd\" (UID: \"0c8cf548-437e-4662-89ea-46923e0611dd\") " Nov 28 13:51:29 crc kubenswrapper[4857]: I1128 13:51:29.342607 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0c8cf548-437e-4662-89ea-46923e0611dd-fernet-keys\") pod \"0c8cf548-437e-4662-89ea-46923e0611dd\" (UID: \"0c8cf548-437e-4662-89ea-46923e0611dd\") " Nov 28 13:51:29 crc kubenswrapper[4857]: I1128 13:51:29.342645 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c8cf548-437e-4662-89ea-46923e0611dd-combined-ca-bundle\") pod \"0c8cf548-437e-4662-89ea-46923e0611dd\" (UID: \"0c8cf548-437e-4662-89ea-46923e0611dd\") " Nov 28 13:51:29 crc kubenswrapper[4857]: I1128 13:51:29.348791 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0c8cf548-437e-4662-89ea-46923e0611dd-kube-api-access-gw5hr" (OuterVolumeSpecName: "kube-api-access-gw5hr") pod "0c8cf548-437e-4662-89ea-46923e0611dd" (UID: "0c8cf548-437e-4662-89ea-46923e0611dd"). InnerVolumeSpecName "kube-api-access-gw5hr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:51:29 crc kubenswrapper[4857]: I1128 13:51:29.350682 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c8cf548-437e-4662-89ea-46923e0611dd-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "0c8cf548-437e-4662-89ea-46923e0611dd" (UID: "0c8cf548-437e-4662-89ea-46923e0611dd"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:51:29 crc kubenswrapper[4857]: I1128 13:51:29.351442 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c8cf548-437e-4662-89ea-46923e0611dd-scripts" (OuterVolumeSpecName: "scripts") pod "0c8cf548-437e-4662-89ea-46923e0611dd" (UID: "0c8cf548-437e-4662-89ea-46923e0611dd"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:51:29 crc kubenswrapper[4857]: I1128 13:51:29.351481 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c8cf548-437e-4662-89ea-46923e0611dd-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "0c8cf548-437e-4662-89ea-46923e0611dd" (UID: "0c8cf548-437e-4662-89ea-46923e0611dd"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:51:29 crc kubenswrapper[4857]: I1128 13:51:29.367677 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c8cf548-437e-4662-89ea-46923e0611dd-config-data" (OuterVolumeSpecName: "config-data") pod "0c8cf548-437e-4662-89ea-46923e0611dd" (UID: "0c8cf548-437e-4662-89ea-46923e0611dd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:51:29 crc kubenswrapper[4857]: I1128 13:51:29.376239 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0c8cf548-437e-4662-89ea-46923e0611dd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0c8cf548-437e-4662-89ea-46923e0611dd" (UID: "0c8cf548-437e-4662-89ea-46923e0611dd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:51:29 crc kubenswrapper[4857]: I1128 13:51:29.445183 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gw5hr\" (UniqueName: \"kubernetes.io/projected/0c8cf548-437e-4662-89ea-46923e0611dd-kube-api-access-gw5hr\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:29 crc kubenswrapper[4857]: I1128 13:51:29.445237 4857 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0c8cf548-437e-4662-89ea-46923e0611dd-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:29 crc kubenswrapper[4857]: I1128 13:51:29.445250 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c8cf548-437e-4662-89ea-46923e0611dd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:29 crc kubenswrapper[4857]: I1128 13:51:29.445260 4857 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/0c8cf548-437e-4662-89ea-46923e0611dd-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:29 crc kubenswrapper[4857]: I1128 13:51:29.445270 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0c8cf548-437e-4662-89ea-46923e0611dd-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:29 crc kubenswrapper[4857]: I1128 13:51:29.445279 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0c8cf548-437e-4662-89ea-46923e0611dd-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:29 crc kubenswrapper[4857]: E1128 13:51:29.669568 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified" Nov 28 13:51:29 crc kubenswrapper[4857]: E1128 13:51:29.669735 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db 
upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qbgxz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-g4slf_openstack(d9199b78-2d93-4877-95b8-ed8457716a3f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 28 13:51:29 crc kubenswrapper[4857]: E1128 13:51:29.670970 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-g4slf" podUID="d9199b78-2d93-4877-95b8-ed8457716a3f" Nov 28 13:51:29 crc kubenswrapper[4857]: I1128 13:51:29.863093 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-92t2k" event={"ID":"0c8cf548-437e-4662-89ea-46923e0611dd","Type":"ContainerDied","Data":"ee86c723b1757e0a276683b36f03550d38ea8483e2efb0ba7d065c4d40bbb6ba"} Nov 28 13:51:29 crc kubenswrapper[4857]: I1128 13:51:29.863512 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ee86c723b1757e0a276683b36f03550d38ea8483e2efb0ba7d065c4d40bbb6ba" Nov 28 13:51:29 crc kubenswrapper[4857]: I1128 13:51:29.863151 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-92t2k" Nov 28 13:51:29 crc kubenswrapper[4857]: E1128 13:51:29.866036 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-g4slf" podUID="d9199b78-2d93-4877-95b8-ed8457716a3f" Nov 28 13:51:30 crc kubenswrapper[4857]: I1128 13:51:30.299474 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-92t2k"] Nov 28 13:51:30 crc kubenswrapper[4857]: I1128 13:51:30.306685 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-92t2k"] Nov 28 13:51:30 crc kubenswrapper[4857]: I1128 13:51:30.420536 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-mt7c6"] Nov 28 13:51:30 crc kubenswrapper[4857]: E1128 13:51:30.421139 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0c8cf548-437e-4662-89ea-46923e0611dd" containerName="keystone-bootstrap" Nov 28 13:51:30 crc kubenswrapper[4857]: I1128 13:51:30.421171 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="0c8cf548-437e-4662-89ea-46923e0611dd" containerName="keystone-bootstrap" Nov 28 13:51:30 crc kubenswrapper[4857]: E1128 13:51:30.421195 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38632ea8-8761-4f9c-ab8a-29540bf608b1" containerName="dnsmasq-dns" Nov 28 13:51:30 crc kubenswrapper[4857]: I1128 13:51:30.421207 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="38632ea8-8761-4f9c-ab8a-29540bf608b1" containerName="dnsmasq-dns" Nov 28 13:51:30 crc kubenswrapper[4857]: E1128 13:51:30.421233 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38632ea8-8761-4f9c-ab8a-29540bf608b1" containerName="init" Nov 28 13:51:30 crc kubenswrapper[4857]: I1128 13:51:30.421246 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="38632ea8-8761-4f9c-ab8a-29540bf608b1" containerName="init" Nov 28 13:51:30 crc kubenswrapper[4857]: I1128 13:51:30.421582 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="0c8cf548-437e-4662-89ea-46923e0611dd" containerName="keystone-bootstrap" Nov 28 13:51:30 crc kubenswrapper[4857]: I1128 13:51:30.421647 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="38632ea8-8761-4f9c-ab8a-29540bf608b1" containerName="dnsmasq-dns" Nov 28 13:51:30 crc kubenswrapper[4857]: I1128 13:51:30.422695 4857 util.go:30] "No sandbox for pod can be found. 
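The ErrImagePull / ImagePullBackOff pair above is kubelet's standard pull-failure flow: the CRI PullImage RPC fails (here: rpc Canceled while copying the image config), the sync loop surfaces it as ErrImagePull for the container, and subsequent retries are throttled as ImagePullBackOff. When triaging a log like this one it helps to extract just those records and tally them per pod. A minimal Go sketch, assuming the record layout seen above; the log path and the regular expression are illustrative, not anything kubelet ships:

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

// Matches "Error syncing pod" records like the ones above, capturing the
// failure reason (ErrImagePull or ImagePullBackOff) and the pod name.
var pullErr = regexp.MustCompile(`with (ErrImagePull|ImagePullBackOff).*?pod="([^"]+)"`)

func main() {
	f, err := os.Open("kubelet.log") // hypothetical path to this log
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer f.Close()

	counts := map[string]map[string]int{} // pod -> reason -> count
	sc := bufio.NewScanner(f)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // container-spec records can be very long
	for sc.Scan() {
		if m := pullErr.FindStringSubmatch(sc.Text()); m != nil {
			reason, pod := m[1], m[2]
			if counts[pod] == nil {
				counts[pod] = map[string]int{}
			}
			counts[pod][reason]++
		}
	}
	for pod, byReason := range counts {
		fmt.Printf("%s: %v\n", pod, byReason)
	}
}

Run against this log, it would report openstack/barbican-db-sync-g4slf (and, further down, openstack/cinder-db-sync-f9rw6) as the pods stuck in pull back-off.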
Nov 28 13:51:30 crc kubenswrapper[4857]: I1128 13:51:30.425337 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Nov 28 13:51:30 crc kubenswrapper[4857]: I1128 13:51:30.425481 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Nov 28 13:51:30 crc kubenswrapper[4857]: I1128 13:51:30.428978 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-49rtd"
Nov 28 13:51:30 crc kubenswrapper[4857]: I1128 13:51:30.429273 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret"
Nov 28 13:51:30 crc kubenswrapper[4857]: I1128 13:51:30.434277 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Nov 28 13:51:30 crc kubenswrapper[4857]: I1128 13:51:30.434725 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-mt7c6"]
Nov 28 13:51:30 crc kubenswrapper[4857]: I1128 13:51:30.563602 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c908dccd-c878-4dab-8186-632eb7750cff-fernet-keys\") pod \"keystone-bootstrap-mt7c6\" (UID: \"c908dccd-c878-4dab-8186-632eb7750cff\") " pod="openstack/keystone-bootstrap-mt7c6"
Nov 28 13:51:30 crc kubenswrapper[4857]: I1128 13:51:30.563681 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jrdlj\" (UniqueName: \"kubernetes.io/projected/c908dccd-c878-4dab-8186-632eb7750cff-kube-api-access-jrdlj\") pod \"keystone-bootstrap-mt7c6\" (UID: \"c908dccd-c878-4dab-8186-632eb7750cff\") " pod="openstack/keystone-bootstrap-mt7c6"
Nov 28 13:51:30 crc kubenswrapper[4857]: I1128 13:51:30.563712 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c908dccd-c878-4dab-8186-632eb7750cff-scripts\") pod \"keystone-bootstrap-mt7c6\" (UID: \"c908dccd-c878-4dab-8186-632eb7750cff\") " pod="openstack/keystone-bootstrap-mt7c6"
Nov 28 13:51:30 crc kubenswrapper[4857]: I1128 13:51:30.563921 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c908dccd-c878-4dab-8186-632eb7750cff-combined-ca-bundle\") pod \"keystone-bootstrap-mt7c6\" (UID: \"c908dccd-c878-4dab-8186-632eb7750cff\") " pod="openstack/keystone-bootstrap-mt7c6"
Nov 28 13:51:30 crc kubenswrapper[4857]: I1128 13:51:30.563995 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c908dccd-c878-4dab-8186-632eb7750cff-credential-keys\") pod \"keystone-bootstrap-mt7c6\" (UID: \"c908dccd-c878-4dab-8186-632eb7750cff\") " pod="openstack/keystone-bootstrap-mt7c6"
Nov 28 13:51:30 crc kubenswrapper[4857]: I1128 13:51:30.564120 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c908dccd-c878-4dab-8186-632eb7750cff-config-data\") pod \"keystone-bootstrap-mt7c6\" (UID: \"c908dccd-c878-4dab-8186-632eb7750cff\") " pod="openstack/keystone-bootstrap-mt7c6"
Nov 28 13:51:30 crc kubenswrapper[4857]: I1128 13:51:30.665309 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c908dccd-c878-4dab-8186-632eb7750cff-fernet-keys\") pod \"keystone-bootstrap-mt7c6\" (UID: \"c908dccd-c878-4dab-8186-632eb7750cff\") " pod="openstack/keystone-bootstrap-mt7c6"
Nov 28 13:51:30 crc kubenswrapper[4857]: I1128 13:51:30.665368 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jrdlj\" (UniqueName: \"kubernetes.io/projected/c908dccd-c878-4dab-8186-632eb7750cff-kube-api-access-jrdlj\") pod \"keystone-bootstrap-mt7c6\" (UID: \"c908dccd-c878-4dab-8186-632eb7750cff\") " pod="openstack/keystone-bootstrap-mt7c6"
Nov 28 13:51:30 crc kubenswrapper[4857]: I1128 13:51:30.665391 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c908dccd-c878-4dab-8186-632eb7750cff-scripts\") pod \"keystone-bootstrap-mt7c6\" (UID: \"c908dccd-c878-4dab-8186-632eb7750cff\") " pod="openstack/keystone-bootstrap-mt7c6"
Nov 28 13:51:30 crc kubenswrapper[4857]: I1128 13:51:30.665453 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c908dccd-c878-4dab-8186-632eb7750cff-combined-ca-bundle\") pod \"keystone-bootstrap-mt7c6\" (UID: \"c908dccd-c878-4dab-8186-632eb7750cff\") " pod="openstack/keystone-bootstrap-mt7c6"
Nov 28 13:51:30 crc kubenswrapper[4857]: I1128 13:51:30.665472 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c908dccd-c878-4dab-8186-632eb7750cff-credential-keys\") pod \"keystone-bootstrap-mt7c6\" (UID: \"c908dccd-c878-4dab-8186-632eb7750cff\") " pod="openstack/keystone-bootstrap-mt7c6"
Nov 28 13:51:30 crc kubenswrapper[4857]: I1128 13:51:30.665511 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c908dccd-c878-4dab-8186-632eb7750cff-config-data\") pod \"keystone-bootstrap-mt7c6\" (UID: \"c908dccd-c878-4dab-8186-632eb7750cff\") " pod="openstack/keystone-bootstrap-mt7c6"
Nov 28 13:51:30 crc kubenswrapper[4857]: I1128 13:51:30.670381 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c908dccd-c878-4dab-8186-632eb7750cff-credential-keys\") pod \"keystone-bootstrap-mt7c6\" (UID: \"c908dccd-c878-4dab-8186-632eb7750cff\") " pod="openstack/keystone-bootstrap-mt7c6"
Nov 28 13:51:30 crc kubenswrapper[4857]: I1128 13:51:30.676438 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c908dccd-c878-4dab-8186-632eb7750cff-scripts\") pod \"keystone-bootstrap-mt7c6\" (UID: \"c908dccd-c878-4dab-8186-632eb7750cff\") " pod="openstack/keystone-bootstrap-mt7c6"
Nov 28 13:51:30 crc kubenswrapper[4857]: I1128 13:51:30.676590 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c908dccd-c878-4dab-8186-632eb7750cff-combined-ca-bundle\") pod \"keystone-bootstrap-mt7c6\" (UID: \"c908dccd-c878-4dab-8186-632eb7750cff\") " pod="openstack/keystone-bootstrap-mt7c6"
Nov 28 13:51:30 crc kubenswrapper[4857]: I1128 13:51:30.676656 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c908dccd-c878-4dab-8186-632eb7750cff-config-data\") pod \"keystone-bootstrap-mt7c6\" (UID: \"c908dccd-c878-4dab-8186-632eb7750cff\") " pod="openstack/keystone-bootstrap-mt7c6"
Nov 28 13:51:30 crc kubenswrapper[4857]: I1128 13:51:30.677372 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c908dccd-c878-4dab-8186-632eb7750cff-fernet-keys\") pod \"keystone-bootstrap-mt7c6\" (UID: \"c908dccd-c878-4dab-8186-632eb7750cff\") " pod="openstack/keystone-bootstrap-mt7c6"
Nov 28 13:51:30 crc kubenswrapper[4857]: I1128 13:51:30.680729 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jrdlj\" (UniqueName: \"kubernetes.io/projected/c908dccd-c878-4dab-8186-632eb7750cff-kube-api-access-jrdlj\") pod \"keystone-bootstrap-mt7c6\" (UID: \"c908dccd-c878-4dab-8186-632eb7750cff\") " pod="openstack/keystone-bootstrap-mt7c6"
Nov 28 13:51:30 crc kubenswrapper[4857]: I1128 13:51:30.746207 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-mt7c6"
Nov 28 13:51:30 crc kubenswrapper[4857]: E1128 13:51:30.816626 4857 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified"
Nov 28 13:51:30 crc kubenswrapper[4857]: E1128 13:51:30.817214 4857 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-m64mr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-f9rw6_openstack(aebc3704-6a31-4813-8826-622ffb7f6934): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 28 13:51:30 crc kubenswrapper[4857]: E1128 13:51:30.818803 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-f9rw6" podUID="aebc3704-6a31-4813-8826-622ffb7f6934"
Nov 28 13:51:30 crc kubenswrapper[4857]: I1128 13:51:30.881986 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-rv95b" event={"ID":"61e58ae5-9323-422f-ae62-a88146ae3beb","Type":"ContainerDied","Data":"eeb6d6d5383bd9e5dd6eeba7f122b9e73d1d16c62826ef42e397bdbaba99ec2f"}
Nov 28 13:51:30 crc kubenswrapper[4857]: I1128 13:51:30.882028 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eeb6d6d5383bd9e5dd6eeba7f122b9e73d1d16c62826ef42e397bdbaba99ec2f"
Nov 28 13:51:30 crc kubenswrapper[4857]: E1128 13:51:30.885834 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-f9rw6" podUID="aebc3704-6a31-4813-8826-622ffb7f6934"
Nov 28 13:51:30 crc kubenswrapper[4857]: I1128 13:51:30.927187 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-rv95b"
Nov 28 13:51:31 crc kubenswrapper[4857]: I1128 13:51:31.073931 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/61e58ae5-9323-422f-ae62-a88146ae3beb-dns-svc\") pod \"61e58ae5-9323-422f-ae62-a88146ae3beb\" (UID: \"61e58ae5-9323-422f-ae62-a88146ae3beb\") "
Nov 28 13:51:31 crc kubenswrapper[4857]: I1128 13:51:31.074342 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xpk5x\" (UniqueName: \"kubernetes.io/projected/61e58ae5-9323-422f-ae62-a88146ae3beb-kube-api-access-xpk5x\") pod \"61e58ae5-9323-422f-ae62-a88146ae3beb\" (UID: \"61e58ae5-9323-422f-ae62-a88146ae3beb\") "
Nov 28 13:51:31 crc kubenswrapper[4857]: I1128 13:51:31.074379 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/61e58ae5-9323-422f-ae62-a88146ae3beb-ovsdbserver-nb\") pod \"61e58ae5-9323-422f-ae62-a88146ae3beb\" (UID: \"61e58ae5-9323-422f-ae62-a88146ae3beb\") "
Nov 28 13:51:31 crc kubenswrapper[4857]: I1128 13:51:31.074412 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61e58ae5-9323-422f-ae62-a88146ae3beb-config\") pod \"61e58ae5-9323-422f-ae62-a88146ae3beb\" (UID: \"61e58ae5-9323-422f-ae62-a88146ae3beb\") "
Nov 28 13:51:31 crc kubenswrapper[4857]: I1128 13:51:31.074461 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/61e58ae5-9323-422f-ae62-a88146ae3beb-ovsdbserver-sb\") pod \"61e58ae5-9323-422f-ae62-a88146ae3beb\" (UID: \"61e58ae5-9323-422f-ae62-a88146ae3beb\") "
Nov 28 13:51:31 crc kubenswrapper[4857]: I1128 13:51:31.074553 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/61e58ae5-9323-422f-ae62-a88146ae3beb-dns-swift-storage-0\") pod \"61e58ae5-9323-422f-ae62-a88146ae3beb\" (UID: \"61e58ae5-9323-422f-ae62-a88146ae3beb\") "
Nov 28 13:51:31 crc kubenswrapper[4857]: I1128 13:51:31.078650 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/61e58ae5-9323-422f-ae62-a88146ae3beb-kube-api-access-xpk5x" (OuterVolumeSpecName: "kube-api-access-xpk5x") pod "61e58ae5-9323-422f-ae62-a88146ae3beb" (UID: "61e58ae5-9323-422f-ae62-a88146ae3beb"). InnerVolumeSpecName "kube-api-access-xpk5x". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:51:31 crc kubenswrapper[4857]: I1128 13:51:31.123387 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61e58ae5-9323-422f-ae62-a88146ae3beb-config" (OuterVolumeSpecName: "config") pod "61e58ae5-9323-422f-ae62-a88146ae3beb" (UID: "61e58ae5-9323-422f-ae62-a88146ae3beb"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:51:31 crc kubenswrapper[4857]: I1128 13:51:31.130009 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61e58ae5-9323-422f-ae62-a88146ae3beb-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "61e58ae5-9323-422f-ae62-a88146ae3beb" (UID: "61e58ae5-9323-422f-ae62-a88146ae3beb"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:51:31 crc kubenswrapper[4857]: I1128 13:51:31.134913 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61e58ae5-9323-422f-ae62-a88146ae3beb-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "61e58ae5-9323-422f-ae62-a88146ae3beb" (UID: "61e58ae5-9323-422f-ae62-a88146ae3beb"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:51:31 crc kubenswrapper[4857]: I1128 13:51:31.137425 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61e58ae5-9323-422f-ae62-a88146ae3beb-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "61e58ae5-9323-422f-ae62-a88146ae3beb" (UID: "61e58ae5-9323-422f-ae62-a88146ae3beb"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:51:31 crc kubenswrapper[4857]: I1128 13:51:31.141616 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61e58ae5-9323-422f-ae62-a88146ae3beb-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "61e58ae5-9323-422f-ae62-a88146ae3beb" (UID: "61e58ae5-9323-422f-ae62-a88146ae3beb"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 13:51:31 crc kubenswrapper[4857]: I1128 13:51:31.175895 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xpk5x\" (UniqueName: \"kubernetes.io/projected/61e58ae5-9323-422f-ae62-a88146ae3beb-kube-api-access-xpk5x\") on node \"crc\" DevicePath \"\""
Nov 28 13:51:31 crc kubenswrapper[4857]: I1128 13:51:31.175921 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/61e58ae5-9323-422f-ae62-a88146ae3beb-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 28 13:51:31 crc kubenswrapper[4857]: I1128 13:51:31.175930 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/61e58ae5-9323-422f-ae62-a88146ae3beb-config\") on node \"crc\" DevicePath \"\""
Nov 28 13:51:31 crc kubenswrapper[4857]: I1128 13:51:31.175938 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/61e58ae5-9323-422f-ae62-a88146ae3beb-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 28 13:51:31 crc kubenswrapper[4857]: I1128 13:51:31.175964 4857 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/61e58ae5-9323-422f-ae62-a88146ae3beb-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Nov 28 13:51:31 crc kubenswrapper[4857]: I1128 13:51:31.175972 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/61e58ae5-9323-422f-ae62-a88146ae3beb-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 28 13:51:31 crc kubenswrapper[4857]: I1128 13:51:31.279620 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-mt7c6"]
Nov 28 13:51:31 crc kubenswrapper[4857]: I1128 13:51:31.893516 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"703d70c6-c4a2-4ea3-a547-6da82c40a8aa","Type":"ContainerStarted","Data":"a33c1315e2eb2ef5a1d17ef2a8b1973d20d88876b29d9095316e0436d85fb5d9"}
Nov 28 13:51:31 crc kubenswrapper[4857]: I1128 13:51:31.893590 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="703d70c6-c4a2-4ea3-a547-6da82c40a8aa" containerName="glance-log" containerID="cri-o://aaf0cf395550c3bf42091c1f232580395dd0dd3ac224c69bb89c4ebe23684a87" gracePeriod=30
Nov 28 13:51:31 crc kubenswrapper[4857]: I1128 13:51:31.893677 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="703d70c6-c4a2-4ea3-a547-6da82c40a8aa" containerName="glance-httpd" containerID="cri-o://a33c1315e2eb2ef5a1d17ef2a8b1973d20d88876b29d9095316e0436d85fb5d9" gracePeriod=30
Nov 28 13:51:31 crc kubenswrapper[4857]: I1128 13:51:31.895186 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-mt7c6" event={"ID":"c908dccd-c878-4dab-8186-632eb7750cff","Type":"ContainerStarted","Data":"e1f8f8e9bc6117e87029e7ff11a91ae766918b1844404c4c2ebb9d051e3cab75"}
Nov 28 13:51:31 crc kubenswrapper[4857]: I1128 13:51:31.895241 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-mt7c6" event={"ID":"c908dccd-c878-4dab-8186-632eb7750cff","Type":"ContainerStarted","Data":"2ea949bf399fa213df92a5e9220a297b12d9b86ad373984574e3e227d48b8dfd"}
Nov 28 13:51:31 crc kubenswrapper[4857]: I1128 13:51:31.896872 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-hw6nk" event={"ID":"b5b67972-d610-4d3c-96e3-b8a1593f196a","Type":"ContainerStarted","Data":"b59a97768689c496e41691f3b6f3693124ea51c593b2a2c5ee62bcd9479b8ee4"}
Nov 28 13:51:31 crc kubenswrapper[4857]: I1128 13:51:31.898932 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9b948893-d309-41ca-987c-287ee0b12ef2","Type":"ContainerStarted","Data":"469ba51de67f78e9d88dca6693a397f6a6636c87ca3e4120dd8b5a9837fd6dc3"}
Nov 28 13:51:31 crc kubenswrapper[4857]: I1128 13:51:31.901272 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-rv95b"
Nov 28 13:51:31 crc kubenswrapper[4857]: I1128 13:51:31.904028 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4742114d-6a22-4e30-b6c9-31a7701763d0","Type":"ContainerStarted","Data":"c4a3f22359a524f804eee3eba4e070f1ac208480a5a1089b435857bd82e1bb61"}
Nov 28 13:51:31 crc kubenswrapper[4857]: I1128 13:51:31.904115 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="4742114d-6a22-4e30-b6c9-31a7701763d0" containerName="glance-log" containerID="cri-o://8deabdae6c53d6b230084ecce45bec4133dfe7c363177e2148924d6b10c65fec" gracePeriod=30
Nov 28 13:51:31 crc kubenswrapper[4857]: I1128 13:51:31.904169 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="4742114d-6a22-4e30-b6c9-31a7701763d0" containerName="glance-httpd" containerID="cri-o://c4a3f22359a524f804eee3eba4e070f1ac208480a5a1089b435857bd82e1bb61" gracePeriod=30
Nov 28 13:51:31 crc kubenswrapper[4857]: I1128 13:51:31.924382 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=20.924359868 podStartE2EDuration="20.924359868s" podCreationTimestamp="2025-11-28 13:51:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:51:31.913127985 +0000 UTC m=+1342.037069422" watchObservedRunningTime="2025-11-28 13:51:31.924359868 +0000 UTC m=+1342.048301305"
Nov 28 13:51:31 crc kubenswrapper[4857]: I1128 13:51:31.934466 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-hw6nk" podStartSLOduration=3.683621207 podStartE2EDuration="24.934448729s" podCreationTimestamp="2025-11-28 13:51:07 +0000 UTC" firstStartedPulling="2025-11-28 13:51:09.510319071 +0000 UTC m=+1319.634260508" lastFinishedPulling="2025-11-28 13:51:30.761146583 +0000 UTC m=+1340.885088030" observedRunningTime="2025-11-28 13:51:31.929398313 +0000 UTC m=+1342.053339770" watchObservedRunningTime="2025-11-28 13:51:31.934448729 +0000 UTC m=+1342.058390166"
Nov 28 13:51:31 crc kubenswrapper[4857]: I1128 13:51:31.954000 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=21.953981455 podStartE2EDuration="21.953981455s" podCreationTimestamp="2025-11-28 13:51:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:51:31.952340251 +0000 UTC m=+1342.076281718" watchObservedRunningTime="2025-11-28 13:51:31.953981455 +0000 UTC m=+1342.077922892"
Nov 28 13:51:31 crc kubenswrapper[4857]: I1128 13:51:31.977031 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-mt7c6" podStartSLOduration=1.9770124249999999 podStartE2EDuration="1.977012425s" podCreationTimestamp="2025-11-28 13:51:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:51:31.972421812 +0000 UTC m=+1342.096363259" watchObservedRunningTime="2025-11-28 13:51:31.977012425 +0000 UTC m=+1342.100953882"
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.000004 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-rv95b"]
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.011278 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-rv95b"]
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.240403 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0c8cf548-437e-4662-89ea-46923e0611dd" path="/var/lib/kubelet/pods/0c8cf548-437e-4662-89ea-46923e0611dd/volumes"
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.241299 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="61e58ae5-9323-422f-ae62-a88146ae3beb" path="/var/lib/kubelet/pods/61e58ae5-9323-422f-ae62-a88146ae3beb/volumes"
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.758183 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.772290 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.914742 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9b948893-d309-41ca-987c-287ee0b12ef2","Type":"ContainerStarted","Data":"c8b8591445954b9e685515c84c1d977b8750fa6e93237284c1d33ce8ceabed21"}
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.917056 4857 generic.go:334] "Generic (PLEG): container finished" podID="4742114d-6a22-4e30-b6c9-31a7701763d0" containerID="c4a3f22359a524f804eee3eba4e070f1ac208480a5a1089b435857bd82e1bb61" exitCode=0
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.917080 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.917092 4857 generic.go:334] "Generic (PLEG): container finished" podID="4742114d-6a22-4e30-b6c9-31a7701763d0" containerID="8deabdae6c53d6b230084ecce45bec4133dfe7c363177e2148924d6b10c65fec" exitCode=143
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.917127 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4742114d-6a22-4e30-b6c9-31a7701763d0","Type":"ContainerDied","Data":"c4a3f22359a524f804eee3eba4e070f1ac208480a5a1089b435857bd82e1bb61"}
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.917208 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4742114d-6a22-4e30-b6c9-31a7701763d0","Type":"ContainerDied","Data":"8deabdae6c53d6b230084ecce45bec4133dfe7c363177e2148924d6b10c65fec"}
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.917226 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4742114d-6a22-4e30-b6c9-31a7701763d0","Type":"ContainerDied","Data":"8fa66d44052f36b03e2cbaf89214d8016057c0b84901e1dca4afb599c56e0f70"}
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.917246 4857 scope.go:117] "RemoveContainer" containerID="c4a3f22359a524f804eee3eba4e070f1ac208480a5a1089b435857bd82e1bb61"
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.920605 4857 generic.go:334] "Generic (PLEG): container finished" podID="703d70c6-c4a2-4ea3-a547-6da82c40a8aa" containerID="a33c1315e2eb2ef5a1d17ef2a8b1973d20d88876b29d9095316e0436d85fb5d9" exitCode=0
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.920645 4857 generic.go:334] "Generic (PLEG): container finished" podID="703d70c6-c4a2-4ea3-a547-6da82c40a8aa" containerID="aaf0cf395550c3bf42091c1f232580395dd0dd3ac224c69bb89c4ebe23684a87" exitCode=143
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.921217 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
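The exit codes in the "container finished" records above follow the common 128+N convention: for both glance pods the glance-httpd container exits 0 (it shut down cleanly within the 30-second grace period initiated above), while glance-log exits 143 = 128 + 15, i.e. it was terminated by SIGTERM. A tiny decoder just to make the convention concrete; this is a shell/runtime convention, not anything kubelet-specific:

package main

import (
	"fmt"
	"syscall"
)

// decode interprets a container exit code using the common 128+N
// convention, where a code above 128 means "terminated by signal N".
func decode(code int) string {
	if code > 128 {
		sig := syscall.Signal(code - 128)
		return fmt.Sprintf("killed by signal %d (%v)", code-128, sig)
	}
	return fmt.Sprintf("exited with status %d", code)
}

func main() {
	for _, c := range []int{0, 143} { // the two exit codes in the records above
		fmt.Printf("exitCode=%d -> %s\n", c, decode(c))
	}
}

On Linux this prints "exitCode=143 -> killed by signal 15 (terminated)", matching the graceful SIGTERM kill logged by kuberuntime_container.go.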
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.921933 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"703d70c6-c4a2-4ea3-a547-6da82c40a8aa","Type":"ContainerDied","Data":"a33c1315e2eb2ef5a1d17ef2a8b1973d20d88876b29d9095316e0436d85fb5d9"}
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.922002 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"703d70c6-c4a2-4ea3-a547-6da82c40a8aa","Type":"ContainerDied","Data":"aaf0cf395550c3bf42091c1f232580395dd0dd3ac224c69bb89c4ebe23684a87"}
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.922014 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"703d70c6-c4a2-4ea3-a547-6da82c40a8aa","Type":"ContainerDied","Data":"3991f29e214f395bc91ce3aa26eff523543fb5f3e08b6258471afcced639e300"}
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.932939 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/703d70c6-c4a2-4ea3-a547-6da82c40a8aa-httpd-run\") pod \"703d70c6-c4a2-4ea3-a547-6da82c40a8aa\" (UID: \"703d70c6-c4a2-4ea3-a547-6da82c40a8aa\") "
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.933043 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/703d70c6-c4a2-4ea3-a547-6da82c40a8aa-scripts\") pod \"703d70c6-c4a2-4ea3-a547-6da82c40a8aa\" (UID: \"703d70c6-c4a2-4ea3-a547-6da82c40a8aa\") "
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.933092 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"4742114d-6a22-4e30-b6c9-31a7701763d0\" (UID: \"4742114d-6a22-4e30-b6c9-31a7701763d0\") "
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.933107 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/703d70c6-c4a2-4ea3-a547-6da82c40a8aa-logs\") pod \"703d70c6-c4a2-4ea3-a547-6da82c40a8aa\" (UID: \"703d70c6-c4a2-4ea3-a547-6da82c40a8aa\") "
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.933131 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"703d70c6-c4a2-4ea3-a547-6da82c40a8aa\" (UID: \"703d70c6-c4a2-4ea3-a547-6da82c40a8aa\") "
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.933145 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4742114d-6a22-4e30-b6c9-31a7701763d0-logs\") pod \"4742114d-6a22-4e30-b6c9-31a7701763d0\" (UID: \"4742114d-6a22-4e30-b6c9-31a7701763d0\") "
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.933168 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4742114d-6a22-4e30-b6c9-31a7701763d0-config-data\") pod \"4742114d-6a22-4e30-b6c9-31a7701763d0\" (UID: \"4742114d-6a22-4e30-b6c9-31a7701763d0\") "
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.933186 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4742114d-6a22-4e30-b6c9-31a7701763d0-httpd-run\") pod \"4742114d-6a22-4e30-b6c9-31a7701763d0\" (UID: \"4742114d-6a22-4e30-b6c9-31a7701763d0\") "
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.933221 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4742114d-6a22-4e30-b6c9-31a7701763d0-combined-ca-bundle\") pod \"4742114d-6a22-4e30-b6c9-31a7701763d0\" (UID: \"4742114d-6a22-4e30-b6c9-31a7701763d0\") "
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.933239 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-88dd9\" (UniqueName: \"kubernetes.io/projected/4742114d-6a22-4e30-b6c9-31a7701763d0-kube-api-access-88dd9\") pod \"4742114d-6a22-4e30-b6c9-31a7701763d0\" (UID: \"4742114d-6a22-4e30-b6c9-31a7701763d0\") "
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.933284 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w2dd6\" (UniqueName: \"kubernetes.io/projected/703d70c6-c4a2-4ea3-a547-6da82c40a8aa-kube-api-access-w2dd6\") pod \"703d70c6-c4a2-4ea3-a547-6da82c40a8aa\" (UID: \"703d70c6-c4a2-4ea3-a547-6da82c40a8aa\") "
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.933314 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/703d70c6-c4a2-4ea3-a547-6da82c40a8aa-config-data\") pod \"703d70c6-c4a2-4ea3-a547-6da82c40a8aa\" (UID: \"703d70c6-c4a2-4ea3-a547-6da82c40a8aa\") "
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.933359 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4742114d-6a22-4e30-b6c9-31a7701763d0-scripts\") pod \"4742114d-6a22-4e30-b6c9-31a7701763d0\" (UID: \"4742114d-6a22-4e30-b6c9-31a7701763d0\") "
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.933421 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/703d70c6-c4a2-4ea3-a547-6da82c40a8aa-combined-ca-bundle\") pod \"703d70c6-c4a2-4ea3-a547-6da82c40a8aa\" (UID: \"703d70c6-c4a2-4ea3-a547-6da82c40a8aa\") "
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.933593 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/703d70c6-c4a2-4ea3-a547-6da82c40a8aa-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "703d70c6-c4a2-4ea3-a547-6da82c40a8aa" (UID: "703d70c6-c4a2-4ea3-a547-6da82c40a8aa"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.933996 4857 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/703d70c6-c4a2-4ea3-a547-6da82c40a8aa-httpd-run\") on node \"crc\" DevicePath \"\""
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.934304 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4742114d-6a22-4e30-b6c9-31a7701763d0-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "4742114d-6a22-4e30-b6c9-31a7701763d0" (UID: "4742114d-6a22-4e30-b6c9-31a7701763d0"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.935363 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4742114d-6a22-4e30-b6c9-31a7701763d0-logs" (OuterVolumeSpecName: "logs") pod "4742114d-6a22-4e30-b6c9-31a7701763d0" (UID: "4742114d-6a22-4e30-b6c9-31a7701763d0"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.935937 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/703d70c6-c4a2-4ea3-a547-6da82c40a8aa-logs" (OuterVolumeSpecName: "logs") pod "703d70c6-c4a2-4ea3-a547-6da82c40a8aa" (UID: "703d70c6-c4a2-4ea3-a547-6da82c40a8aa"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.951241 4857 scope.go:117] "RemoveContainer" containerID="8deabdae6c53d6b230084ecce45bec4133dfe7c363177e2148924d6b10c65fec"
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.955074 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/703d70c6-c4a2-4ea3-a547-6da82c40a8aa-scripts" (OuterVolumeSpecName: "scripts") pod "703d70c6-c4a2-4ea3-a547-6da82c40a8aa" (UID: "703d70c6-c4a2-4ea3-a547-6da82c40a8aa"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.956119 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4742114d-6a22-4e30-b6c9-31a7701763d0-scripts" (OuterVolumeSpecName: "scripts") pod "4742114d-6a22-4e30-b6c9-31a7701763d0" (UID: "4742114d-6a22-4e30-b6c9-31a7701763d0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.956266 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4742114d-6a22-4e30-b6c9-31a7701763d0-kube-api-access-88dd9" (OuterVolumeSpecName: "kube-api-access-88dd9") pod "4742114d-6a22-4e30-b6c9-31a7701763d0" (UID: "4742114d-6a22-4e30-b6c9-31a7701763d0"). InnerVolumeSpecName "kube-api-access-88dd9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.956954 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "glance") pod "4742114d-6a22-4e30-b6c9-31a7701763d0" (UID: "4742114d-6a22-4e30-b6c9-31a7701763d0"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.957724 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/703d70c6-c4a2-4ea3-a547-6da82c40a8aa-kube-api-access-w2dd6" (OuterVolumeSpecName: "kube-api-access-w2dd6") pod "703d70c6-c4a2-4ea3-a547-6da82c40a8aa" (UID: "703d70c6-c4a2-4ea3-a547-6da82c40a8aa"). InnerVolumeSpecName "kube-api-access-w2dd6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.968166 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "glance") pod "703d70c6-c4a2-4ea3-a547-6da82c40a8aa" (UID: "703d70c6-c4a2-4ea3-a547-6da82c40a8aa"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.968684 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/703d70c6-c4a2-4ea3-a547-6da82c40a8aa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "703d70c6-c4a2-4ea3-a547-6da82c40a8aa" (UID: "703d70c6-c4a2-4ea3-a547-6da82c40a8aa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.973166 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4742114d-6a22-4e30-b6c9-31a7701763d0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4742114d-6a22-4e30-b6c9-31a7701763d0" (UID: "4742114d-6a22-4e30-b6c9-31a7701763d0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.989481 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4742114d-6a22-4e30-b6c9-31a7701763d0-config-data" (OuterVolumeSpecName: "config-data") pod "4742114d-6a22-4e30-b6c9-31a7701763d0" (UID: "4742114d-6a22-4e30-b6c9-31a7701763d0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:51:32 crc kubenswrapper[4857]: I1128 13:51:32.990106 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/703d70c6-c4a2-4ea3-a547-6da82c40a8aa-config-data" (OuterVolumeSpecName: "config-data") pod "703d70c6-c4a2-4ea3-a547-6da82c40a8aa" (UID: "703d70c6-c4a2-4ea3-a547-6da82c40a8aa"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.036135 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/703d70c6-c4a2-4ea3-a547-6da82c40a8aa-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.036196 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4742114d-6a22-4e30-b6c9-31a7701763d0-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.036211 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/703d70c6-c4a2-4ea3-a547-6da82c40a8aa-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.036223 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/703d70c6-c4a2-4ea3-a547-6da82c40a8aa-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.036320 4857 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" "
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.036333 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/703d70c6-c4a2-4ea3-a547-6da82c40a8aa-logs\") on node \"crc\" DevicePath \"\""
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.036349 4857 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" "
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.036359 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4742114d-6a22-4e30-b6c9-31a7701763d0-logs\") on node \"crc\" DevicePath \"\""
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.036386 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4742114d-6a22-4e30-b6c9-31a7701763d0-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.036396 4857 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4742114d-6a22-4e30-b6c9-31a7701763d0-httpd-run\") on node \"crc\" DevicePath \"\""
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.036408 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4742114d-6a22-4e30-b6c9-31a7701763d0-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.036420 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-88dd9\" (UniqueName: \"kubernetes.io/projected/4742114d-6a22-4e30-b6c9-31a7701763d0-kube-api-access-88dd9\") on node \"crc\" DevicePath \"\""
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.036431 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w2dd6\" (UniqueName: \"kubernetes.io/projected/703d70c6-c4a2-4ea3-a547-6da82c40a8aa-kube-api-access-w2dd6\") on node \"crc\" DevicePath \"\""
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.059650 4857 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc"
succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.062077 4857 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.080845 4857 scope.go:117] "RemoveContainer" containerID="c4a3f22359a524f804eee3eba4e070f1ac208480a5a1089b435857bd82e1bb61" Nov 28 13:51:33 crc kubenswrapper[4857]: E1128 13:51:33.081756 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c4a3f22359a524f804eee3eba4e070f1ac208480a5a1089b435857bd82e1bb61\": container with ID starting with c4a3f22359a524f804eee3eba4e070f1ac208480a5a1089b435857bd82e1bb61 not found: ID does not exist" containerID="c4a3f22359a524f804eee3eba4e070f1ac208480a5a1089b435857bd82e1bb61" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.081825 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c4a3f22359a524f804eee3eba4e070f1ac208480a5a1089b435857bd82e1bb61"} err="failed to get container status \"c4a3f22359a524f804eee3eba4e070f1ac208480a5a1089b435857bd82e1bb61\": rpc error: code = NotFound desc = could not find container \"c4a3f22359a524f804eee3eba4e070f1ac208480a5a1089b435857bd82e1bb61\": container with ID starting with c4a3f22359a524f804eee3eba4e070f1ac208480a5a1089b435857bd82e1bb61 not found: ID does not exist" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.082152 4857 scope.go:117] "RemoveContainer" containerID="8deabdae6c53d6b230084ecce45bec4133dfe7c363177e2148924d6b10c65fec" Nov 28 13:51:33 crc kubenswrapper[4857]: E1128 13:51:33.082543 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8deabdae6c53d6b230084ecce45bec4133dfe7c363177e2148924d6b10c65fec\": container with ID starting with 8deabdae6c53d6b230084ecce45bec4133dfe7c363177e2148924d6b10c65fec not found: ID does not exist" containerID="8deabdae6c53d6b230084ecce45bec4133dfe7c363177e2148924d6b10c65fec" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.082581 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8deabdae6c53d6b230084ecce45bec4133dfe7c363177e2148924d6b10c65fec"} err="failed to get container status \"8deabdae6c53d6b230084ecce45bec4133dfe7c363177e2148924d6b10c65fec\": rpc error: code = NotFound desc = could not find container \"8deabdae6c53d6b230084ecce45bec4133dfe7c363177e2148924d6b10c65fec\": container with ID starting with 8deabdae6c53d6b230084ecce45bec4133dfe7c363177e2148924d6b10c65fec not found: ID does not exist" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.082598 4857 scope.go:117] "RemoveContainer" containerID="c4a3f22359a524f804eee3eba4e070f1ac208480a5a1089b435857bd82e1bb61" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.082895 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c4a3f22359a524f804eee3eba4e070f1ac208480a5a1089b435857bd82e1bb61"} err="failed to get container status \"c4a3f22359a524f804eee3eba4e070f1ac208480a5a1089b435857bd82e1bb61\": rpc error: code = NotFound desc = could not find container \"c4a3f22359a524f804eee3eba4e070f1ac208480a5a1089b435857bd82e1bb61\": container with ID starting with 
c4a3f22359a524f804eee3eba4e070f1ac208480a5a1089b435857bd82e1bb61 not found: ID does not exist" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.083017 4857 scope.go:117] "RemoveContainer" containerID="8deabdae6c53d6b230084ecce45bec4133dfe7c363177e2148924d6b10c65fec" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.083599 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8deabdae6c53d6b230084ecce45bec4133dfe7c363177e2148924d6b10c65fec"} err="failed to get container status \"8deabdae6c53d6b230084ecce45bec4133dfe7c363177e2148924d6b10c65fec\": rpc error: code = NotFound desc = could not find container \"8deabdae6c53d6b230084ecce45bec4133dfe7c363177e2148924d6b10c65fec\": container with ID starting with 8deabdae6c53d6b230084ecce45bec4133dfe7c363177e2148924d6b10c65fec not found: ID does not exist" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.083621 4857 scope.go:117] "RemoveContainer" containerID="a33c1315e2eb2ef5a1d17ef2a8b1973d20d88876b29d9095316e0436d85fb5d9" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.108494 4857 scope.go:117] "RemoveContainer" containerID="aaf0cf395550c3bf42091c1f232580395dd0dd3ac224c69bb89c4ebe23684a87" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.125392 4857 scope.go:117] "RemoveContainer" containerID="a33c1315e2eb2ef5a1d17ef2a8b1973d20d88876b29d9095316e0436d85fb5d9" Nov 28 13:51:33 crc kubenswrapper[4857]: E1128 13:51:33.125773 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a33c1315e2eb2ef5a1d17ef2a8b1973d20d88876b29d9095316e0436d85fb5d9\": container with ID starting with a33c1315e2eb2ef5a1d17ef2a8b1973d20d88876b29d9095316e0436d85fb5d9 not found: ID does not exist" containerID="a33c1315e2eb2ef5a1d17ef2a8b1973d20d88876b29d9095316e0436d85fb5d9" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.125816 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a33c1315e2eb2ef5a1d17ef2a8b1973d20d88876b29d9095316e0436d85fb5d9"} err="failed to get container status \"a33c1315e2eb2ef5a1d17ef2a8b1973d20d88876b29d9095316e0436d85fb5d9\": rpc error: code = NotFound desc = could not find container \"a33c1315e2eb2ef5a1d17ef2a8b1973d20d88876b29d9095316e0436d85fb5d9\": container with ID starting with a33c1315e2eb2ef5a1d17ef2a8b1973d20d88876b29d9095316e0436d85fb5d9 not found: ID does not exist" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.125849 4857 scope.go:117] "RemoveContainer" containerID="aaf0cf395550c3bf42091c1f232580395dd0dd3ac224c69bb89c4ebe23684a87" Nov 28 13:51:33 crc kubenswrapper[4857]: E1128 13:51:33.126142 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aaf0cf395550c3bf42091c1f232580395dd0dd3ac224c69bb89c4ebe23684a87\": container with ID starting with aaf0cf395550c3bf42091c1f232580395dd0dd3ac224c69bb89c4ebe23684a87 not found: ID does not exist" containerID="aaf0cf395550c3bf42091c1f232580395dd0dd3ac224c69bb89c4ebe23684a87" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.126171 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aaf0cf395550c3bf42091c1f232580395dd0dd3ac224c69bb89c4ebe23684a87"} err="failed to get container status \"aaf0cf395550c3bf42091c1f232580395dd0dd3ac224c69bb89c4ebe23684a87\": rpc error: code = NotFound desc = could not find container 
\"aaf0cf395550c3bf42091c1f232580395dd0dd3ac224c69bb89c4ebe23684a87\": container with ID starting with aaf0cf395550c3bf42091c1f232580395dd0dd3ac224c69bb89c4ebe23684a87 not found: ID does not exist" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.126193 4857 scope.go:117] "RemoveContainer" containerID="a33c1315e2eb2ef5a1d17ef2a8b1973d20d88876b29d9095316e0436d85fb5d9" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.126350 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a33c1315e2eb2ef5a1d17ef2a8b1973d20d88876b29d9095316e0436d85fb5d9"} err="failed to get container status \"a33c1315e2eb2ef5a1d17ef2a8b1973d20d88876b29d9095316e0436d85fb5d9\": rpc error: code = NotFound desc = could not find container \"a33c1315e2eb2ef5a1d17ef2a8b1973d20d88876b29d9095316e0436d85fb5d9\": container with ID starting with a33c1315e2eb2ef5a1d17ef2a8b1973d20d88876b29d9095316e0436d85fb5d9 not found: ID does not exist" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.126371 4857 scope.go:117] "RemoveContainer" containerID="aaf0cf395550c3bf42091c1f232580395dd0dd3ac224c69bb89c4ebe23684a87" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.126559 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aaf0cf395550c3bf42091c1f232580395dd0dd3ac224c69bb89c4ebe23684a87"} err="failed to get container status \"aaf0cf395550c3bf42091c1f232580395dd0dd3ac224c69bb89c4ebe23684a87\": rpc error: code = NotFound desc = could not find container \"aaf0cf395550c3bf42091c1f232580395dd0dd3ac224c69bb89c4ebe23684a87\": container with ID starting with aaf0cf395550c3bf42091c1f232580395dd0dd3ac224c69bb89c4ebe23684a87 not found: ID does not exist" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.137644 4857 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.137850 4857 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.277359 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.305586 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.318364 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.327377 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.335293 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 13:51:33 crc kubenswrapper[4857]: E1128 13:51:33.335693 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="703d70c6-c4a2-4ea3-a547-6da82c40a8aa" containerName="glance-log" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.335714 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="703d70c6-c4a2-4ea3-a547-6da82c40a8aa" containerName="glance-log" Nov 28 13:51:33 crc kubenswrapper[4857]: E1128 13:51:33.335729 4857 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="61e58ae5-9323-422f-ae62-a88146ae3beb" containerName="init" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.335738 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="61e58ae5-9323-422f-ae62-a88146ae3beb" containerName="init" Nov 28 13:51:33 crc kubenswrapper[4857]: E1128 13:51:33.335761 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4742114d-6a22-4e30-b6c9-31a7701763d0" containerName="glance-log" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.335772 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="4742114d-6a22-4e30-b6c9-31a7701763d0" containerName="glance-log" Nov 28 13:51:33 crc kubenswrapper[4857]: E1128 13:51:33.335786 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61e58ae5-9323-422f-ae62-a88146ae3beb" containerName="dnsmasq-dns" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.335793 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="61e58ae5-9323-422f-ae62-a88146ae3beb" containerName="dnsmasq-dns" Nov 28 13:51:33 crc kubenswrapper[4857]: E1128 13:51:33.335808 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="703d70c6-c4a2-4ea3-a547-6da82c40a8aa" containerName="glance-httpd" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.335817 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="703d70c6-c4a2-4ea3-a547-6da82c40a8aa" containerName="glance-httpd" Nov 28 13:51:33 crc kubenswrapper[4857]: E1128 13:51:33.335834 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4742114d-6a22-4e30-b6c9-31a7701763d0" containerName="glance-httpd" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.335842 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="4742114d-6a22-4e30-b6c9-31a7701763d0" containerName="glance-httpd" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.336105 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="703d70c6-c4a2-4ea3-a547-6da82c40a8aa" containerName="glance-log" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.336125 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="4742114d-6a22-4e30-b6c9-31a7701763d0" containerName="glance-log" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.336134 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="61e58ae5-9323-422f-ae62-a88146ae3beb" containerName="dnsmasq-dns" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.336150 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="703d70c6-c4a2-4ea3-a547-6da82c40a8aa" containerName="glance-httpd" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.336165 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="4742114d-6a22-4e30-b6c9-31a7701763d0" containerName="glance-httpd" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.337203 4857 util.go:30] "No sandbox for pod can be found. 
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.343161 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts"
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.343240 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc"
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.343908 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.346874 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-x48r5"
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.349429 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.351423 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.354089 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc"
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.354410 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.385995 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.394646 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.446750 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-config-data\") pod \"glance-default-external-api-0\" (UID: \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.446795 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/37e5bb25-0df5-40da-996d-b4e23120822b-logs\") pod \"glance-default-internal-api-0\" (UID: \"37e5bb25-0df5-40da-996d-b4e23120822b\") " pod="openstack/glance-default-internal-api-0"
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.446816 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/37e5bb25-0df5-40da-996d-b4e23120822b-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"37e5bb25-0df5-40da-996d-b4e23120822b\") " pod="openstack/glance-default-internal-api-0"
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.446839 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.446863 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-logs\") pod \"glance-default-external-api-0\" (UID: \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\") " pod="openstack/glance-default-external-api-0"
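Two sandbox messages recur in this log: util.go:30 "No sandbox for pod can be found" (a fresh pod with no sandbox at all, as for the two glance pods above) and util.go:48 "No ready sandbox for pod can be found" (a sandbox exists but is no longer ready, seen later for the finished job pods). Both paths end the same way: the kubelet starts a new sandbox. A compact illustration of that decision, with invented types rather than kubelet source:

package main

import "fmt"

type sandbox struct{ ready bool }

// ensureSandbox mirrors the two log variants: nil means no sandbox at
// all (util.go:30), a non-ready sandbox matches util.go:48. Sketch only.
func ensureSandbox(pod string, sb *sandbox) *sandbox {
	switch {
	case sb == nil:
		fmt.Printf("No sandbox for pod can be found. Need to start a new one pod=%q\n", pod)
	case !sb.ready:
		fmt.Printf("No ready sandbox for pod can be found. Need to start a new one pod=%q\n", pod)
	default:
		return sb // reuse the existing, ready sandbox
	}
	return &sandbox{ready: true}
}

func main() {
	_ = ensureSandbox("openstack/glance-default-internal-api-0", nil)
	_ = ensureSandbox("openstack/placement-db-sync-hw6nk", &sandbox{ready: false})
}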
volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-logs\") pod \"glance-default-external-api-0\" (UID: \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.446913 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gnbmc\" (UniqueName: \"kubernetes.io/projected/37e5bb25-0df5-40da-996d-b4e23120822b-kube-api-access-gnbmc\") pod \"glance-default-internal-api-0\" (UID: \"37e5bb25-0df5-40da-996d-b4e23120822b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.446935 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/37e5bb25-0df5-40da-996d-b4e23120822b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"37e5bb25-0df5-40da-996d-b4e23120822b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.446967 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37e5bb25-0df5-40da-996d-b4e23120822b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"37e5bb25-0df5-40da-996d-b4e23120822b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.446994 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hh8w2\" (UniqueName: \"kubernetes.io/projected/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-kube-api-access-hh8w2\") pod \"glance-default-external-api-0\" (UID: \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.447011 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"37e5bb25-0df5-40da-996d-b4e23120822b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.447027 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.447043 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37e5bb25-0df5-40da-996d-b4e23120822b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"37e5bb25-0df5-40da-996d-b4e23120822b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.447058 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-scripts\") pod \"glance-default-external-api-0\" (UID: \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.447110 4857 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.447125 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.447396 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37e5bb25-0df5-40da-996d-b4e23120822b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"37e5bb25-0df5-40da-996d-b4e23120822b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.549211 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.549320 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.549366 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37e5bb25-0df5-40da-996d-b4e23120822b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"37e5bb25-0df5-40da-996d-b4e23120822b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.549411 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-config-data\") pod \"glance-default-external-api-0\" (UID: \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.549429 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/37e5bb25-0df5-40da-996d-b4e23120822b-logs\") pod \"glance-default-internal-api-0\" (UID: \"37e5bb25-0df5-40da-996d-b4e23120822b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.549446 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/37e5bb25-0df5-40da-996d-b4e23120822b-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"37e5bb25-0df5-40da-996d-b4e23120822b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.549467 4857 
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.549487 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-logs\") pod \"glance-default-external-api-0\" (UID: \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.549536 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gnbmc\" (UniqueName: \"kubernetes.io/projected/37e5bb25-0df5-40da-996d-b4e23120822b-kube-api-access-gnbmc\") pod \"glance-default-internal-api-0\" (UID: \"37e5bb25-0df5-40da-996d-b4e23120822b\") " pod="openstack/glance-default-internal-api-0"
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.549558 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/37e5bb25-0df5-40da-996d-b4e23120822b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"37e5bb25-0df5-40da-996d-b4e23120822b\") " pod="openstack/glance-default-internal-api-0"
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.549574 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37e5bb25-0df5-40da-996d-b4e23120822b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"37e5bb25-0df5-40da-996d-b4e23120822b\") " pod="openstack/glance-default-internal-api-0"
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.549604 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hh8w2\" (UniqueName: \"kubernetes.io/projected/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-kube-api-access-hh8w2\") pod \"glance-default-external-api-0\" (UID: \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.549624 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"37e5bb25-0df5-40da-996d-b4e23120822b\") " pod="openstack/glance-default-internal-api-0"
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.549644 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.549662 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37e5bb25-0df5-40da-996d-b4e23120822b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"37e5bb25-0df5-40da-996d-b4e23120822b\") " pod="openstack/glance-default-internal-api-0"
Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.549676 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-scripts\") pod \"glance-default-external-api-0\" (UID: \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\") " pod="openstack/glance-default-external-api-0"
\"kubernetes.io/secret/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-scripts\") pod \"glance-default-external-api-0\" (UID: \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.550624 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-external-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.550653 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-logs\") pod \"glance-default-external-api-0\" (UID: \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.550673 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"37e5bb25-0df5-40da-996d-b4e23120822b\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/glance-default-internal-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.550904 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/37e5bb25-0df5-40da-996d-b4e23120822b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"37e5bb25-0df5-40da-996d-b4e23120822b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.550922 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.551200 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/37e5bb25-0df5-40da-996d-b4e23120822b-logs\") pod \"glance-default-internal-api-0\" (UID: \"37e5bb25-0df5-40da-996d-b4e23120822b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.556855 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.557720 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.558997 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37e5bb25-0df5-40da-996d-b4e23120822b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" 
(UID: \"37e5bb25-0df5-40da-996d-b4e23120822b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.559100 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/37e5bb25-0df5-40da-996d-b4e23120822b-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"37e5bb25-0df5-40da-996d-b4e23120822b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.559198 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37e5bb25-0df5-40da-996d-b4e23120822b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"37e5bb25-0df5-40da-996d-b4e23120822b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.559354 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-config-data\") pod \"glance-default-external-api-0\" (UID: \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.562138 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37e5bb25-0df5-40da-996d-b4e23120822b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"37e5bb25-0df5-40da-996d-b4e23120822b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.566493 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-scripts\") pod \"glance-default-external-api-0\" (UID: \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.571846 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gnbmc\" (UniqueName: \"kubernetes.io/projected/37e5bb25-0df5-40da-996d-b4e23120822b-kube-api-access-gnbmc\") pod \"glance-default-internal-api-0\" (UID: \"37e5bb25-0df5-40da-996d-b4e23120822b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.572838 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hh8w2\" (UniqueName: \"kubernetes.io/projected/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-kube-api-access-hh8w2\") pod \"glance-default-external-api-0\" (UID: \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.592143 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"37e5bb25-0df5-40da-996d-b4e23120822b\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.596509 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\") " pod="openstack/glance-default-external-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.662726 4857 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 13:51:33 crc kubenswrapper[4857]: I1128 13:51:33.684793 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 13:51:34 crc kubenswrapper[4857]: I1128 13:51:34.255692 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4742114d-6a22-4e30-b6c9-31a7701763d0" path="/var/lib/kubelet/pods/4742114d-6a22-4e30-b6c9-31a7701763d0/volumes" Nov 28 13:51:34 crc kubenswrapper[4857]: I1128 13:51:34.257096 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="703d70c6-c4a2-4ea3-a547-6da82c40a8aa" path="/var/lib/kubelet/pods/703d70c6-c4a2-4ea3-a547-6da82c40a8aa/volumes" Nov 28 13:51:34 crc kubenswrapper[4857]: I1128 13:51:34.382715 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 13:51:34 crc kubenswrapper[4857]: W1128 13:51:34.413531 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod37e5bb25_0df5_40da_996d_b4e23120822b.slice/crio-1e894c5e9e72d4692d6fc1218e7ce3ff629efdc35020ce6b741fc17e72214e0e WatchSource:0}: Error finding container 1e894c5e9e72d4692d6fc1218e7ce3ff629efdc35020ce6b741fc17e72214e0e: Status 404 returned error can't find the container with id 1e894c5e9e72d4692d6fc1218e7ce3ff629efdc35020ce6b741fc17e72214e0e Nov 28 13:51:34 crc kubenswrapper[4857]: I1128 13:51:34.465420 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 13:51:34 crc kubenswrapper[4857]: I1128 13:51:34.964158 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645","Type":"ContainerStarted","Data":"0961de5fcd8570f79f704e21bb51e23997d1754b8e25281a801cda46f108bdf1"} Nov 28 13:51:34 crc kubenswrapper[4857]: I1128 13:51:34.966182 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"37e5bb25-0df5-40da-996d-b4e23120822b","Type":"ContainerStarted","Data":"1e894c5e9e72d4692d6fc1218e7ce3ff629efdc35020ce6b741fc17e72214e0e"} Nov 28 13:51:34 crc kubenswrapper[4857]: I1128 13:51:34.971954 4857 generic.go:334] "Generic (PLEG): container finished" podID="b5b67972-d610-4d3c-96e3-b8a1593f196a" containerID="b59a97768689c496e41691f3b6f3693124ea51c593b2a2c5ee62bcd9479b8ee4" exitCode=0 Nov 28 13:51:34 crc kubenswrapper[4857]: I1128 13:51:34.972012 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-hw6nk" event={"ID":"b5b67972-d610-4d3c-96e3-b8a1593f196a","Type":"ContainerDied","Data":"b59a97768689c496e41691f3b6f3693124ea51c593b2a2c5ee62bcd9479b8ee4"} Nov 28 13:51:35 crc kubenswrapper[4857]: I1128 13:51:35.817427 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5c79d794d7-rv95b" podUID="61e58ae5-9323-422f-ae62-a88146ae3beb" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.136:5353: i/o timeout" Nov 28 13:51:35 crc kubenswrapper[4857]: I1128 13:51:35.983256 4857 generic.go:334] "Generic (PLEG): container finished" podID="c908dccd-c878-4dab-8186-632eb7750cff" containerID="e1f8f8e9bc6117e87029e7ff11a91ae766918b1844404c4c2ebb9d051e3cab75" exitCode=0 Nov 28 13:51:35 crc kubenswrapper[4857]: I1128 13:51:35.983330 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/keystone-bootstrap-mt7c6" event={"ID":"c908dccd-c878-4dab-8186-632eb7750cff","Type":"ContainerDied","Data":"e1f8f8e9bc6117e87029e7ff11a91ae766918b1844404c4c2ebb9d051e3cab75"} Nov 28 13:51:35 crc kubenswrapper[4857]: I1128 13:51:35.986354 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645","Type":"ContainerStarted","Data":"c64c13d5543f9321a6a584d223b8b188f62ddf7016e791563e13727f25e12270"} Nov 28 13:51:35 crc kubenswrapper[4857]: I1128 13:51:35.986402 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645","Type":"ContainerStarted","Data":"856c9326ccc530d5676d0bd8dbb59c6ee4506778c164fddd33bb31ea3053126b"} Nov 28 13:51:35 crc kubenswrapper[4857]: I1128 13:51:35.988314 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"37e5bb25-0df5-40da-996d-b4e23120822b","Type":"ContainerStarted","Data":"49ac69b05eee5d86114afb435aa0c9f9c1d54e38fdb7fd9f71f2ab23e6cf8c2f"} Nov 28 13:51:35 crc kubenswrapper[4857]: I1128 13:51:35.988348 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"37e5bb25-0df5-40da-996d-b4e23120822b","Type":"ContainerStarted","Data":"ce095129f118f2af696e99064550cdd5c3f0cc311b63fd0d6fe09c258eebd664"} Nov 28 13:51:36 crc kubenswrapper[4857]: I1128 13:51:36.046569 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.046540969 podStartE2EDuration="3.046540969s" podCreationTimestamp="2025-11-28 13:51:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:51:36.026832148 +0000 UTC m=+1346.150773585" watchObservedRunningTime="2025-11-28 13:51:36.046540969 +0000 UTC m=+1346.170482406" Nov 28 13:51:36 crc kubenswrapper[4857]: I1128 13:51:36.050602 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.050583488 podStartE2EDuration="3.050583488s" podCreationTimestamp="2025-11-28 13:51:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:51:36.047400912 +0000 UTC m=+1346.171342369" watchObservedRunningTime="2025-11-28 13:51:36.050583488 +0000 UTC m=+1346.174524925" Nov 28 13:51:39 crc kubenswrapper[4857]: E1128 13:51:39.121916 4857 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4fd8fd83_9147_4bbd_80f4_1eb001e2673b.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4fd8fd83_9147_4bbd_80f4_1eb001e2673b.slice/crio-4723d92a5cff83c24a63924037be0671f86e6be917ad119264f1539ec104b7b7\": RecentStats: unable to find data in memory cache]" Nov 28 13:51:41 crc kubenswrapper[4857]: I1128 13:51:41.379452 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-hw6nk" Nov 28 13:51:41 crc kubenswrapper[4857]: I1128 13:51:41.384702 4857 util.go:48] "No ready sandbox for pod can be found. 
Nov 28 13:51:41 crc kubenswrapper[4857]: I1128 13:51:41.390269 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c908dccd-c878-4dab-8186-632eb7750cff-scripts\") pod \"c908dccd-c878-4dab-8186-632eb7750cff\" (UID: \"c908dccd-c878-4dab-8186-632eb7750cff\") "
Nov 28 13:51:41 crc kubenswrapper[4857]: I1128 13:51:41.390311 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c908dccd-c878-4dab-8186-632eb7750cff-fernet-keys\") pod \"c908dccd-c878-4dab-8186-632eb7750cff\" (UID: \"c908dccd-c878-4dab-8186-632eb7750cff\") "
Nov 28 13:51:41 crc kubenswrapper[4857]: I1128 13:51:41.390359 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-659wg\" (UniqueName: \"kubernetes.io/projected/b5b67972-d610-4d3c-96e3-b8a1593f196a-kube-api-access-659wg\") pod \"b5b67972-d610-4d3c-96e3-b8a1593f196a\" (UID: \"b5b67972-d610-4d3c-96e3-b8a1593f196a\") "
Nov 28 13:51:41 crc kubenswrapper[4857]: I1128 13:51:41.390392 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c908dccd-c878-4dab-8186-632eb7750cff-credential-keys\") pod \"c908dccd-c878-4dab-8186-632eb7750cff\" (UID: \"c908dccd-c878-4dab-8186-632eb7750cff\") "
Nov 28 13:51:41 crc kubenswrapper[4857]: I1128 13:51:41.390477 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b5b67972-d610-4d3c-96e3-b8a1593f196a-logs\") pod \"b5b67972-d610-4d3c-96e3-b8a1593f196a\" (UID: \"b5b67972-d610-4d3c-96e3-b8a1593f196a\") "
Nov 28 13:51:41 crc kubenswrapper[4857]: I1128 13:51:41.390535 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jrdlj\" (UniqueName: \"kubernetes.io/projected/c908dccd-c878-4dab-8186-632eb7750cff-kube-api-access-jrdlj\") pod \"c908dccd-c878-4dab-8186-632eb7750cff\" (UID: \"c908dccd-c878-4dab-8186-632eb7750cff\") "
Nov 28 13:51:41 crc kubenswrapper[4857]: I1128 13:51:41.390574 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c908dccd-c878-4dab-8186-632eb7750cff-config-data\") pod \"c908dccd-c878-4dab-8186-632eb7750cff\" (UID: \"c908dccd-c878-4dab-8186-632eb7750cff\") "
Nov 28 13:51:41 crc kubenswrapper[4857]: I1128 13:51:41.390605 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c908dccd-c878-4dab-8186-632eb7750cff-combined-ca-bundle\") pod \"c908dccd-c878-4dab-8186-632eb7750cff\" (UID: \"c908dccd-c878-4dab-8186-632eb7750cff\") "
Nov 28 13:51:41 crc kubenswrapper[4857]: I1128 13:51:41.390630 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5b67972-d610-4d3c-96e3-b8a1593f196a-combined-ca-bundle\") pod \"b5b67972-d610-4d3c-96e3-b8a1593f196a\" (UID: \"b5b67972-d610-4d3c-96e3-b8a1593f196a\") "
Nov 28 13:51:41 crc kubenswrapper[4857]: I1128 13:51:41.390665 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b5b67972-d610-4d3c-96e3-b8a1593f196a-scripts\") pod \"b5b67972-d610-4d3c-96e3-b8a1593f196a\" (UID: \"b5b67972-d610-4d3c-96e3-b8a1593f196a\") "
\"b5b67972-d610-4d3c-96e3-b8a1593f196a\") " Nov 28 13:51:41 crc kubenswrapper[4857]: I1128 13:51:41.390688 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5b67972-d610-4d3c-96e3-b8a1593f196a-config-data\") pod \"b5b67972-d610-4d3c-96e3-b8a1593f196a\" (UID: \"b5b67972-d610-4d3c-96e3-b8a1593f196a\") " Nov 28 13:51:41 crc kubenswrapper[4857]: I1128 13:51:41.391485 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b5b67972-d610-4d3c-96e3-b8a1593f196a-logs" (OuterVolumeSpecName: "logs") pod "b5b67972-d610-4d3c-96e3-b8a1593f196a" (UID: "b5b67972-d610-4d3c-96e3-b8a1593f196a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:51:41 crc kubenswrapper[4857]: I1128 13:51:41.399361 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c908dccd-c878-4dab-8186-632eb7750cff-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "c908dccd-c878-4dab-8186-632eb7750cff" (UID: "c908dccd-c878-4dab-8186-632eb7750cff"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:51:41 crc kubenswrapper[4857]: I1128 13:51:41.399382 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5b67972-d610-4d3c-96e3-b8a1593f196a-scripts" (OuterVolumeSpecName: "scripts") pod "b5b67972-d610-4d3c-96e3-b8a1593f196a" (UID: "b5b67972-d610-4d3c-96e3-b8a1593f196a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:51:41 crc kubenswrapper[4857]: I1128 13:51:41.399920 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c908dccd-c878-4dab-8186-632eb7750cff-kube-api-access-jrdlj" (OuterVolumeSpecName: "kube-api-access-jrdlj") pod "c908dccd-c878-4dab-8186-632eb7750cff" (UID: "c908dccd-c878-4dab-8186-632eb7750cff"). InnerVolumeSpecName "kube-api-access-jrdlj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:51:41 crc kubenswrapper[4857]: I1128 13:51:41.412101 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c908dccd-c878-4dab-8186-632eb7750cff-scripts" (OuterVolumeSpecName: "scripts") pod "c908dccd-c878-4dab-8186-632eb7750cff" (UID: "c908dccd-c878-4dab-8186-632eb7750cff"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:51:41 crc kubenswrapper[4857]: I1128 13:51:41.436213 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c908dccd-c878-4dab-8186-632eb7750cff-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "c908dccd-c878-4dab-8186-632eb7750cff" (UID: "c908dccd-c878-4dab-8186-632eb7750cff"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:51:41 crc kubenswrapper[4857]: I1128 13:51:41.455164 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5b67972-d610-4d3c-96e3-b8a1593f196a-kube-api-access-659wg" (OuterVolumeSpecName: "kube-api-access-659wg") pod "b5b67972-d610-4d3c-96e3-b8a1593f196a" (UID: "b5b67972-d610-4d3c-96e3-b8a1593f196a"). InnerVolumeSpecName "kube-api-access-659wg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:51:41 crc kubenswrapper[4857]: I1128 13:51:41.493286 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-659wg\" (UniqueName: \"kubernetes.io/projected/b5b67972-d610-4d3c-96e3-b8a1593f196a-kube-api-access-659wg\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:41 crc kubenswrapper[4857]: I1128 13:51:41.493331 4857 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c908dccd-c878-4dab-8186-632eb7750cff-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:41 crc kubenswrapper[4857]: I1128 13:51:41.493342 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b5b67972-d610-4d3c-96e3-b8a1593f196a-logs\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:41 crc kubenswrapper[4857]: I1128 13:51:41.493353 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jrdlj\" (UniqueName: \"kubernetes.io/projected/c908dccd-c878-4dab-8186-632eb7750cff-kube-api-access-jrdlj\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:41 crc kubenswrapper[4857]: I1128 13:51:41.493364 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b5b67972-d610-4d3c-96e3-b8a1593f196a-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:41 crc kubenswrapper[4857]: I1128 13:51:41.493373 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c908dccd-c878-4dab-8186-632eb7750cff-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:41 crc kubenswrapper[4857]: I1128 13:51:41.493382 4857 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c908dccd-c878-4dab-8186-632eb7750cff-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:41 crc kubenswrapper[4857]: I1128 13:51:41.496144 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c908dccd-c878-4dab-8186-632eb7750cff-config-data" (OuterVolumeSpecName: "config-data") pod "c908dccd-c878-4dab-8186-632eb7750cff" (UID: "c908dccd-c878-4dab-8186-632eb7750cff"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:51:41 crc kubenswrapper[4857]: I1128 13:51:41.581211 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5b67972-d610-4d3c-96e3-b8a1593f196a-config-data" (OuterVolumeSpecName: "config-data") pod "b5b67972-d610-4d3c-96e3-b8a1593f196a" (UID: "b5b67972-d610-4d3c-96e3-b8a1593f196a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:51:41 crc kubenswrapper[4857]: I1128 13:51:41.595962 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c908dccd-c878-4dab-8186-632eb7750cff-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:41 crc kubenswrapper[4857]: I1128 13:51:41.596215 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5b67972-d610-4d3c-96e3-b8a1593f196a-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:41 crc kubenswrapper[4857]: I1128 13:51:41.606325 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c908dccd-c878-4dab-8186-632eb7750cff-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c908dccd-c878-4dab-8186-632eb7750cff" (UID: "c908dccd-c878-4dab-8186-632eb7750cff"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:51:41 crc kubenswrapper[4857]: I1128 13:51:41.613119 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5b67972-d610-4d3c-96e3-b8a1593f196a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b5b67972-d610-4d3c-96e3-b8a1593f196a" (UID: "b5b67972-d610-4d3c-96e3-b8a1593f196a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:51:41 crc kubenswrapper[4857]: I1128 13:51:41.697513 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c908dccd-c878-4dab-8186-632eb7750cff-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:41 crc kubenswrapper[4857]: I1128 13:51:41.697547 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5b67972-d610-4d3c-96e3-b8a1593f196a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.039758 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-mt7c6" event={"ID":"c908dccd-c878-4dab-8186-632eb7750cff","Type":"ContainerDied","Data":"2ea949bf399fa213df92a5e9220a297b12d9b86ad373984574e3e227d48b8dfd"} Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.040132 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2ea949bf399fa213df92a5e9220a297b12d9b86ad373984574e3e227d48b8dfd" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.039803 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-mt7c6" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.041505 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-hw6nk" event={"ID":"b5b67972-d610-4d3c-96e3-b8a1593f196a","Type":"ContainerDied","Data":"f623d672907463ea8a92878394f5bec9ceeb80535f553f1a73713a088ecceb59"} Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.041522 4857 util.go:48] "No ready sandbox for pod can be found. 
Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.041529 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f623d672907463ea8a92878394f5bec9ceeb80535f553f1a73713a088ecceb59"
Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.043857 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9b948893-d309-41ca-987c-287ee0b12ef2","Type":"ContainerStarted","Data":"9b6c4bbe152e866f7792de341cdba73138d078a52439a73ea5c1560ffc423419"}
Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.046187 4857 generic.go:334] "Generic (PLEG): container finished" podID="06f8c64b-0075-49d3-a2ae-0ecc1d03232a" containerID="a608443173e09bdc122142915f62f799bb04de43dba7d386430145ce82247138" exitCode=0
Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.046218 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-sb67r" event={"ID":"06f8c64b-0075-49d3-a2ae-0ecc1d03232a","Type":"ContainerDied","Data":"a608443173e09bdc122142915f62f799bb04de43dba7d386430145ce82247138"}
Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.496437 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-794cbbfc48-m96jr"]
Nov 28 13:51:42 crc kubenswrapper[4857]: E1128 13:51:42.496761 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5b67972-d610-4d3c-96e3-b8a1593f196a" containerName="placement-db-sync"
Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.496775 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5b67972-d610-4d3c-96e3-b8a1593f196a" containerName="placement-db-sync"
Nov 28 13:51:42 crc kubenswrapper[4857]: E1128 13:51:42.496806 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c908dccd-c878-4dab-8186-632eb7750cff" containerName="keystone-bootstrap"
Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.496812 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="c908dccd-c878-4dab-8186-632eb7750cff" containerName="keystone-bootstrap"
Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.496993 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5b67972-d610-4d3c-96e3-b8a1593f196a" containerName="placement-db-sync"
Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.497009 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="c908dccd-c878-4dab-8186-632eb7750cff" containerName="keystone-bootstrap"
Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.497846 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-794cbbfc48-m96jr"
Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.501838 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts"
Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.502116 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc"
Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.501973 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-kx7nh"
Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.502019 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc"
Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.502435 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data"
Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.523140 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-794cbbfc48-m96jr"]
Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.592505 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-5b64c5866d-mkt8b"]
Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.593549 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-5b64c5866d-mkt8b"
Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.596387 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.596930 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.600798 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc"
Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.605404 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc"
Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.605769 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-49rtd"
Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.605935 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.608774 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-5b64c5866d-mkt8b"]
Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.610025 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4kgtl\" (UniqueName: \"kubernetes.io/projected/0944133e-cee5-4927-8f5e-8f781b30d224-kube-api-access-4kgtl\") pod \"placement-794cbbfc48-m96jr\" (UID: \"0944133e-cee5-4927-8f5e-8f781b30d224\") " pod="openstack/placement-794cbbfc48-m96jr"
Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.610065 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0944133e-cee5-4927-8f5e-8f781b30d224-logs\") pod \"placement-794cbbfc48-m96jr\" (UID: \"0944133e-cee5-4927-8f5e-8f781b30d224\") " pod="openstack/placement-794cbbfc48-m96jr"
Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.610126 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0944133e-cee5-4927-8f5e-8f781b30d224-combined-ca-bundle\") pod \"placement-794cbbfc48-m96jr\" (UID: \"0944133e-cee5-4927-8f5e-8f781b30d224\") " pod="openstack/placement-794cbbfc48-m96jr"
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0944133e-cee5-4927-8f5e-8f781b30d224-combined-ca-bundle\") pod \"placement-794cbbfc48-m96jr\" (UID: \"0944133e-cee5-4927-8f5e-8f781b30d224\") " pod="openstack/placement-794cbbfc48-m96jr" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.610141 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0944133e-cee5-4927-8f5e-8f781b30d224-internal-tls-certs\") pod \"placement-794cbbfc48-m96jr\" (UID: \"0944133e-cee5-4927-8f5e-8f781b30d224\") " pod="openstack/placement-794cbbfc48-m96jr" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.610158 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0944133e-cee5-4927-8f5e-8f781b30d224-scripts\") pod \"placement-794cbbfc48-m96jr\" (UID: \"0944133e-cee5-4927-8f5e-8f781b30d224\") " pod="openstack/placement-794cbbfc48-m96jr" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.610177 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0944133e-cee5-4927-8f5e-8f781b30d224-config-data\") pod \"placement-794cbbfc48-m96jr\" (UID: \"0944133e-cee5-4927-8f5e-8f781b30d224\") " pod="openstack/placement-794cbbfc48-m96jr" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.610211 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0944133e-cee5-4927-8f5e-8f781b30d224-public-tls-certs\") pod \"placement-794cbbfc48-m96jr\" (UID: \"0944133e-cee5-4927-8f5e-8f781b30d224\") " pod="openstack/placement-794cbbfc48-m96jr" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.713559 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-public-tls-certs\") pod \"keystone-5b64c5866d-mkt8b\" (UID: \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\") " pod="openstack/keystone-5b64c5866d-mkt8b" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.713651 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0944133e-cee5-4927-8f5e-8f781b30d224-combined-ca-bundle\") pod \"placement-794cbbfc48-m96jr\" (UID: \"0944133e-cee5-4927-8f5e-8f781b30d224\") " pod="openstack/placement-794cbbfc48-m96jr" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.713686 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0944133e-cee5-4927-8f5e-8f781b30d224-internal-tls-certs\") pod \"placement-794cbbfc48-m96jr\" (UID: \"0944133e-cee5-4927-8f5e-8f781b30d224\") " pod="openstack/placement-794cbbfc48-m96jr" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.714700 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0944133e-cee5-4927-8f5e-8f781b30d224-scripts\") pod \"placement-794cbbfc48-m96jr\" (UID: \"0944133e-cee5-4927-8f5e-8f781b30d224\") " pod="openstack/placement-794cbbfc48-m96jr" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.714741 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/0944133e-cee5-4927-8f5e-8f781b30d224-config-data\") pod \"placement-794cbbfc48-m96jr\" (UID: \"0944133e-cee5-4927-8f5e-8f781b30d224\") " pod="openstack/placement-794cbbfc48-m96jr" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.714816 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0944133e-cee5-4927-8f5e-8f781b30d224-public-tls-certs\") pod \"placement-794cbbfc48-m96jr\" (UID: \"0944133e-cee5-4927-8f5e-8f781b30d224\") " pod="openstack/placement-794cbbfc48-m96jr" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.714845 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-scripts\") pod \"keystone-5b64c5866d-mkt8b\" (UID: \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\") " pod="openstack/keystone-5b64c5866d-mkt8b" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.714883 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-combined-ca-bundle\") pod \"keystone-5b64c5866d-mkt8b\" (UID: \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\") " pod="openstack/keystone-5b64c5866d-mkt8b" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.714933 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-credential-keys\") pod \"keystone-5b64c5866d-mkt8b\" (UID: \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\") " pod="openstack/keystone-5b64c5866d-mkt8b" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.714987 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5kftc\" (UniqueName: \"kubernetes.io/projected/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-kube-api-access-5kftc\") pod \"keystone-5b64c5866d-mkt8b\" (UID: \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\") " pod="openstack/keystone-5b64c5866d-mkt8b" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.715017 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-fernet-keys\") pod \"keystone-5b64c5866d-mkt8b\" (UID: \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\") " pod="openstack/keystone-5b64c5866d-mkt8b" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.715065 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-config-data\") pod \"keystone-5b64c5866d-mkt8b\" (UID: \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\") " pod="openstack/keystone-5b64c5866d-mkt8b" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.715105 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4kgtl\" (UniqueName: \"kubernetes.io/projected/0944133e-cee5-4927-8f5e-8f781b30d224-kube-api-access-4kgtl\") pod \"placement-794cbbfc48-m96jr\" (UID: \"0944133e-cee5-4927-8f5e-8f781b30d224\") " pod="openstack/placement-794cbbfc48-m96jr" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.715141 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" 
(UniqueName: \"kubernetes.io/empty-dir/0944133e-cee5-4927-8f5e-8f781b30d224-logs\") pod \"placement-794cbbfc48-m96jr\" (UID: \"0944133e-cee5-4927-8f5e-8f781b30d224\") " pod="openstack/placement-794cbbfc48-m96jr" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.715164 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-internal-tls-certs\") pod \"keystone-5b64c5866d-mkt8b\" (UID: \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\") " pod="openstack/keystone-5b64c5866d-mkt8b" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.718771 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0944133e-cee5-4927-8f5e-8f781b30d224-logs\") pod \"placement-794cbbfc48-m96jr\" (UID: \"0944133e-cee5-4927-8f5e-8f781b30d224\") " pod="openstack/placement-794cbbfc48-m96jr" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.720901 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0944133e-cee5-4927-8f5e-8f781b30d224-combined-ca-bundle\") pod \"placement-794cbbfc48-m96jr\" (UID: \"0944133e-cee5-4927-8f5e-8f781b30d224\") " pod="openstack/placement-794cbbfc48-m96jr" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.721116 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0944133e-cee5-4927-8f5e-8f781b30d224-public-tls-certs\") pod \"placement-794cbbfc48-m96jr\" (UID: \"0944133e-cee5-4927-8f5e-8f781b30d224\") " pod="openstack/placement-794cbbfc48-m96jr" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.722355 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0944133e-cee5-4927-8f5e-8f781b30d224-internal-tls-certs\") pod \"placement-794cbbfc48-m96jr\" (UID: \"0944133e-cee5-4927-8f5e-8f781b30d224\") " pod="openstack/placement-794cbbfc48-m96jr" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.734464 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0944133e-cee5-4927-8f5e-8f781b30d224-scripts\") pod \"placement-794cbbfc48-m96jr\" (UID: \"0944133e-cee5-4927-8f5e-8f781b30d224\") " pod="openstack/placement-794cbbfc48-m96jr" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.738164 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0944133e-cee5-4927-8f5e-8f781b30d224-config-data\") pod \"placement-794cbbfc48-m96jr\" (UID: \"0944133e-cee5-4927-8f5e-8f781b30d224\") " pod="openstack/placement-794cbbfc48-m96jr" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.742676 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4kgtl\" (UniqueName: \"kubernetes.io/projected/0944133e-cee5-4927-8f5e-8f781b30d224-kube-api-access-4kgtl\") pod \"placement-794cbbfc48-m96jr\" (UID: \"0944133e-cee5-4927-8f5e-8f781b30d224\") " pod="openstack/placement-794cbbfc48-m96jr" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.813470 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-794cbbfc48-m96jr" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.816427 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-public-tls-certs\") pod \"keystone-5b64c5866d-mkt8b\" (UID: \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\") " pod="openstack/keystone-5b64c5866d-mkt8b" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.816554 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-scripts\") pod \"keystone-5b64c5866d-mkt8b\" (UID: \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\") " pod="openstack/keystone-5b64c5866d-mkt8b" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.816591 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-combined-ca-bundle\") pod \"keystone-5b64c5866d-mkt8b\" (UID: \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\") " pod="openstack/keystone-5b64c5866d-mkt8b" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.816631 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-credential-keys\") pod \"keystone-5b64c5866d-mkt8b\" (UID: \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\") " pod="openstack/keystone-5b64c5866d-mkt8b" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.816658 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5kftc\" (UniqueName: \"kubernetes.io/projected/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-kube-api-access-5kftc\") pod \"keystone-5b64c5866d-mkt8b\" (UID: \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\") " pod="openstack/keystone-5b64c5866d-mkt8b" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.816682 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-fernet-keys\") pod \"keystone-5b64c5866d-mkt8b\" (UID: \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\") " pod="openstack/keystone-5b64c5866d-mkt8b" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.816719 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-config-data\") pod \"keystone-5b64c5866d-mkt8b\" (UID: \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\") " pod="openstack/keystone-5b64c5866d-mkt8b" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.816756 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-internal-tls-certs\") pod \"keystone-5b64c5866d-mkt8b\" (UID: \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\") " pod="openstack/keystone-5b64c5866d-mkt8b" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.821783 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-credential-keys\") pod \"keystone-5b64c5866d-mkt8b\" (UID: \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\") " pod="openstack/keystone-5b64c5866d-mkt8b" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 
13:51:42.822251 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-fernet-keys\") pod \"keystone-5b64c5866d-mkt8b\" (UID: \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\") " pod="openstack/keystone-5b64c5866d-mkt8b" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.822712 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-config-data\") pod \"keystone-5b64c5866d-mkt8b\" (UID: \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\") " pod="openstack/keystone-5b64c5866d-mkt8b" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.822981 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-scripts\") pod \"keystone-5b64c5866d-mkt8b\" (UID: \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\") " pod="openstack/keystone-5b64c5866d-mkt8b" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.834308 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-public-tls-certs\") pod \"keystone-5b64c5866d-mkt8b\" (UID: \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\") " pod="openstack/keystone-5b64c5866d-mkt8b" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.834529 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-combined-ca-bundle\") pod \"keystone-5b64c5866d-mkt8b\" (UID: \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\") " pod="openstack/keystone-5b64c5866d-mkt8b" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.836448 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-internal-tls-certs\") pod \"keystone-5b64c5866d-mkt8b\" (UID: \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\") " pod="openstack/keystone-5b64c5866d-mkt8b" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.837309 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5kftc\" (UniqueName: \"kubernetes.io/projected/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-kube-api-access-5kftc\") pod \"keystone-5b64c5866d-mkt8b\" (UID: \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\") " pod="openstack/keystone-5b64c5866d-mkt8b" Nov 28 13:51:42 crc kubenswrapper[4857]: I1128 13:51:42.913334 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-5b64c5866d-mkt8b" Nov 28 13:51:43 crc kubenswrapper[4857]: I1128 13:51:43.290712 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-794cbbfc48-m96jr"] Nov 28 13:51:43 crc kubenswrapper[4857]: I1128 13:51:43.447369 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-5b64c5866d-mkt8b"] Nov 28 13:51:43 crc kubenswrapper[4857]: W1128 13:51:43.453506 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5d4d3b72_fd05_4a47_925c_f17f77c46fc1.slice/crio-67add3ab60c21921a75d4024750fceb80cecdc89bb2d69480c26ea4c366599cf WatchSource:0}: Error finding container 67add3ab60c21921a75d4024750fceb80cecdc89bb2d69480c26ea4c366599cf: Status 404 returned error can't find the container with id 67add3ab60c21921a75d4024750fceb80cecdc89bb2d69480c26ea4c366599cf Nov 28 13:51:43 crc kubenswrapper[4857]: I1128 13:51:43.535254 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-sb67r" Nov 28 13:51:43 crc kubenswrapper[4857]: I1128 13:51:43.632462 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06f8c64b-0075-49d3-a2ae-0ecc1d03232a-combined-ca-bundle\") pod \"06f8c64b-0075-49d3-a2ae-0ecc1d03232a\" (UID: \"06f8c64b-0075-49d3-a2ae-0ecc1d03232a\") " Nov 28 13:51:43 crc kubenswrapper[4857]: I1128 13:51:43.632519 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-64gpp\" (UniqueName: \"kubernetes.io/projected/06f8c64b-0075-49d3-a2ae-0ecc1d03232a-kube-api-access-64gpp\") pod \"06f8c64b-0075-49d3-a2ae-0ecc1d03232a\" (UID: \"06f8c64b-0075-49d3-a2ae-0ecc1d03232a\") " Nov 28 13:51:43 crc kubenswrapper[4857]: I1128 13:51:43.632695 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/06f8c64b-0075-49d3-a2ae-0ecc1d03232a-config\") pod \"06f8c64b-0075-49d3-a2ae-0ecc1d03232a\" (UID: \"06f8c64b-0075-49d3-a2ae-0ecc1d03232a\") " Nov 28 13:51:43 crc kubenswrapper[4857]: I1128 13:51:43.636926 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/06f8c64b-0075-49d3-a2ae-0ecc1d03232a-kube-api-access-64gpp" (OuterVolumeSpecName: "kube-api-access-64gpp") pod "06f8c64b-0075-49d3-a2ae-0ecc1d03232a" (UID: "06f8c64b-0075-49d3-a2ae-0ecc1d03232a"). InnerVolumeSpecName "kube-api-access-64gpp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:51:43 crc kubenswrapper[4857]: I1128 13:51:43.660683 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06f8c64b-0075-49d3-a2ae-0ecc1d03232a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "06f8c64b-0075-49d3-a2ae-0ecc1d03232a" (UID: "06f8c64b-0075-49d3-a2ae-0ecc1d03232a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:51:43 crc kubenswrapper[4857]: I1128 13:51:43.665066 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 28 13:51:43 crc kubenswrapper[4857]: I1128 13:51:43.665135 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 28 13:51:43 crc kubenswrapper[4857]: I1128 13:51:43.684346 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/06f8c64b-0075-49d3-a2ae-0ecc1d03232a-config" (OuterVolumeSpecName: "config") pod "06f8c64b-0075-49d3-a2ae-0ecc1d03232a" (UID: "06f8c64b-0075-49d3-a2ae-0ecc1d03232a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:51:43 crc kubenswrapper[4857]: I1128 13:51:43.685105 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 28 13:51:43 crc kubenswrapper[4857]: I1128 13:51:43.685160 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 28 13:51:43 crc kubenswrapper[4857]: I1128 13:51:43.713387 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 28 13:51:43 crc kubenswrapper[4857]: I1128 13:51:43.727015 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 28 13:51:43 crc kubenswrapper[4857]: I1128 13:51:43.734731 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06f8c64b-0075-49d3-a2ae-0ecc1d03232a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:43 crc kubenswrapper[4857]: I1128 13:51:43.734764 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-64gpp\" (UniqueName: \"kubernetes.io/projected/06f8c64b-0075-49d3-a2ae-0ecc1d03232a-kube-api-access-64gpp\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:43 crc kubenswrapper[4857]: I1128 13:51:43.734774 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/06f8c64b-0075-49d3-a2ae-0ecc1d03232a-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:43 crc kubenswrapper[4857]: I1128 13:51:43.745205 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 28 13:51:43 crc kubenswrapper[4857]: I1128 13:51:43.745282 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.121535 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-g4slf" event={"ID":"d9199b78-2d93-4877-95b8-ed8457716a3f","Type":"ContainerStarted","Data":"5eb527ac4bbe9bee9521dab12726c27c00c910564fa737354123b784c289d522"} Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.139800 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-794cbbfc48-m96jr" event={"ID":"0944133e-cee5-4927-8f5e-8f781b30d224","Type":"ContainerStarted","Data":"10bea3eb3ec5b5f1dc962843d2fb1b65bf5e100082e1824518a74a4bdbf6d742"} Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.140101 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-794cbbfc48-m96jr" 
event={"ID":"0944133e-cee5-4927-8f5e-8f781b30d224","Type":"ContainerStarted","Data":"5f6e16bc3a6a3e255193ac8544c8911785ede7f13bf7ff1f4eb96c9e2ab1632c"} Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.140114 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-794cbbfc48-m96jr" event={"ID":"0944133e-cee5-4927-8f5e-8f781b30d224","Type":"ContainerStarted","Data":"c0821707a927754dd25c44eaa6ce2b49c5a796281c360b30199708467b898da3"} Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.141030 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-794cbbfc48-m96jr" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.141070 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-794cbbfc48-m96jr" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.156597 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-sb67r" event={"ID":"06f8c64b-0075-49d3-a2ae-0ecc1d03232a","Type":"ContainerDied","Data":"3c953f2686f9b8e0e44dd33a85a9195be946924bf9dec1ca5dd90bff98f88750"} Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.156637 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3c953f2686f9b8e0e44dd33a85a9195be946924bf9dec1ca5dd90bff98f88750" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.156701 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-sb67r" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.187003 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5b64c5866d-mkt8b" event={"ID":"5d4d3b72-fd05-4a47-925c-f17f77c46fc1","Type":"ContainerStarted","Data":"05d50721ac2243fb0f0316bcd6f40e7732694575564655c777cddace1e0267e4"} Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.187058 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5b64c5866d-mkt8b" event={"ID":"5d4d3b72-fd05-4a47-925c-f17f77c46fc1","Type":"ContainerStarted","Data":"67add3ab60c21921a75d4024750fceb80cecdc89bb2d69480c26ea4c366599cf"} Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.187166 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-5b64c5866d-mkt8b" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.199360 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-794cbbfc48-m96jr" podStartSLOduration=2.1993343579999998 podStartE2EDuration="2.199334358s" podCreationTimestamp="2025-11-28 13:51:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:51:44.180559483 +0000 UTC m=+1354.304500920" watchObservedRunningTime="2025-11-28 13:51:44.199334358 +0000 UTC m=+1354.323275795" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.207749 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-g4slf" podStartSLOduration=3.845149165 podStartE2EDuration="37.207729774s" podCreationTimestamp="2025-11-28 13:51:07 +0000 UTC" firstStartedPulling="2025-11-28 13:51:09.524698818 +0000 UTC m=+1319.648640255" lastFinishedPulling="2025-11-28 13:51:42.887279427 +0000 UTC m=+1353.011220864" observedRunningTime="2025-11-28 13:51:44.149515577 +0000 UTC m=+1354.273457014" watchObservedRunningTime="2025-11-28 13:51:44.207729774 +0000 UTC m=+1354.331671211" Nov 28 13:51:44 crc 
kubenswrapper[4857]: I1128 13:51:44.214179 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-f9rw6" event={"ID":"aebc3704-6a31-4813-8826-622ffb7f6934","Type":"ContainerStarted","Data":"83ec5382a77be899a0142fdd5c00d8ed5b441bf19df2a875fdd024a926571dce"} Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.215242 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.215265 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.215519 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.216937 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-5b64c5866d-mkt8b" podStartSLOduration=2.216919652 podStartE2EDuration="2.216919652s" podCreationTimestamp="2025-11-28 13:51:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:51:44.216043738 +0000 UTC m=+1354.339985175" watchObservedRunningTime="2025-11-28 13:51:44.216919652 +0000 UTC m=+1354.340861089" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.218414 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.388907 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-f9rw6" podStartSLOduration=3.50801299 podStartE2EDuration="37.3888309s" podCreationTimestamp="2025-11-28 13:51:07 +0000 UTC" firstStartedPulling="2025-11-28 13:51:08.960735726 +0000 UTC m=+1319.084677163" lastFinishedPulling="2025-11-28 13:51:42.841553636 +0000 UTC m=+1352.965495073" observedRunningTime="2025-11-28 13:51:44.275684514 +0000 UTC m=+1354.399625941" watchObservedRunningTime="2025-11-28 13:51:44.3888309 +0000 UTC m=+1354.512772337" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.420123 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-rjcr5"] Nov 28 13:51:44 crc kubenswrapper[4857]: E1128 13:51:44.420655 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="06f8c64b-0075-49d3-a2ae-0ecc1d03232a" containerName="neutron-db-sync" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.423699 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="06f8c64b-0075-49d3-a2ae-0ecc1d03232a" containerName="neutron-db-sync" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.424035 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="06f8c64b-0075-49d3-a2ae-0ecc1d03232a" containerName="neutron-db-sync" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.425452 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b7b667979-rjcr5" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.436363 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-rjcr5"] Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.456772 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-b48bfd44-jstx9"] Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.458742 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-b48bfd44-jstx9" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.468330 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.468780 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.468931 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.469110 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-bhzgs" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.469820 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-b48bfd44-jstx9"] Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.558007 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/678fcbdb-0c5d-4b45-8c72-0fdca4b27761-dns-svc\") pod \"dnsmasq-dns-6b7b667979-rjcr5\" (UID: \"678fcbdb-0c5d-4b45-8c72-0fdca4b27761\") " pod="openstack/dnsmasq-dns-6b7b667979-rjcr5" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.558081 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/e88c2b4b-8abf-4d38-a17f-b5231a0a254e-httpd-config\") pod \"neutron-b48bfd44-jstx9\" (UID: \"e88c2b4b-8abf-4d38-a17f-b5231a0a254e\") " pod="openstack/neutron-b48bfd44-jstx9" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.558110 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e88c2b4b-8abf-4d38-a17f-b5231a0a254e-config\") pod \"neutron-b48bfd44-jstx9\" (UID: \"e88c2b4b-8abf-4d38-a17f-b5231a0a254e\") " pod="openstack/neutron-b48bfd44-jstx9" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.558148 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/678fcbdb-0c5d-4b45-8c72-0fdca4b27761-ovsdbserver-nb\") pod \"dnsmasq-dns-6b7b667979-rjcr5\" (UID: \"678fcbdb-0c5d-4b45-8c72-0fdca4b27761\") " pod="openstack/dnsmasq-dns-6b7b667979-rjcr5" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.558181 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2b2pj\" (UniqueName: \"kubernetes.io/projected/678fcbdb-0c5d-4b45-8c72-0fdca4b27761-kube-api-access-2b2pj\") pod \"dnsmasq-dns-6b7b667979-rjcr5\" (UID: \"678fcbdb-0c5d-4b45-8c72-0fdca4b27761\") " pod="openstack/dnsmasq-dns-6b7b667979-rjcr5" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.558223 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e88c2b4b-8abf-4d38-a17f-b5231a0a254e-ovndb-tls-certs\") pod \"neutron-b48bfd44-jstx9\" (UID: \"e88c2b4b-8abf-4d38-a17f-b5231a0a254e\") " pod="openstack/neutron-b48bfd44-jstx9" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.558247 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ctjjl\" (UniqueName: \"kubernetes.io/projected/e88c2b4b-8abf-4d38-a17f-b5231a0a254e-kube-api-access-ctjjl\") pod \"neutron-b48bfd44-jstx9\" (UID: \"e88c2b4b-8abf-4d38-a17f-b5231a0a254e\") " pod="openstack/neutron-b48bfd44-jstx9" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.558462 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/678fcbdb-0c5d-4b45-8c72-0fdca4b27761-config\") pod \"dnsmasq-dns-6b7b667979-rjcr5\" (UID: \"678fcbdb-0c5d-4b45-8c72-0fdca4b27761\") " pod="openstack/dnsmasq-dns-6b7b667979-rjcr5" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.558476 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/678fcbdb-0c5d-4b45-8c72-0fdca4b27761-dns-swift-storage-0\") pod \"dnsmasq-dns-6b7b667979-rjcr5\" (UID: \"678fcbdb-0c5d-4b45-8c72-0fdca4b27761\") " pod="openstack/dnsmasq-dns-6b7b667979-rjcr5" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.558500 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e88c2b4b-8abf-4d38-a17f-b5231a0a254e-combined-ca-bundle\") pod \"neutron-b48bfd44-jstx9\" (UID: \"e88c2b4b-8abf-4d38-a17f-b5231a0a254e\") " pod="openstack/neutron-b48bfd44-jstx9" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.558525 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/678fcbdb-0c5d-4b45-8c72-0fdca4b27761-ovsdbserver-sb\") pod \"dnsmasq-dns-6b7b667979-rjcr5\" (UID: \"678fcbdb-0c5d-4b45-8c72-0fdca4b27761\") " pod="openstack/dnsmasq-dns-6b7b667979-rjcr5" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.660325 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2b2pj\" (UniqueName: \"kubernetes.io/projected/678fcbdb-0c5d-4b45-8c72-0fdca4b27761-kube-api-access-2b2pj\") pod \"dnsmasq-dns-6b7b667979-rjcr5\" (UID: \"678fcbdb-0c5d-4b45-8c72-0fdca4b27761\") " pod="openstack/dnsmasq-dns-6b7b667979-rjcr5" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.660994 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e88c2b4b-8abf-4d38-a17f-b5231a0a254e-ovndb-tls-certs\") pod \"neutron-b48bfd44-jstx9\" (UID: \"e88c2b4b-8abf-4d38-a17f-b5231a0a254e\") " pod="openstack/neutron-b48bfd44-jstx9" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.661066 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ctjjl\" (UniqueName: \"kubernetes.io/projected/e88c2b4b-8abf-4d38-a17f-b5231a0a254e-kube-api-access-ctjjl\") pod \"neutron-b48bfd44-jstx9\" (UID: \"e88c2b4b-8abf-4d38-a17f-b5231a0a254e\") " pod="openstack/neutron-b48bfd44-jstx9" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.661118 4857 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/678fcbdb-0c5d-4b45-8c72-0fdca4b27761-config\") pod \"dnsmasq-dns-6b7b667979-rjcr5\" (UID: \"678fcbdb-0c5d-4b45-8c72-0fdca4b27761\") " pod="openstack/dnsmasq-dns-6b7b667979-rjcr5" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.661147 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/678fcbdb-0c5d-4b45-8c72-0fdca4b27761-dns-swift-storage-0\") pod \"dnsmasq-dns-6b7b667979-rjcr5\" (UID: \"678fcbdb-0c5d-4b45-8c72-0fdca4b27761\") " pod="openstack/dnsmasq-dns-6b7b667979-rjcr5" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.661206 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e88c2b4b-8abf-4d38-a17f-b5231a0a254e-combined-ca-bundle\") pod \"neutron-b48bfd44-jstx9\" (UID: \"e88c2b4b-8abf-4d38-a17f-b5231a0a254e\") " pod="openstack/neutron-b48bfd44-jstx9" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.661240 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/678fcbdb-0c5d-4b45-8c72-0fdca4b27761-ovsdbserver-sb\") pod \"dnsmasq-dns-6b7b667979-rjcr5\" (UID: \"678fcbdb-0c5d-4b45-8c72-0fdca4b27761\") " pod="openstack/dnsmasq-dns-6b7b667979-rjcr5" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.661356 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/678fcbdb-0c5d-4b45-8c72-0fdca4b27761-dns-svc\") pod \"dnsmasq-dns-6b7b667979-rjcr5\" (UID: \"678fcbdb-0c5d-4b45-8c72-0fdca4b27761\") " pod="openstack/dnsmasq-dns-6b7b667979-rjcr5" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.661708 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/e88c2b4b-8abf-4d38-a17f-b5231a0a254e-httpd-config\") pod \"neutron-b48bfd44-jstx9\" (UID: \"e88c2b4b-8abf-4d38-a17f-b5231a0a254e\") " pod="openstack/neutron-b48bfd44-jstx9" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.661780 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e88c2b4b-8abf-4d38-a17f-b5231a0a254e-config\") pod \"neutron-b48bfd44-jstx9\" (UID: \"e88c2b4b-8abf-4d38-a17f-b5231a0a254e\") " pod="openstack/neutron-b48bfd44-jstx9" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.661821 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/678fcbdb-0c5d-4b45-8c72-0fdca4b27761-ovsdbserver-nb\") pod \"dnsmasq-dns-6b7b667979-rjcr5\" (UID: \"678fcbdb-0c5d-4b45-8c72-0fdca4b27761\") " pod="openstack/dnsmasq-dns-6b7b667979-rjcr5" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.667304 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/678fcbdb-0c5d-4b45-8c72-0fdca4b27761-dns-svc\") pod \"dnsmasq-dns-6b7b667979-rjcr5\" (UID: \"678fcbdb-0c5d-4b45-8c72-0fdca4b27761\") " pod="openstack/dnsmasq-dns-6b7b667979-rjcr5" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.667502 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/678fcbdb-0c5d-4b45-8c72-0fdca4b27761-dns-swift-storage-0\") pod \"dnsmasq-dns-6b7b667979-rjcr5\" (UID: \"678fcbdb-0c5d-4b45-8c72-0fdca4b27761\") " pod="openstack/dnsmasq-dns-6b7b667979-rjcr5" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.667669 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/678fcbdb-0c5d-4b45-8c72-0fdca4b27761-config\") pod \"dnsmasq-dns-6b7b667979-rjcr5\" (UID: \"678fcbdb-0c5d-4b45-8c72-0fdca4b27761\") " pod="openstack/dnsmasq-dns-6b7b667979-rjcr5" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.668313 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/678fcbdb-0c5d-4b45-8c72-0fdca4b27761-ovsdbserver-nb\") pod \"dnsmasq-dns-6b7b667979-rjcr5\" (UID: \"678fcbdb-0c5d-4b45-8c72-0fdca4b27761\") " pod="openstack/dnsmasq-dns-6b7b667979-rjcr5" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.668423 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/e88c2b4b-8abf-4d38-a17f-b5231a0a254e-httpd-config\") pod \"neutron-b48bfd44-jstx9\" (UID: \"e88c2b4b-8abf-4d38-a17f-b5231a0a254e\") " pod="openstack/neutron-b48bfd44-jstx9" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.669098 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/e88c2b4b-8abf-4d38-a17f-b5231a0a254e-config\") pod \"neutron-b48bfd44-jstx9\" (UID: \"e88c2b4b-8abf-4d38-a17f-b5231a0a254e\") " pod="openstack/neutron-b48bfd44-jstx9" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.669675 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/678fcbdb-0c5d-4b45-8c72-0fdca4b27761-ovsdbserver-sb\") pod \"dnsmasq-dns-6b7b667979-rjcr5\" (UID: \"678fcbdb-0c5d-4b45-8c72-0fdca4b27761\") " pod="openstack/dnsmasq-dns-6b7b667979-rjcr5" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.673808 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e88c2b4b-8abf-4d38-a17f-b5231a0a254e-combined-ca-bundle\") pod \"neutron-b48bfd44-jstx9\" (UID: \"e88c2b4b-8abf-4d38-a17f-b5231a0a254e\") " pod="openstack/neutron-b48bfd44-jstx9" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.688414 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2b2pj\" (UniqueName: \"kubernetes.io/projected/678fcbdb-0c5d-4b45-8c72-0fdca4b27761-kube-api-access-2b2pj\") pod \"dnsmasq-dns-6b7b667979-rjcr5\" (UID: \"678fcbdb-0c5d-4b45-8c72-0fdca4b27761\") " pod="openstack/dnsmasq-dns-6b7b667979-rjcr5" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.689539 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e88c2b4b-8abf-4d38-a17f-b5231a0a254e-ovndb-tls-certs\") pod \"neutron-b48bfd44-jstx9\" (UID: \"e88c2b4b-8abf-4d38-a17f-b5231a0a254e\") " pod="openstack/neutron-b48bfd44-jstx9" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.698166 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ctjjl\" (UniqueName: \"kubernetes.io/projected/e88c2b4b-8abf-4d38-a17f-b5231a0a254e-kube-api-access-ctjjl\") pod \"neutron-b48bfd44-jstx9\" (UID: \"e88c2b4b-8abf-4d38-a17f-b5231a0a254e\") " 
pod="openstack/neutron-b48bfd44-jstx9" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.762580 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b7b667979-rjcr5" Nov 28 13:51:44 crc kubenswrapper[4857]: I1128 13:51:44.793097 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-b48bfd44-jstx9" Nov 28 13:51:45 crc kubenswrapper[4857]: I1128 13:51:45.430561 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-rjcr5"] Nov 28 13:51:45 crc kubenswrapper[4857]: W1128 13:51:45.479388 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod678fcbdb_0c5d_4b45_8c72_0fdca4b27761.slice/crio-7c822dd76497e77c14b033f71034235beb8702aff6c91ec70d9b2a75c77d55bc WatchSource:0}: Error finding container 7c822dd76497e77c14b033f71034235beb8702aff6c91ec70d9b2a75c77d55bc: Status 404 returned error can't find the container with id 7c822dd76497e77c14b033f71034235beb8702aff6c91ec70d9b2a75c77d55bc Nov 28 13:51:45 crc kubenswrapper[4857]: I1128 13:51:45.534002 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-b48bfd44-jstx9"] Nov 28 13:51:45 crc kubenswrapper[4857]: W1128 13:51:45.557774 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode88c2b4b_8abf_4d38_a17f_b5231a0a254e.slice/crio-65c99d3abfad19cb4fc518b3c1a3f975ed0531ce0753b9c86263895e31aecedc WatchSource:0}: Error finding container 65c99d3abfad19cb4fc518b3c1a3f975ed0531ce0753b9c86263895e31aecedc: Status 404 returned error can't find the container with id 65c99d3abfad19cb4fc518b3c1a3f975ed0531ce0753b9c86263895e31aecedc Nov 28 13:51:46 crc kubenswrapper[4857]: I1128 13:51:46.235262 4857 generic.go:334] "Generic (PLEG): container finished" podID="678fcbdb-0c5d-4b45-8c72-0fdca4b27761" containerID="04b7d9e4e1f4eb22b2df6eb83ed320e8c151a46c20e786d5164f458a3e16ea60" exitCode=0 Nov 28 13:51:46 crc kubenswrapper[4857]: I1128 13:51:46.240869 4857 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 13:51:46 crc kubenswrapper[4857]: I1128 13:51:46.240919 4857 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 13:51:46 crc kubenswrapper[4857]: I1128 13:51:46.243880 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-rjcr5" event={"ID":"678fcbdb-0c5d-4b45-8c72-0fdca4b27761","Type":"ContainerDied","Data":"04b7d9e4e1f4eb22b2df6eb83ed320e8c151a46c20e786d5164f458a3e16ea60"} Nov 28 13:51:46 crc kubenswrapper[4857]: I1128 13:51:46.243912 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-rjcr5" event={"ID":"678fcbdb-0c5d-4b45-8c72-0fdca4b27761","Type":"ContainerStarted","Data":"7c822dd76497e77c14b033f71034235beb8702aff6c91ec70d9b2a75c77d55bc"} Nov 28 13:51:46 crc kubenswrapper[4857]: I1128 13:51:46.243924 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-b48bfd44-jstx9" event={"ID":"e88c2b4b-8abf-4d38-a17f-b5231a0a254e","Type":"ContainerStarted","Data":"54a7726b2191651fd67bcae0c2e74b8b56597f703b356cdffa64c067aea302e9"} Nov 28 13:51:46 crc kubenswrapper[4857]: I1128 13:51:46.243933 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-b48bfd44-jstx9" 
event={"ID":"e88c2b4b-8abf-4d38-a17f-b5231a0a254e","Type":"ContainerStarted","Data":"65c99d3abfad19cb4fc518b3c1a3f975ed0531ce0753b9c86263895e31aecedc"} Nov 28 13:51:46 crc kubenswrapper[4857]: I1128 13:51:46.830154 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 28 13:51:46 crc kubenswrapper[4857]: I1128 13:51:46.830798 4857 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 13:51:46 crc kubenswrapper[4857]: I1128 13:51:46.832082 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 28 13:51:47 crc kubenswrapper[4857]: I1128 13:51:47.093634 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 28 13:51:47 crc kubenswrapper[4857]: I1128 13:51:47.095356 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 28 13:51:47 crc kubenswrapper[4857]: I1128 13:51:47.286735 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-b48bfd44-jstx9" event={"ID":"e88c2b4b-8abf-4d38-a17f-b5231a0a254e","Type":"ContainerStarted","Data":"f6d1f70b6b825af6a00c0b03c85ab2fbf69835476846543de4e466d40c7f0e73"} Nov 28 13:51:47 crc kubenswrapper[4857]: I1128 13:51:47.287456 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-b48bfd44-jstx9" Nov 28 13:51:47 crc kubenswrapper[4857]: I1128 13:51:47.298405 4857 generic.go:334] "Generic (PLEG): container finished" podID="d9199b78-2d93-4877-95b8-ed8457716a3f" containerID="5eb527ac4bbe9bee9521dab12726c27c00c910564fa737354123b784c289d522" exitCode=0 Nov 28 13:51:47 crc kubenswrapper[4857]: I1128 13:51:47.298495 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-g4slf" event={"ID":"d9199b78-2d93-4877-95b8-ed8457716a3f","Type":"ContainerDied","Data":"5eb527ac4bbe9bee9521dab12726c27c00c910564fa737354123b784c289d522"} Nov 28 13:51:47 crc kubenswrapper[4857]: I1128 13:51:47.311037 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-rjcr5" event={"ID":"678fcbdb-0c5d-4b45-8c72-0fdca4b27761","Type":"ContainerStarted","Data":"ec4f5919f3ee799187f183844bf82284d6753f7bcc2ecfbf83199601e87b1f92"} Nov 28 13:51:47 crc kubenswrapper[4857]: I1128 13:51:47.312258 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-6b7846d5d5-ddbqf"] Nov 28 13:51:47 crc kubenswrapper[4857]: I1128 13:51:47.314244 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-6b7846d5d5-ddbqf" Nov 28 13:51:47 crc kubenswrapper[4857]: I1128 13:51:47.318253 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 28 13:51:47 crc kubenswrapper[4857]: I1128 13:51:47.318923 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 28 13:51:47 crc kubenswrapper[4857]: I1128 13:51:47.333316 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6b7846d5d5-ddbqf"] Nov 28 13:51:47 crc kubenswrapper[4857]: I1128 13:51:47.334411 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-b48bfd44-jstx9" podStartSLOduration=3.3343913450000002 podStartE2EDuration="3.334391345s" podCreationTimestamp="2025-11-28 13:51:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:51:47.318568459 +0000 UTC m=+1357.442509906" watchObservedRunningTime="2025-11-28 13:51:47.334391345 +0000 UTC m=+1357.458332782" Nov 28 13:51:47 crc kubenswrapper[4857]: I1128 13:51:47.352231 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6b7b667979-rjcr5" podStartSLOduration=3.352210675 podStartE2EDuration="3.352210675s" podCreationTimestamp="2025-11-28 13:51:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:51:47.343801533 +0000 UTC m=+1357.467742970" watchObservedRunningTime="2025-11-28 13:51:47.352210675 +0000 UTC m=+1357.476152112" Nov 28 13:51:47 crc kubenswrapper[4857]: I1128 13:51:47.434811 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/960b2298-15f9-436b-93c9-04b0617c0c9b-ovndb-tls-certs\") pod \"neutron-6b7846d5d5-ddbqf\" (UID: \"960b2298-15f9-436b-93c9-04b0617c0c9b\") " pod="openstack/neutron-6b7846d5d5-ddbqf" Nov 28 13:51:47 crc kubenswrapper[4857]: I1128 13:51:47.434898 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/960b2298-15f9-436b-93c9-04b0617c0c9b-public-tls-certs\") pod \"neutron-6b7846d5d5-ddbqf\" (UID: \"960b2298-15f9-436b-93c9-04b0617c0c9b\") " pod="openstack/neutron-6b7846d5d5-ddbqf" Nov 28 13:51:47 crc kubenswrapper[4857]: I1128 13:51:47.435066 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/960b2298-15f9-436b-93c9-04b0617c0c9b-config\") pod \"neutron-6b7846d5d5-ddbqf\" (UID: \"960b2298-15f9-436b-93c9-04b0617c0c9b\") " pod="openstack/neutron-6b7846d5d5-ddbqf" Nov 28 13:51:47 crc kubenswrapper[4857]: I1128 13:51:47.435115 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ttdd4\" (UniqueName: \"kubernetes.io/projected/960b2298-15f9-436b-93c9-04b0617c0c9b-kube-api-access-ttdd4\") pod \"neutron-6b7846d5d5-ddbqf\" (UID: \"960b2298-15f9-436b-93c9-04b0617c0c9b\") " pod="openstack/neutron-6b7846d5d5-ddbqf" Nov 28 13:51:47 crc kubenswrapper[4857]: I1128 13:51:47.435294 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: 
\"kubernetes.io/secret/960b2298-15f9-436b-93c9-04b0617c0c9b-httpd-config\") pod \"neutron-6b7846d5d5-ddbqf\" (UID: \"960b2298-15f9-436b-93c9-04b0617c0c9b\") " pod="openstack/neutron-6b7846d5d5-ddbqf" Nov 28 13:51:47 crc kubenswrapper[4857]: I1128 13:51:47.435415 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/960b2298-15f9-436b-93c9-04b0617c0c9b-internal-tls-certs\") pod \"neutron-6b7846d5d5-ddbqf\" (UID: \"960b2298-15f9-436b-93c9-04b0617c0c9b\") " pod="openstack/neutron-6b7846d5d5-ddbqf" Nov 28 13:51:47 crc kubenswrapper[4857]: I1128 13:51:47.435554 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/960b2298-15f9-436b-93c9-04b0617c0c9b-combined-ca-bundle\") pod \"neutron-6b7846d5d5-ddbqf\" (UID: \"960b2298-15f9-436b-93c9-04b0617c0c9b\") " pod="openstack/neutron-6b7846d5d5-ddbqf" Nov 28 13:51:47 crc kubenswrapper[4857]: I1128 13:51:47.537547 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/960b2298-15f9-436b-93c9-04b0617c0c9b-config\") pod \"neutron-6b7846d5d5-ddbqf\" (UID: \"960b2298-15f9-436b-93c9-04b0617c0c9b\") " pod="openstack/neutron-6b7846d5d5-ddbqf" Nov 28 13:51:47 crc kubenswrapper[4857]: I1128 13:51:47.537892 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ttdd4\" (UniqueName: \"kubernetes.io/projected/960b2298-15f9-436b-93c9-04b0617c0c9b-kube-api-access-ttdd4\") pod \"neutron-6b7846d5d5-ddbqf\" (UID: \"960b2298-15f9-436b-93c9-04b0617c0c9b\") " pod="openstack/neutron-6b7846d5d5-ddbqf" Nov 28 13:51:47 crc kubenswrapper[4857]: I1128 13:51:47.537961 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/960b2298-15f9-436b-93c9-04b0617c0c9b-httpd-config\") pod \"neutron-6b7846d5d5-ddbqf\" (UID: \"960b2298-15f9-436b-93c9-04b0617c0c9b\") " pod="openstack/neutron-6b7846d5d5-ddbqf" Nov 28 13:51:47 crc kubenswrapper[4857]: I1128 13:51:47.537993 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/960b2298-15f9-436b-93c9-04b0617c0c9b-internal-tls-certs\") pod \"neutron-6b7846d5d5-ddbqf\" (UID: \"960b2298-15f9-436b-93c9-04b0617c0c9b\") " pod="openstack/neutron-6b7846d5d5-ddbqf" Nov 28 13:51:47 crc kubenswrapper[4857]: I1128 13:51:47.538021 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/960b2298-15f9-436b-93c9-04b0617c0c9b-combined-ca-bundle\") pod \"neutron-6b7846d5d5-ddbqf\" (UID: \"960b2298-15f9-436b-93c9-04b0617c0c9b\") " pod="openstack/neutron-6b7846d5d5-ddbqf" Nov 28 13:51:47 crc kubenswrapper[4857]: I1128 13:51:47.538083 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/960b2298-15f9-436b-93c9-04b0617c0c9b-ovndb-tls-certs\") pod \"neutron-6b7846d5d5-ddbqf\" (UID: \"960b2298-15f9-436b-93c9-04b0617c0c9b\") " pod="openstack/neutron-6b7846d5d5-ddbqf" Nov 28 13:51:47 crc kubenswrapper[4857]: I1128 13:51:47.538106 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/960b2298-15f9-436b-93c9-04b0617c0c9b-public-tls-certs\") pod 
\"neutron-6b7846d5d5-ddbqf\" (UID: \"960b2298-15f9-436b-93c9-04b0617c0c9b\") " pod="openstack/neutron-6b7846d5d5-ddbqf" Nov 28 13:51:47 crc kubenswrapper[4857]: I1128 13:51:47.546352 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/960b2298-15f9-436b-93c9-04b0617c0c9b-httpd-config\") pod \"neutron-6b7846d5d5-ddbqf\" (UID: \"960b2298-15f9-436b-93c9-04b0617c0c9b\") " pod="openstack/neutron-6b7846d5d5-ddbqf" Nov 28 13:51:47 crc kubenswrapper[4857]: I1128 13:51:47.547291 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/960b2298-15f9-436b-93c9-04b0617c0c9b-public-tls-certs\") pod \"neutron-6b7846d5d5-ddbqf\" (UID: \"960b2298-15f9-436b-93c9-04b0617c0c9b\") " pod="openstack/neutron-6b7846d5d5-ddbqf" Nov 28 13:51:47 crc kubenswrapper[4857]: I1128 13:51:47.548067 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/960b2298-15f9-436b-93c9-04b0617c0c9b-combined-ca-bundle\") pod \"neutron-6b7846d5d5-ddbqf\" (UID: \"960b2298-15f9-436b-93c9-04b0617c0c9b\") " pod="openstack/neutron-6b7846d5d5-ddbqf" Nov 28 13:51:47 crc kubenswrapper[4857]: I1128 13:51:47.548777 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/960b2298-15f9-436b-93c9-04b0617c0c9b-config\") pod \"neutron-6b7846d5d5-ddbqf\" (UID: \"960b2298-15f9-436b-93c9-04b0617c0c9b\") " pod="openstack/neutron-6b7846d5d5-ddbqf" Nov 28 13:51:47 crc kubenswrapper[4857]: I1128 13:51:47.549192 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/960b2298-15f9-436b-93c9-04b0617c0c9b-ovndb-tls-certs\") pod \"neutron-6b7846d5d5-ddbqf\" (UID: \"960b2298-15f9-436b-93c9-04b0617c0c9b\") " pod="openstack/neutron-6b7846d5d5-ddbqf" Nov 28 13:51:47 crc kubenswrapper[4857]: I1128 13:51:47.553932 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/960b2298-15f9-436b-93c9-04b0617c0c9b-internal-tls-certs\") pod \"neutron-6b7846d5d5-ddbqf\" (UID: \"960b2298-15f9-436b-93c9-04b0617c0c9b\") " pod="openstack/neutron-6b7846d5d5-ddbqf" Nov 28 13:51:47 crc kubenswrapper[4857]: I1128 13:51:47.565608 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ttdd4\" (UniqueName: \"kubernetes.io/projected/960b2298-15f9-436b-93c9-04b0617c0c9b-kube-api-access-ttdd4\") pod \"neutron-6b7846d5d5-ddbqf\" (UID: \"960b2298-15f9-436b-93c9-04b0617c0c9b\") " pod="openstack/neutron-6b7846d5d5-ddbqf" Nov 28 13:51:47 crc kubenswrapper[4857]: I1128 13:51:47.654633 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-6b7846d5d5-ddbqf" Nov 28 13:51:48 crc kubenswrapper[4857]: W1128 13:51:48.194860 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod960b2298_15f9_436b_93c9_04b0617c0c9b.slice/crio-97bc4d2b38fa9e4d4e921a4fcbc290ddc0e73b0481af2a2ce20714601f8a74b5 WatchSource:0}: Error finding container 97bc4d2b38fa9e4d4e921a4fcbc290ddc0e73b0481af2a2ce20714601f8a74b5: Status 404 returned error can't find the container with id 97bc4d2b38fa9e4d4e921a4fcbc290ddc0e73b0481af2a2ce20714601f8a74b5 Nov 28 13:51:48 crc kubenswrapper[4857]: I1128 13:51:48.196361 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6b7846d5d5-ddbqf"] Nov 28 13:51:48 crc kubenswrapper[4857]: I1128 13:51:48.320019 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6b7846d5d5-ddbqf" event={"ID":"960b2298-15f9-436b-93c9-04b0617c0c9b","Type":"ContainerStarted","Data":"97bc4d2b38fa9e4d4e921a4fcbc290ddc0e73b0481af2a2ce20714601f8a74b5"} Nov 28 13:51:48 crc kubenswrapper[4857]: I1128 13:51:48.321451 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6b7b667979-rjcr5" Nov 28 13:51:48 crc kubenswrapper[4857]: I1128 13:51:48.732744 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-g4slf" Nov 28 13:51:48 crc kubenswrapper[4857]: I1128 13:51:48.758588 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qbgxz\" (UniqueName: \"kubernetes.io/projected/d9199b78-2d93-4877-95b8-ed8457716a3f-kube-api-access-qbgxz\") pod \"d9199b78-2d93-4877-95b8-ed8457716a3f\" (UID: \"d9199b78-2d93-4877-95b8-ed8457716a3f\") " Nov 28 13:51:48 crc kubenswrapper[4857]: I1128 13:51:48.758648 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d9199b78-2d93-4877-95b8-ed8457716a3f-db-sync-config-data\") pod \"d9199b78-2d93-4877-95b8-ed8457716a3f\" (UID: \"d9199b78-2d93-4877-95b8-ed8457716a3f\") " Nov 28 13:51:48 crc kubenswrapper[4857]: I1128 13:51:48.758767 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9199b78-2d93-4877-95b8-ed8457716a3f-combined-ca-bundle\") pod \"d9199b78-2d93-4877-95b8-ed8457716a3f\" (UID: \"d9199b78-2d93-4877-95b8-ed8457716a3f\") " Nov 28 13:51:48 crc kubenswrapper[4857]: I1128 13:51:48.772003 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d9199b78-2d93-4877-95b8-ed8457716a3f-kube-api-access-qbgxz" (OuterVolumeSpecName: "kube-api-access-qbgxz") pod "d9199b78-2d93-4877-95b8-ed8457716a3f" (UID: "d9199b78-2d93-4877-95b8-ed8457716a3f"). InnerVolumeSpecName "kube-api-access-qbgxz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:51:48 crc kubenswrapper[4857]: I1128 13:51:48.792325 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9199b78-2d93-4877-95b8-ed8457716a3f-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "d9199b78-2d93-4877-95b8-ed8457716a3f" (UID: "d9199b78-2d93-4877-95b8-ed8457716a3f"). InnerVolumeSpecName "db-sync-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:51:48 crc kubenswrapper[4857]: I1128 13:51:48.850109 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9199b78-2d93-4877-95b8-ed8457716a3f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d9199b78-2d93-4877-95b8-ed8457716a3f" (UID: "d9199b78-2d93-4877-95b8-ed8457716a3f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:51:48 crc kubenswrapper[4857]: I1128 13:51:48.865055 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9199b78-2d93-4877-95b8-ed8457716a3f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:48 crc kubenswrapper[4857]: I1128 13:51:48.865094 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qbgxz\" (UniqueName: \"kubernetes.io/projected/d9199b78-2d93-4877-95b8-ed8457716a3f-kube-api-access-qbgxz\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:48 crc kubenswrapper[4857]: I1128 13:51:48.865106 4857 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d9199b78-2d93-4877-95b8-ed8457716a3f-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.335111 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-g4slf" event={"ID":"d9199b78-2d93-4877-95b8-ed8457716a3f","Type":"ContainerDied","Data":"8c7adae17053612bc7f8224008a84020ea260e11d92574780d1353ddb958a930"} Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.335469 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8c7adae17053612bc7f8224008a84020ea260e11d92574780d1353ddb958a930" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.335357 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-g4slf" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.345171 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6b7846d5d5-ddbqf" event={"ID":"960b2298-15f9-436b-93c9-04b0617c0c9b","Type":"ContainerStarted","Data":"47bbdb9e28bdbe71f7a851efff32886322b17e3b9bc0735fe15a53e277caf7fc"} Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.345227 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6b7846d5d5-ddbqf" event={"ID":"960b2298-15f9-436b-93c9-04b0617c0c9b","Type":"ContainerStarted","Data":"f03c59fbce1ae6f2cf02e7c1ad44973e4ba6366fbbed5e71f01df183b4e8db9c"} Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.416635 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-6b7846d5d5-ddbqf" podStartSLOduration=2.416588326 podStartE2EDuration="2.416588326s" podCreationTimestamp="2025-11-28 13:51:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:51:49.364425302 +0000 UTC m=+1359.488366739" watchObservedRunningTime="2025-11-28 13:51:49.416588326 +0000 UTC m=+1359.540529763" Nov 28 13:51:49 crc kubenswrapper[4857]: E1128 13:51:49.447860 4857 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4fd8fd83_9147_4bbd_80f4_1eb001e2673b.slice/crio-4723d92a5cff83c24a63924037be0671f86e6be917ad119264f1539ec104b7b7\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4fd8fd83_9147_4bbd_80f4_1eb001e2673b.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd9199b78_2d93_4877_95b8_ed8457716a3f.slice/crio-8c7adae17053612bc7f8224008a84020ea260e11d92574780d1353ddb958a930\": RecentStats: unable to find data in memory cache]" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.580397 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-7d59cc7587-wt4q5"] Nov 28 13:51:49 crc kubenswrapper[4857]: E1128 13:51:49.582198 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9199b78-2d93-4877-95b8-ed8457716a3f" containerName="barbican-db-sync" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.582229 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9199b78-2d93-4877-95b8-ed8457716a3f" containerName="barbican-db-sync" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.582480 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d9199b78-2d93-4877-95b8-ed8457716a3f" containerName="barbican-db-sync" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.583740 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-7d59cc7587-wt4q5" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.609863 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.610085 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-pqw8m" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.610247 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.635536 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-7d59cc7587-wt4q5"] Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.650001 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-d85b4cc9d-lptk6"] Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.651525 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-d85b4cc9d-lptk6" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.657810 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-d85b4cc9d-lptk6"] Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.658523 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.714280 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ws6jj\" (UniqueName: \"kubernetes.io/projected/d38848c8-5fdb-462f-8471-a0b1d2211b82-kube-api-access-ws6jj\") pod \"barbican-keystone-listener-d85b4cc9d-lptk6\" (UID: \"d38848c8-5fdb-462f-8471-a0b1d2211b82\") " pod="openstack/barbican-keystone-listener-d85b4cc9d-lptk6" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.714324 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d38848c8-5fdb-462f-8471-a0b1d2211b82-config-data-custom\") pod \"barbican-keystone-listener-d85b4cc9d-lptk6\" (UID: \"d38848c8-5fdb-462f-8471-a0b1d2211b82\") " pod="openstack/barbican-keystone-listener-d85b4cc9d-lptk6" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.714345 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/791bae3e-043c-4a91-8e8b-d1d574dcb008-logs\") pod \"barbican-worker-7d59cc7587-wt4q5\" (UID: \"791bae3e-043c-4a91-8e8b-d1d574dcb008\") " pod="openstack/barbican-worker-7d59cc7587-wt4q5" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.714377 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/791bae3e-043c-4a91-8e8b-d1d574dcb008-combined-ca-bundle\") pod \"barbican-worker-7d59cc7587-wt4q5\" (UID: \"791bae3e-043c-4a91-8e8b-d1d574dcb008\") " pod="openstack/barbican-worker-7d59cc7587-wt4q5" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.714402 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d38848c8-5fdb-462f-8471-a0b1d2211b82-config-data\") pod \"barbican-keystone-listener-d85b4cc9d-lptk6\" (UID: 
\"d38848c8-5fdb-462f-8471-a0b1d2211b82\") " pod="openstack/barbican-keystone-listener-d85b4cc9d-lptk6" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.714429 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bghft\" (UniqueName: \"kubernetes.io/projected/791bae3e-043c-4a91-8e8b-d1d574dcb008-kube-api-access-bghft\") pod \"barbican-worker-7d59cc7587-wt4q5\" (UID: \"791bae3e-043c-4a91-8e8b-d1d574dcb008\") " pod="openstack/barbican-worker-7d59cc7587-wt4q5" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.714449 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/791bae3e-043c-4a91-8e8b-d1d574dcb008-config-data\") pod \"barbican-worker-7d59cc7587-wt4q5\" (UID: \"791bae3e-043c-4a91-8e8b-d1d574dcb008\") " pod="openstack/barbican-worker-7d59cc7587-wt4q5" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.714485 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/791bae3e-043c-4a91-8e8b-d1d574dcb008-config-data-custom\") pod \"barbican-worker-7d59cc7587-wt4q5\" (UID: \"791bae3e-043c-4a91-8e8b-d1d574dcb008\") " pod="openstack/barbican-worker-7d59cc7587-wt4q5" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.714505 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d38848c8-5fdb-462f-8471-a0b1d2211b82-logs\") pod \"barbican-keystone-listener-d85b4cc9d-lptk6\" (UID: \"d38848c8-5fdb-462f-8471-a0b1d2211b82\") " pod="openstack/barbican-keystone-listener-d85b4cc9d-lptk6" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.714522 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d38848c8-5fdb-462f-8471-a0b1d2211b82-combined-ca-bundle\") pod \"barbican-keystone-listener-d85b4cc9d-lptk6\" (UID: \"d38848c8-5fdb-462f-8471-a0b1d2211b82\") " pod="openstack/barbican-keystone-listener-d85b4cc9d-lptk6" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.737215 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-rjcr5"] Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.776457 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-5dlhj"] Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.778045 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-5dlhj" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.794513 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-f59f6f468-scr9q"] Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.796296 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-f59f6f468-scr9q" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.798253 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.814484 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-f59f6f468-scr9q"] Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.816984 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ws6jj\" (UniqueName: \"kubernetes.io/projected/d38848c8-5fdb-462f-8471-a0b1d2211b82-kube-api-access-ws6jj\") pod \"barbican-keystone-listener-d85b4cc9d-lptk6\" (UID: \"d38848c8-5fdb-462f-8471-a0b1d2211b82\") " pod="openstack/barbican-keystone-listener-d85b4cc9d-lptk6" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.817043 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d38848c8-5fdb-462f-8471-a0b1d2211b82-config-data-custom\") pod \"barbican-keystone-listener-d85b4cc9d-lptk6\" (UID: \"d38848c8-5fdb-462f-8471-a0b1d2211b82\") " pod="openstack/barbican-keystone-listener-d85b4cc9d-lptk6" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.817076 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/791bae3e-043c-4a91-8e8b-d1d574dcb008-logs\") pod \"barbican-worker-7d59cc7587-wt4q5\" (UID: \"791bae3e-043c-4a91-8e8b-d1d574dcb008\") " pod="openstack/barbican-worker-7d59cc7587-wt4q5" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.817120 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/791bae3e-043c-4a91-8e8b-d1d574dcb008-combined-ca-bundle\") pod \"barbican-worker-7d59cc7587-wt4q5\" (UID: \"791bae3e-043c-4a91-8e8b-d1d574dcb008\") " pod="openstack/barbican-worker-7d59cc7587-wt4q5" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.817159 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d38848c8-5fdb-462f-8471-a0b1d2211b82-config-data\") pod \"barbican-keystone-listener-d85b4cc9d-lptk6\" (UID: \"d38848c8-5fdb-462f-8471-a0b1d2211b82\") " pod="openstack/barbican-keystone-listener-d85b4cc9d-lptk6" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.817202 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bghft\" (UniqueName: \"kubernetes.io/projected/791bae3e-043c-4a91-8e8b-d1d574dcb008-kube-api-access-bghft\") pod \"barbican-worker-7d59cc7587-wt4q5\" (UID: \"791bae3e-043c-4a91-8e8b-d1d574dcb008\") " pod="openstack/barbican-worker-7d59cc7587-wt4q5" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.817237 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/791bae3e-043c-4a91-8e8b-d1d574dcb008-config-data\") pod \"barbican-worker-7d59cc7587-wt4q5\" (UID: \"791bae3e-043c-4a91-8e8b-d1d574dcb008\") " pod="openstack/barbican-worker-7d59cc7587-wt4q5" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.817293 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/791bae3e-043c-4a91-8e8b-d1d574dcb008-config-data-custom\") pod \"barbican-worker-7d59cc7587-wt4q5\" (UID: 
\"791bae3e-043c-4a91-8e8b-d1d574dcb008\") " pod="openstack/barbican-worker-7d59cc7587-wt4q5" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.817319 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d38848c8-5fdb-462f-8471-a0b1d2211b82-logs\") pod \"barbican-keystone-listener-d85b4cc9d-lptk6\" (UID: \"d38848c8-5fdb-462f-8471-a0b1d2211b82\") " pod="openstack/barbican-keystone-listener-d85b4cc9d-lptk6" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.817342 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d38848c8-5fdb-462f-8471-a0b1d2211b82-combined-ca-bundle\") pod \"barbican-keystone-listener-d85b4cc9d-lptk6\" (UID: \"d38848c8-5fdb-462f-8471-a0b1d2211b82\") " pod="openstack/barbican-keystone-listener-d85b4cc9d-lptk6" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.825665 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/791bae3e-043c-4a91-8e8b-d1d574dcb008-logs\") pod \"barbican-worker-7d59cc7587-wt4q5\" (UID: \"791bae3e-043c-4a91-8e8b-d1d574dcb008\") " pod="openstack/barbican-worker-7d59cc7587-wt4q5" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.827158 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d38848c8-5fdb-462f-8471-a0b1d2211b82-logs\") pod \"barbican-keystone-listener-d85b4cc9d-lptk6\" (UID: \"d38848c8-5fdb-462f-8471-a0b1d2211b82\") " pod="openstack/barbican-keystone-listener-d85b4cc9d-lptk6" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.827973 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/791bae3e-043c-4a91-8e8b-d1d574dcb008-combined-ca-bundle\") pod \"barbican-worker-7d59cc7587-wt4q5\" (UID: \"791bae3e-043c-4a91-8e8b-d1d574dcb008\") " pod="openstack/barbican-worker-7d59cc7587-wt4q5" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.830367 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d38848c8-5fdb-462f-8471-a0b1d2211b82-config-data\") pod \"barbican-keystone-listener-d85b4cc9d-lptk6\" (UID: \"d38848c8-5fdb-462f-8471-a0b1d2211b82\") " pod="openstack/barbican-keystone-listener-d85b4cc9d-lptk6" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.832228 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d38848c8-5fdb-462f-8471-a0b1d2211b82-combined-ca-bundle\") pod \"barbican-keystone-listener-d85b4cc9d-lptk6\" (UID: \"d38848c8-5fdb-462f-8471-a0b1d2211b82\") " pod="openstack/barbican-keystone-listener-d85b4cc9d-lptk6" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.834687 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/791bae3e-043c-4a91-8e8b-d1d574dcb008-config-data-custom\") pod \"barbican-worker-7d59cc7587-wt4q5\" (UID: \"791bae3e-043c-4a91-8e8b-d1d574dcb008\") " pod="openstack/barbican-worker-7d59cc7587-wt4q5" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.840108 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d38848c8-5fdb-462f-8471-a0b1d2211b82-config-data-custom\") pod 
\"barbican-keystone-listener-d85b4cc9d-lptk6\" (UID: \"d38848c8-5fdb-462f-8471-a0b1d2211b82\") " pod="openstack/barbican-keystone-listener-d85b4cc9d-lptk6" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.840733 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/791bae3e-043c-4a91-8e8b-d1d574dcb008-config-data\") pod \"barbican-worker-7d59cc7587-wt4q5\" (UID: \"791bae3e-043c-4a91-8e8b-d1d574dcb008\") " pod="openstack/barbican-worker-7d59cc7587-wt4q5" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.841232 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ws6jj\" (UniqueName: \"kubernetes.io/projected/d38848c8-5fdb-462f-8471-a0b1d2211b82-kube-api-access-ws6jj\") pod \"barbican-keystone-listener-d85b4cc9d-lptk6\" (UID: \"d38848c8-5fdb-462f-8471-a0b1d2211b82\") " pod="openstack/barbican-keystone-listener-d85b4cc9d-lptk6" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.842846 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-5dlhj"] Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.854653 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bghft\" (UniqueName: \"kubernetes.io/projected/791bae3e-043c-4a91-8e8b-d1d574dcb008-kube-api-access-bghft\") pod \"barbican-worker-7d59cc7587-wt4q5\" (UID: \"791bae3e-043c-4a91-8e8b-d1d574dcb008\") " pod="openstack/barbican-worker-7d59cc7587-wt4q5" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.923017 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5c8d91b6-c5e9-42f3-90e6-bd6b57446429-ovsdbserver-sb\") pod \"dnsmasq-dns-848cf88cfc-5dlhj\" (UID: \"5c8d91b6-c5e9-42f3-90e6-bd6b57446429\") " pod="openstack/dnsmasq-dns-848cf88cfc-5dlhj" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.923083 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5c8d91b6-c5e9-42f3-90e6-bd6b57446429-ovsdbserver-nb\") pod \"dnsmasq-dns-848cf88cfc-5dlhj\" (UID: \"5c8d91b6-c5e9-42f3-90e6-bd6b57446429\") " pod="openstack/dnsmasq-dns-848cf88cfc-5dlhj" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.923231 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b50ae785-eb22-4146-9a9e-9f85af67632c-config-data\") pod \"barbican-api-f59f6f468-scr9q\" (UID: \"b50ae785-eb22-4146-9a9e-9f85af67632c\") " pod="openstack/barbican-api-f59f6f468-scr9q" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.923394 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5c8d91b6-c5e9-42f3-90e6-bd6b57446429-config\") pod \"dnsmasq-dns-848cf88cfc-5dlhj\" (UID: \"5c8d91b6-c5e9-42f3-90e6-bd6b57446429\") " pod="openstack/dnsmasq-dns-848cf88cfc-5dlhj" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.923571 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b50ae785-eb22-4146-9a9e-9f85af67632c-logs\") pod \"barbican-api-f59f6f468-scr9q\" (UID: \"b50ae785-eb22-4146-9a9e-9f85af67632c\") " pod="openstack/barbican-api-f59f6f468-scr9q" Nov 28 13:51:49 crc 
kubenswrapper[4857]: I1128 13:51:49.923750 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b50ae785-eb22-4146-9a9e-9f85af67632c-config-data-custom\") pod \"barbican-api-f59f6f468-scr9q\" (UID: \"b50ae785-eb22-4146-9a9e-9f85af67632c\") " pod="openstack/barbican-api-f59f6f468-scr9q" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.923912 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bctj5\" (UniqueName: \"kubernetes.io/projected/5c8d91b6-c5e9-42f3-90e6-bd6b57446429-kube-api-access-bctj5\") pod \"dnsmasq-dns-848cf88cfc-5dlhj\" (UID: \"5c8d91b6-c5e9-42f3-90e6-bd6b57446429\") " pod="openstack/dnsmasq-dns-848cf88cfc-5dlhj" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.923988 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5c8d91b6-c5e9-42f3-90e6-bd6b57446429-dns-swift-storage-0\") pod \"dnsmasq-dns-848cf88cfc-5dlhj\" (UID: \"5c8d91b6-c5e9-42f3-90e6-bd6b57446429\") " pod="openstack/dnsmasq-dns-848cf88cfc-5dlhj" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.924044 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5c8d91b6-c5e9-42f3-90e6-bd6b57446429-dns-svc\") pod \"dnsmasq-dns-848cf88cfc-5dlhj\" (UID: \"5c8d91b6-c5e9-42f3-90e6-bd6b57446429\") " pod="openstack/dnsmasq-dns-848cf88cfc-5dlhj" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.924122 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b50ae785-eb22-4146-9a9e-9f85af67632c-combined-ca-bundle\") pod \"barbican-api-f59f6f468-scr9q\" (UID: \"b50ae785-eb22-4146-9a9e-9f85af67632c\") " pod="openstack/barbican-api-f59f6f468-scr9q" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.924144 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dpp2v\" (UniqueName: \"kubernetes.io/projected/b50ae785-eb22-4146-9a9e-9f85af67632c-kube-api-access-dpp2v\") pod \"barbican-api-f59f6f468-scr9q\" (UID: \"b50ae785-eb22-4146-9a9e-9f85af67632c\") " pod="openstack/barbican-api-f59f6f468-scr9q" Nov 28 13:51:49 crc kubenswrapper[4857]: I1128 13:51:49.950700 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-7d59cc7587-wt4q5" Nov 28 13:51:50 crc kubenswrapper[4857]: I1128 13:51:50.005998 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-d85b4cc9d-lptk6" Nov 28 13:51:50 crc kubenswrapper[4857]: I1128 13:51:50.026250 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5c8d91b6-c5e9-42f3-90e6-bd6b57446429-config\") pod \"dnsmasq-dns-848cf88cfc-5dlhj\" (UID: \"5c8d91b6-c5e9-42f3-90e6-bd6b57446429\") " pod="openstack/dnsmasq-dns-848cf88cfc-5dlhj" Nov 28 13:51:50 crc kubenswrapper[4857]: I1128 13:51:50.026323 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b50ae785-eb22-4146-9a9e-9f85af67632c-logs\") pod \"barbican-api-f59f6f468-scr9q\" (UID: \"b50ae785-eb22-4146-9a9e-9f85af67632c\") " pod="openstack/barbican-api-f59f6f468-scr9q" Nov 28 13:51:50 crc kubenswrapper[4857]: I1128 13:51:50.026379 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b50ae785-eb22-4146-9a9e-9f85af67632c-config-data-custom\") pod \"barbican-api-f59f6f468-scr9q\" (UID: \"b50ae785-eb22-4146-9a9e-9f85af67632c\") " pod="openstack/barbican-api-f59f6f468-scr9q" Nov 28 13:51:50 crc kubenswrapper[4857]: I1128 13:51:50.026438 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bctj5\" (UniqueName: \"kubernetes.io/projected/5c8d91b6-c5e9-42f3-90e6-bd6b57446429-kube-api-access-bctj5\") pod \"dnsmasq-dns-848cf88cfc-5dlhj\" (UID: \"5c8d91b6-c5e9-42f3-90e6-bd6b57446429\") " pod="openstack/dnsmasq-dns-848cf88cfc-5dlhj" Nov 28 13:51:50 crc kubenswrapper[4857]: I1128 13:51:50.026472 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5c8d91b6-c5e9-42f3-90e6-bd6b57446429-dns-swift-storage-0\") pod \"dnsmasq-dns-848cf88cfc-5dlhj\" (UID: \"5c8d91b6-c5e9-42f3-90e6-bd6b57446429\") " pod="openstack/dnsmasq-dns-848cf88cfc-5dlhj" Nov 28 13:51:50 crc kubenswrapper[4857]: I1128 13:51:50.026508 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5c8d91b6-c5e9-42f3-90e6-bd6b57446429-dns-svc\") pod \"dnsmasq-dns-848cf88cfc-5dlhj\" (UID: \"5c8d91b6-c5e9-42f3-90e6-bd6b57446429\") " pod="openstack/dnsmasq-dns-848cf88cfc-5dlhj" Nov 28 13:51:50 crc kubenswrapper[4857]: I1128 13:51:50.026549 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b50ae785-eb22-4146-9a9e-9f85af67632c-combined-ca-bundle\") pod \"barbican-api-f59f6f468-scr9q\" (UID: \"b50ae785-eb22-4146-9a9e-9f85af67632c\") " pod="openstack/barbican-api-f59f6f468-scr9q" Nov 28 13:51:50 crc kubenswrapper[4857]: I1128 13:51:50.027385 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5c8d91b6-c5e9-42f3-90e6-bd6b57446429-dns-svc\") pod \"dnsmasq-dns-848cf88cfc-5dlhj\" (UID: \"5c8d91b6-c5e9-42f3-90e6-bd6b57446429\") " pod="openstack/dnsmasq-dns-848cf88cfc-5dlhj" Nov 28 13:51:50 crc kubenswrapper[4857]: I1128 13:51:50.027436 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dpp2v\" (UniqueName: \"kubernetes.io/projected/b50ae785-eb22-4146-9a9e-9f85af67632c-kube-api-access-dpp2v\") pod \"barbican-api-f59f6f468-scr9q\" (UID: \"b50ae785-eb22-4146-9a9e-9f85af67632c\") " 
pod="openstack/barbican-api-f59f6f468-scr9q" Nov 28 13:51:50 crc kubenswrapper[4857]: I1128 13:51:50.027478 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5c8d91b6-c5e9-42f3-90e6-bd6b57446429-ovsdbserver-sb\") pod \"dnsmasq-dns-848cf88cfc-5dlhj\" (UID: \"5c8d91b6-c5e9-42f3-90e6-bd6b57446429\") " pod="openstack/dnsmasq-dns-848cf88cfc-5dlhj" Nov 28 13:51:50 crc kubenswrapper[4857]: I1128 13:51:50.027507 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5c8d91b6-c5e9-42f3-90e6-bd6b57446429-ovsdbserver-nb\") pod \"dnsmasq-dns-848cf88cfc-5dlhj\" (UID: \"5c8d91b6-c5e9-42f3-90e6-bd6b57446429\") " pod="openstack/dnsmasq-dns-848cf88cfc-5dlhj" Nov 28 13:51:50 crc kubenswrapper[4857]: I1128 13:51:50.027519 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b50ae785-eb22-4146-9a9e-9f85af67632c-logs\") pod \"barbican-api-f59f6f468-scr9q\" (UID: \"b50ae785-eb22-4146-9a9e-9f85af67632c\") " pod="openstack/barbican-api-f59f6f468-scr9q" Nov 28 13:51:50 crc kubenswrapper[4857]: I1128 13:51:50.027559 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b50ae785-eb22-4146-9a9e-9f85af67632c-config-data\") pod \"barbican-api-f59f6f468-scr9q\" (UID: \"b50ae785-eb22-4146-9a9e-9f85af67632c\") " pod="openstack/barbican-api-f59f6f468-scr9q" Nov 28 13:51:50 crc kubenswrapper[4857]: I1128 13:51:50.027577 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5c8d91b6-c5e9-42f3-90e6-bd6b57446429-dns-swift-storage-0\") pod \"dnsmasq-dns-848cf88cfc-5dlhj\" (UID: \"5c8d91b6-c5e9-42f3-90e6-bd6b57446429\") " pod="openstack/dnsmasq-dns-848cf88cfc-5dlhj" Nov 28 13:51:50 crc kubenswrapper[4857]: I1128 13:51:50.027888 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5c8d91b6-c5e9-42f3-90e6-bd6b57446429-config\") pod \"dnsmasq-dns-848cf88cfc-5dlhj\" (UID: \"5c8d91b6-c5e9-42f3-90e6-bd6b57446429\") " pod="openstack/dnsmasq-dns-848cf88cfc-5dlhj" Nov 28 13:51:50 crc kubenswrapper[4857]: I1128 13:51:50.028122 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5c8d91b6-c5e9-42f3-90e6-bd6b57446429-ovsdbserver-sb\") pod \"dnsmasq-dns-848cf88cfc-5dlhj\" (UID: \"5c8d91b6-c5e9-42f3-90e6-bd6b57446429\") " pod="openstack/dnsmasq-dns-848cf88cfc-5dlhj" Nov 28 13:51:50 crc kubenswrapper[4857]: I1128 13:51:50.028590 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5c8d91b6-c5e9-42f3-90e6-bd6b57446429-ovsdbserver-nb\") pod \"dnsmasq-dns-848cf88cfc-5dlhj\" (UID: \"5c8d91b6-c5e9-42f3-90e6-bd6b57446429\") " pod="openstack/dnsmasq-dns-848cf88cfc-5dlhj" Nov 28 13:51:50 crc kubenswrapper[4857]: I1128 13:51:50.031910 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b50ae785-eb22-4146-9a9e-9f85af67632c-config-data-custom\") pod \"barbican-api-f59f6f468-scr9q\" (UID: \"b50ae785-eb22-4146-9a9e-9f85af67632c\") " pod="openstack/barbican-api-f59f6f468-scr9q" Nov 28 13:51:50 crc kubenswrapper[4857]: I1128 13:51:50.033934 4857 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b50ae785-eb22-4146-9a9e-9f85af67632c-config-data\") pod \"barbican-api-f59f6f468-scr9q\" (UID: \"b50ae785-eb22-4146-9a9e-9f85af67632c\") " pod="openstack/barbican-api-f59f6f468-scr9q" Nov 28 13:51:50 crc kubenswrapper[4857]: I1128 13:51:50.045782 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b50ae785-eb22-4146-9a9e-9f85af67632c-combined-ca-bundle\") pod \"barbican-api-f59f6f468-scr9q\" (UID: \"b50ae785-eb22-4146-9a9e-9f85af67632c\") " pod="openstack/barbican-api-f59f6f468-scr9q" Nov 28 13:51:50 crc kubenswrapper[4857]: I1128 13:51:50.047303 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bctj5\" (UniqueName: \"kubernetes.io/projected/5c8d91b6-c5e9-42f3-90e6-bd6b57446429-kube-api-access-bctj5\") pod \"dnsmasq-dns-848cf88cfc-5dlhj\" (UID: \"5c8d91b6-c5e9-42f3-90e6-bd6b57446429\") " pod="openstack/dnsmasq-dns-848cf88cfc-5dlhj" Nov 28 13:51:50 crc kubenswrapper[4857]: I1128 13:51:50.048204 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dpp2v\" (UniqueName: \"kubernetes.io/projected/b50ae785-eb22-4146-9a9e-9f85af67632c-kube-api-access-dpp2v\") pod \"barbican-api-f59f6f468-scr9q\" (UID: \"b50ae785-eb22-4146-9a9e-9f85af67632c\") " pod="openstack/barbican-api-f59f6f468-scr9q" Nov 28 13:51:50 crc kubenswrapper[4857]: I1128 13:51:50.125683 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-5dlhj" Nov 28 13:51:50 crc kubenswrapper[4857]: I1128 13:51:50.237872 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-f59f6f468-scr9q" Nov 28 13:51:50 crc kubenswrapper[4857]: I1128 13:51:50.353909 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-6b7846d5d5-ddbqf" Nov 28 13:51:50 crc kubenswrapper[4857]: I1128 13:51:50.354061 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6b7b667979-rjcr5" podUID="678fcbdb-0c5d-4b45-8c72-0fdca4b27761" containerName="dnsmasq-dns" containerID="cri-o://ec4f5919f3ee799187f183844bf82284d6753f7bcc2ecfbf83199601e87b1f92" gracePeriod=10 Nov 28 13:51:51 crc kubenswrapper[4857]: I1128 13:51:51.364932 4857 generic.go:334] "Generic (PLEG): container finished" podID="678fcbdb-0c5d-4b45-8c72-0fdca4b27761" containerID="ec4f5919f3ee799187f183844bf82284d6753f7bcc2ecfbf83199601e87b1f92" exitCode=0 Nov 28 13:51:51 crc kubenswrapper[4857]: I1128 13:51:51.365869 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-rjcr5" event={"ID":"678fcbdb-0c5d-4b45-8c72-0fdca4b27761","Type":"ContainerDied","Data":"ec4f5919f3ee799187f183844bf82284d6753f7bcc2ecfbf83199601e87b1f92"} Nov 28 13:51:52 crc kubenswrapper[4857]: I1128 13:51:52.146394 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-68dfcc5468-bgz8k"] Nov 28 13:51:52 crc kubenswrapper[4857]: I1128 13:51:52.148340 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-68dfcc5468-bgz8k" Nov 28 13:51:52 crc kubenswrapper[4857]: I1128 13:51:52.153064 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 28 13:51:52 crc kubenswrapper[4857]: I1128 13:51:52.153239 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 28 13:51:52 crc kubenswrapper[4857]: I1128 13:51:52.160358 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-68dfcc5468-bgz8k"] Nov 28 13:51:52 crc kubenswrapper[4857]: I1128 13:51:52.212489 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m9zrs\" (UniqueName: \"kubernetes.io/projected/63f7c690-a408-4e1f-8959-b22badb1b9dc-kube-api-access-m9zrs\") pod \"barbican-api-68dfcc5468-bgz8k\" (UID: \"63f7c690-a408-4e1f-8959-b22badb1b9dc\") " pod="openstack/barbican-api-68dfcc5468-bgz8k" Nov 28 13:51:52 crc kubenswrapper[4857]: I1128 13:51:52.212563 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/63f7c690-a408-4e1f-8959-b22badb1b9dc-internal-tls-certs\") pod \"barbican-api-68dfcc5468-bgz8k\" (UID: \"63f7c690-a408-4e1f-8959-b22badb1b9dc\") " pod="openstack/barbican-api-68dfcc5468-bgz8k" Nov 28 13:51:52 crc kubenswrapper[4857]: I1128 13:51:52.212599 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/63f7c690-a408-4e1f-8959-b22badb1b9dc-logs\") pod \"barbican-api-68dfcc5468-bgz8k\" (UID: \"63f7c690-a408-4e1f-8959-b22badb1b9dc\") " pod="openstack/barbican-api-68dfcc5468-bgz8k" Nov 28 13:51:52 crc kubenswrapper[4857]: I1128 13:51:52.212622 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/63f7c690-a408-4e1f-8959-b22badb1b9dc-config-data-custom\") pod \"barbican-api-68dfcc5468-bgz8k\" (UID: \"63f7c690-a408-4e1f-8959-b22badb1b9dc\") " pod="openstack/barbican-api-68dfcc5468-bgz8k" Nov 28 13:51:52 crc kubenswrapper[4857]: I1128 13:51:52.212645 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/63f7c690-a408-4e1f-8959-b22badb1b9dc-public-tls-certs\") pod \"barbican-api-68dfcc5468-bgz8k\" (UID: \"63f7c690-a408-4e1f-8959-b22badb1b9dc\") " pod="openstack/barbican-api-68dfcc5468-bgz8k" Nov 28 13:51:52 crc kubenswrapper[4857]: I1128 13:51:52.212789 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63f7c690-a408-4e1f-8959-b22badb1b9dc-combined-ca-bundle\") pod \"barbican-api-68dfcc5468-bgz8k\" (UID: \"63f7c690-a408-4e1f-8959-b22badb1b9dc\") " pod="openstack/barbican-api-68dfcc5468-bgz8k" Nov 28 13:51:52 crc kubenswrapper[4857]: I1128 13:51:52.212832 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63f7c690-a408-4e1f-8959-b22badb1b9dc-config-data\") pod \"barbican-api-68dfcc5468-bgz8k\" (UID: \"63f7c690-a408-4e1f-8959-b22badb1b9dc\") " pod="openstack/barbican-api-68dfcc5468-bgz8k" Nov 28 13:51:52 crc kubenswrapper[4857]: I1128 13:51:52.314665 4857 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-m9zrs\" (UniqueName: \"kubernetes.io/projected/63f7c690-a408-4e1f-8959-b22badb1b9dc-kube-api-access-m9zrs\") pod \"barbican-api-68dfcc5468-bgz8k\" (UID: \"63f7c690-a408-4e1f-8959-b22badb1b9dc\") " pod="openstack/barbican-api-68dfcc5468-bgz8k" Nov 28 13:51:52 crc kubenswrapper[4857]: I1128 13:51:52.314752 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/63f7c690-a408-4e1f-8959-b22badb1b9dc-internal-tls-certs\") pod \"barbican-api-68dfcc5468-bgz8k\" (UID: \"63f7c690-a408-4e1f-8959-b22badb1b9dc\") " pod="openstack/barbican-api-68dfcc5468-bgz8k" Nov 28 13:51:52 crc kubenswrapper[4857]: I1128 13:51:52.314802 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/63f7c690-a408-4e1f-8959-b22badb1b9dc-logs\") pod \"barbican-api-68dfcc5468-bgz8k\" (UID: \"63f7c690-a408-4e1f-8959-b22badb1b9dc\") " pod="openstack/barbican-api-68dfcc5468-bgz8k" Nov 28 13:51:52 crc kubenswrapper[4857]: I1128 13:51:52.314825 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/63f7c690-a408-4e1f-8959-b22badb1b9dc-config-data-custom\") pod \"barbican-api-68dfcc5468-bgz8k\" (UID: \"63f7c690-a408-4e1f-8959-b22badb1b9dc\") " pod="openstack/barbican-api-68dfcc5468-bgz8k" Nov 28 13:51:52 crc kubenswrapper[4857]: I1128 13:51:52.314850 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/63f7c690-a408-4e1f-8959-b22badb1b9dc-public-tls-certs\") pod \"barbican-api-68dfcc5468-bgz8k\" (UID: \"63f7c690-a408-4e1f-8959-b22badb1b9dc\") " pod="openstack/barbican-api-68dfcc5468-bgz8k" Nov 28 13:51:52 crc kubenswrapper[4857]: I1128 13:51:52.314924 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63f7c690-a408-4e1f-8959-b22badb1b9dc-combined-ca-bundle\") pod \"barbican-api-68dfcc5468-bgz8k\" (UID: \"63f7c690-a408-4e1f-8959-b22badb1b9dc\") " pod="openstack/barbican-api-68dfcc5468-bgz8k" Nov 28 13:51:52 crc kubenswrapper[4857]: I1128 13:51:52.314940 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63f7c690-a408-4e1f-8959-b22badb1b9dc-config-data\") pod \"barbican-api-68dfcc5468-bgz8k\" (UID: \"63f7c690-a408-4e1f-8959-b22badb1b9dc\") " pod="openstack/barbican-api-68dfcc5468-bgz8k" Nov 28 13:51:52 crc kubenswrapper[4857]: I1128 13:51:52.315770 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/63f7c690-a408-4e1f-8959-b22badb1b9dc-logs\") pod \"barbican-api-68dfcc5468-bgz8k\" (UID: \"63f7c690-a408-4e1f-8959-b22badb1b9dc\") " pod="openstack/barbican-api-68dfcc5468-bgz8k" Nov 28 13:51:52 crc kubenswrapper[4857]: I1128 13:51:52.322894 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/63f7c690-a408-4e1f-8959-b22badb1b9dc-internal-tls-certs\") pod \"barbican-api-68dfcc5468-bgz8k\" (UID: \"63f7c690-a408-4e1f-8959-b22badb1b9dc\") " pod="openstack/barbican-api-68dfcc5468-bgz8k" Nov 28 13:51:52 crc kubenswrapper[4857]: I1128 13:51:52.323769 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/63f7c690-a408-4e1f-8959-b22badb1b9dc-combined-ca-bundle\") pod \"barbican-api-68dfcc5468-bgz8k\" (UID: \"63f7c690-a408-4e1f-8959-b22badb1b9dc\") " pod="openstack/barbican-api-68dfcc5468-bgz8k" Nov 28 13:51:52 crc kubenswrapper[4857]: I1128 13:51:52.325368 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/63f7c690-a408-4e1f-8959-b22badb1b9dc-config-data-custom\") pod \"barbican-api-68dfcc5468-bgz8k\" (UID: \"63f7c690-a408-4e1f-8959-b22badb1b9dc\") " pod="openstack/barbican-api-68dfcc5468-bgz8k" Nov 28 13:51:52 crc kubenswrapper[4857]: I1128 13:51:52.329504 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/63f7c690-a408-4e1f-8959-b22badb1b9dc-public-tls-certs\") pod \"barbican-api-68dfcc5468-bgz8k\" (UID: \"63f7c690-a408-4e1f-8959-b22badb1b9dc\") " pod="openstack/barbican-api-68dfcc5468-bgz8k" Nov 28 13:51:52 crc kubenswrapper[4857]: I1128 13:51:52.335278 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63f7c690-a408-4e1f-8959-b22badb1b9dc-config-data\") pod \"barbican-api-68dfcc5468-bgz8k\" (UID: \"63f7c690-a408-4e1f-8959-b22badb1b9dc\") " pod="openstack/barbican-api-68dfcc5468-bgz8k" Nov 28 13:51:52 crc kubenswrapper[4857]: I1128 13:51:52.336826 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m9zrs\" (UniqueName: \"kubernetes.io/projected/63f7c690-a408-4e1f-8959-b22badb1b9dc-kube-api-access-m9zrs\") pod \"barbican-api-68dfcc5468-bgz8k\" (UID: \"63f7c690-a408-4e1f-8959-b22badb1b9dc\") " pod="openstack/barbican-api-68dfcc5468-bgz8k" Nov 28 13:51:52 crc kubenswrapper[4857]: I1128 13:51:52.383923 4857 generic.go:334] "Generic (PLEG): container finished" podID="aebc3704-6a31-4813-8826-622ffb7f6934" containerID="83ec5382a77be899a0142fdd5c00d8ed5b441bf19df2a875fdd024a926571dce" exitCode=0 Nov 28 13:51:52 crc kubenswrapper[4857]: I1128 13:51:52.383979 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-f9rw6" event={"ID":"aebc3704-6a31-4813-8826-622ffb7f6934","Type":"ContainerDied","Data":"83ec5382a77be899a0142fdd5c00d8ed5b441bf19df2a875fdd024a926571dce"} Nov 28 13:51:52 crc kubenswrapper[4857]: I1128 13:51:52.468119 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-68dfcc5468-bgz8k" Nov 28 13:51:53 crc kubenswrapper[4857]: I1128 13:51:53.763493 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-f9rw6" Nov 28 13:51:53 crc kubenswrapper[4857]: I1128 13:51:53.768948 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b7b667979-rjcr5" Nov 28 13:51:53 crc kubenswrapper[4857]: I1128 13:51:53.849607 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/678fcbdb-0c5d-4b45-8c72-0fdca4b27761-ovsdbserver-sb\") pod \"678fcbdb-0c5d-4b45-8c72-0fdca4b27761\" (UID: \"678fcbdb-0c5d-4b45-8c72-0fdca4b27761\") " Nov 28 13:51:53 crc kubenswrapper[4857]: I1128 13:51:53.849663 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aebc3704-6a31-4813-8826-622ffb7f6934-config-data\") pod \"aebc3704-6a31-4813-8826-622ffb7f6934\" (UID: \"aebc3704-6a31-4813-8826-622ffb7f6934\") " Nov 28 13:51:53 crc kubenswrapper[4857]: I1128 13:51:53.849747 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/678fcbdb-0c5d-4b45-8c72-0fdca4b27761-config\") pod \"678fcbdb-0c5d-4b45-8c72-0fdca4b27761\" (UID: \"678fcbdb-0c5d-4b45-8c72-0fdca4b27761\") " Nov 28 13:51:53 crc kubenswrapper[4857]: I1128 13:51:53.849815 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/aebc3704-6a31-4813-8826-622ffb7f6934-etc-machine-id\") pod \"aebc3704-6a31-4813-8826-622ffb7f6934\" (UID: \"aebc3704-6a31-4813-8826-622ffb7f6934\") " Nov 28 13:51:53 crc kubenswrapper[4857]: I1128 13:51:53.849843 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2b2pj\" (UniqueName: \"kubernetes.io/projected/678fcbdb-0c5d-4b45-8c72-0fdca4b27761-kube-api-access-2b2pj\") pod \"678fcbdb-0c5d-4b45-8c72-0fdca4b27761\" (UID: \"678fcbdb-0c5d-4b45-8c72-0fdca4b27761\") " Nov 28 13:51:53 crc kubenswrapper[4857]: I1128 13:51:53.849879 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/678fcbdb-0c5d-4b45-8c72-0fdca4b27761-ovsdbserver-nb\") pod \"678fcbdb-0c5d-4b45-8c72-0fdca4b27761\" (UID: \"678fcbdb-0c5d-4b45-8c72-0fdca4b27761\") " Nov 28 13:51:53 crc kubenswrapper[4857]: I1128 13:51:53.849957 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aebc3704-6a31-4813-8826-622ffb7f6934-scripts\") pod \"aebc3704-6a31-4813-8826-622ffb7f6934\" (UID: \"aebc3704-6a31-4813-8826-622ffb7f6934\") " Nov 28 13:51:53 crc kubenswrapper[4857]: I1128 13:51:53.849998 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/aebc3704-6a31-4813-8826-622ffb7f6934-db-sync-config-data\") pod \"aebc3704-6a31-4813-8826-622ffb7f6934\" (UID: \"aebc3704-6a31-4813-8826-622ffb7f6934\") " Nov 28 13:51:53 crc kubenswrapper[4857]: I1128 13:51:53.850028 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m64mr\" (UniqueName: \"kubernetes.io/projected/aebc3704-6a31-4813-8826-622ffb7f6934-kube-api-access-m64mr\") pod \"aebc3704-6a31-4813-8826-622ffb7f6934\" (UID: \"aebc3704-6a31-4813-8826-622ffb7f6934\") " Nov 28 13:51:53 crc kubenswrapper[4857]: I1128 13:51:53.850083 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aebc3704-6a31-4813-8826-622ffb7f6934-combined-ca-bundle\") pod 
\"aebc3704-6a31-4813-8826-622ffb7f6934\" (UID: \"aebc3704-6a31-4813-8826-622ffb7f6934\") " Nov 28 13:51:53 crc kubenswrapper[4857]: I1128 13:51:53.850144 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/678fcbdb-0c5d-4b45-8c72-0fdca4b27761-dns-svc\") pod \"678fcbdb-0c5d-4b45-8c72-0fdca4b27761\" (UID: \"678fcbdb-0c5d-4b45-8c72-0fdca4b27761\") " Nov 28 13:51:53 crc kubenswrapper[4857]: I1128 13:51:53.850181 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/678fcbdb-0c5d-4b45-8c72-0fdca4b27761-dns-swift-storage-0\") pod \"678fcbdb-0c5d-4b45-8c72-0fdca4b27761\" (UID: \"678fcbdb-0c5d-4b45-8c72-0fdca4b27761\") " Nov 28 13:51:53 crc kubenswrapper[4857]: I1128 13:51:53.855743 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aebc3704-6a31-4813-8826-622ffb7f6934-scripts" (OuterVolumeSpecName: "scripts") pod "aebc3704-6a31-4813-8826-622ffb7f6934" (UID: "aebc3704-6a31-4813-8826-622ffb7f6934"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:51:53 crc kubenswrapper[4857]: I1128 13:51:53.859527 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/aebc3704-6a31-4813-8826-622ffb7f6934-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "aebc3704-6a31-4813-8826-622ffb7f6934" (UID: "aebc3704-6a31-4813-8826-622ffb7f6934"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:51:53 crc kubenswrapper[4857]: I1128 13:51:53.861142 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aebc3704-6a31-4813-8826-622ffb7f6934-kube-api-access-m64mr" (OuterVolumeSpecName: "kube-api-access-m64mr") pod "aebc3704-6a31-4813-8826-622ffb7f6934" (UID: "aebc3704-6a31-4813-8826-622ffb7f6934"). InnerVolumeSpecName "kube-api-access-m64mr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:51:53 crc kubenswrapper[4857]: I1128 13:51:53.880123 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/678fcbdb-0c5d-4b45-8c72-0fdca4b27761-kube-api-access-2b2pj" (OuterVolumeSpecName: "kube-api-access-2b2pj") pod "678fcbdb-0c5d-4b45-8c72-0fdca4b27761" (UID: "678fcbdb-0c5d-4b45-8c72-0fdca4b27761"). InnerVolumeSpecName "kube-api-access-2b2pj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:51:53 crc kubenswrapper[4857]: I1128 13:51:53.880537 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aebc3704-6a31-4813-8826-622ffb7f6934-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "aebc3704-6a31-4813-8826-622ffb7f6934" (UID: "aebc3704-6a31-4813-8826-622ffb7f6934"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:51:53 crc kubenswrapper[4857]: I1128 13:51:53.923995 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/678fcbdb-0c5d-4b45-8c72-0fdca4b27761-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "678fcbdb-0c5d-4b45-8c72-0fdca4b27761" (UID: "678fcbdb-0c5d-4b45-8c72-0fdca4b27761"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:51:53 crc kubenswrapper[4857]: I1128 13:51:53.947458 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/678fcbdb-0c5d-4b45-8c72-0fdca4b27761-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "678fcbdb-0c5d-4b45-8c72-0fdca4b27761" (UID: "678fcbdb-0c5d-4b45-8c72-0fdca4b27761"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:51:53 crc kubenswrapper[4857]: I1128 13:51:53.953112 4857 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/678fcbdb-0c5d-4b45-8c72-0fdca4b27761-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:53 crc kubenswrapper[4857]: I1128 13:51:53.953173 4857 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/aebc3704-6a31-4813-8826-622ffb7f6934-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:53 crc kubenswrapper[4857]: I1128 13:51:53.953194 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2b2pj\" (UniqueName: \"kubernetes.io/projected/678fcbdb-0c5d-4b45-8c72-0fdca4b27761-kube-api-access-2b2pj\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:53 crc kubenswrapper[4857]: I1128 13:51:53.953210 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/678fcbdb-0c5d-4b45-8c72-0fdca4b27761-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:53 crc kubenswrapper[4857]: I1128 13:51:53.953223 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aebc3704-6a31-4813-8826-622ffb7f6934-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:53 crc kubenswrapper[4857]: I1128 13:51:53.953236 4857 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/aebc3704-6a31-4813-8826-622ffb7f6934-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:53 crc kubenswrapper[4857]: I1128 13:51:53.953247 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m64mr\" (UniqueName: \"kubernetes.io/projected/aebc3704-6a31-4813-8826-622ffb7f6934-kube-api-access-m64mr\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:53 crc kubenswrapper[4857]: I1128 13:51:53.955286 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/678fcbdb-0c5d-4b45-8c72-0fdca4b27761-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "678fcbdb-0c5d-4b45-8c72-0fdca4b27761" (UID: "678fcbdb-0c5d-4b45-8c72-0fdca4b27761"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:51:53 crc kubenswrapper[4857]: I1128 13:51:53.956090 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/678fcbdb-0c5d-4b45-8c72-0fdca4b27761-config" (OuterVolumeSpecName: "config") pod "678fcbdb-0c5d-4b45-8c72-0fdca4b27761" (UID: "678fcbdb-0c5d-4b45-8c72-0fdca4b27761"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:51:53 crc kubenswrapper[4857]: I1128 13:51:53.958707 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/678fcbdb-0c5d-4b45-8c72-0fdca4b27761-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "678fcbdb-0c5d-4b45-8c72-0fdca4b27761" (UID: "678fcbdb-0c5d-4b45-8c72-0fdca4b27761"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:51:53 crc kubenswrapper[4857]: I1128 13:51:53.966406 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aebc3704-6a31-4813-8826-622ffb7f6934-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "aebc3704-6a31-4813-8826-622ffb7f6934" (UID: "aebc3704-6a31-4813-8826-622ffb7f6934"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:51:53 crc kubenswrapper[4857]: I1128 13:51:53.991034 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aebc3704-6a31-4813-8826-622ffb7f6934-config-data" (OuterVolumeSpecName: "config-data") pod "aebc3704-6a31-4813-8826-622ffb7f6934" (UID: "aebc3704-6a31-4813-8826-622ffb7f6934"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.055257 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/678fcbdb-0c5d-4b45-8c72-0fdca4b27761-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.055305 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aebc3704-6a31-4813-8826-622ffb7f6934-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.055319 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/678fcbdb-0c5d-4b45-8c72-0fdca4b27761-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.055330 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/678fcbdb-0c5d-4b45-8c72-0fdca4b27761-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.055341 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aebc3704-6a31-4813-8826-622ffb7f6934-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.403846 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b7b667979-rjcr5" event={"ID":"678fcbdb-0c5d-4b45-8c72-0fdca4b27761","Type":"ContainerDied","Data":"7c822dd76497e77c14b033f71034235beb8702aff6c91ec70d9b2a75c77d55bc"} Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.403893 4857 scope.go:117] "RemoveContainer" containerID="ec4f5919f3ee799187f183844bf82284d6753f7bcc2ecfbf83199601e87b1f92" Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.404108 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b7b667979-rjcr5" Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.411303 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-f9rw6" event={"ID":"aebc3704-6a31-4813-8826-622ffb7f6934","Type":"ContainerDied","Data":"71bb2690ad07b8094b06711485d61ff101d924539bab8b0fa405a7467ff637b0"} Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.411350 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="71bb2690ad07b8094b06711485d61ff101d924539bab8b0fa405a7467ff637b0" Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.411420 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-f9rw6" Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.431031 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-rjcr5"] Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.446028 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6b7b667979-rjcr5"] Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.644846 4857 scope.go:117] "RemoveContainer" containerID="04b7d9e4e1f4eb22b2df6eb83ed320e8c151a46c20e786d5164f458a3e16ea60" Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.794165 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 13:51:54 crc kubenswrapper[4857]: E1128 13:51:54.816667 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="678fcbdb-0c5d-4b45-8c72-0fdca4b27761" containerName="init" Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.816693 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="678fcbdb-0c5d-4b45-8c72-0fdca4b27761" containerName="init" Nov 28 13:51:54 crc kubenswrapper[4857]: E1128 13:51:54.816710 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aebc3704-6a31-4813-8826-622ffb7f6934" containerName="cinder-db-sync" Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.816717 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="aebc3704-6a31-4813-8826-622ffb7f6934" containerName="cinder-db-sync" Nov 28 13:51:54 crc kubenswrapper[4857]: E1128 13:51:54.816729 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="678fcbdb-0c5d-4b45-8c72-0fdca4b27761" containerName="dnsmasq-dns" Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.816735 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="678fcbdb-0c5d-4b45-8c72-0fdca4b27761" containerName="dnsmasq-dns" Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.816902 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="aebc3704-6a31-4813-8826-622ffb7f6934" containerName="cinder-db-sync" Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.816945 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="678fcbdb-0c5d-4b45-8c72-0fdca4b27761" containerName="dnsmasq-dns" Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.818429 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.823626 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.823775 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.823549 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-d8fnj" Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.823964 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.873999 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0bc32938-ce78-40f6-b288-939f758103fb-scripts\") pod \"cinder-scheduler-0\" (UID: \"0bc32938-ce78-40f6-b288-939f758103fb\") " pod="openstack/cinder-scheduler-0" Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.874507 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0bc32938-ce78-40f6-b288-939f758103fb-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"0bc32938-ce78-40f6-b288-939f758103fb\") " pod="openstack/cinder-scheduler-0" Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.874535 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0bc32938-ce78-40f6-b288-939f758103fb-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"0bc32938-ce78-40f6-b288-939f758103fb\") " pod="openstack/cinder-scheduler-0" Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.874592 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bc32938-ce78-40f6-b288-939f758103fb-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"0bc32938-ce78-40f6-b288-939f758103fb\") " pod="openstack/cinder-scheduler-0" Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.874686 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wgrw9\" (UniqueName: \"kubernetes.io/projected/0bc32938-ce78-40f6-b288-939f758103fb-kube-api-access-wgrw9\") pod \"cinder-scheduler-0\" (UID: \"0bc32938-ce78-40f6-b288-939f758103fb\") " pod="openstack/cinder-scheduler-0" Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.874753 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bc32938-ce78-40f6-b288-939f758103fb-config-data\") pod \"cinder-scheduler-0\" (UID: \"0bc32938-ce78-40f6-b288-939f758103fb\") " pod="openstack/cinder-scheduler-0" Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.898381 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.919109 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-5dlhj"] Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.970471 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-rjfmd"] Nov 28 13:51:54 crc 
kubenswrapper[4857]: I1128 13:51:54.972757 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-rjfmd" Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.979514 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bc32938-ce78-40f6-b288-939f758103fb-config-data\") pod \"cinder-scheduler-0\" (UID: \"0bc32938-ce78-40f6-b288-939f758103fb\") " pod="openstack/cinder-scheduler-0" Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.979655 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0bc32938-ce78-40f6-b288-939f758103fb-scripts\") pod \"cinder-scheduler-0\" (UID: \"0bc32938-ce78-40f6-b288-939f758103fb\") " pod="openstack/cinder-scheduler-0" Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.979732 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0bc32938-ce78-40f6-b288-939f758103fb-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"0bc32938-ce78-40f6-b288-939f758103fb\") " pod="openstack/cinder-scheduler-0" Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.979783 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0bc32938-ce78-40f6-b288-939f758103fb-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"0bc32938-ce78-40f6-b288-939f758103fb\") " pod="openstack/cinder-scheduler-0" Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.979876 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bc32938-ce78-40f6-b288-939f758103fb-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"0bc32938-ce78-40f6-b288-939f758103fb\") " pod="openstack/cinder-scheduler-0" Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.980013 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wgrw9\" (UniqueName: \"kubernetes.io/projected/0bc32938-ce78-40f6-b288-939f758103fb-kube-api-access-wgrw9\") pod \"cinder-scheduler-0\" (UID: \"0bc32938-ce78-40f6-b288-939f758103fb\") " pod="openstack/cinder-scheduler-0" Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.980662 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0bc32938-ce78-40f6-b288-939f758103fb-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"0bc32938-ce78-40f6-b288-939f758103fb\") " pod="openstack/cinder-scheduler-0" Nov 28 13:51:54 crc kubenswrapper[4857]: I1128 13:51:54.982392 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-rjfmd"] Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.015446 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bc32938-ce78-40f6-b288-939f758103fb-config-data\") pod \"cinder-scheduler-0\" (UID: \"0bc32938-ce78-40f6-b288-939f758103fb\") " pod="openstack/cinder-scheduler-0" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.015481 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0bc32938-ce78-40f6-b288-939f758103fb-config-data-custom\") pod \"cinder-scheduler-0\" (UID: 
\"0bc32938-ce78-40f6-b288-939f758103fb\") " pod="openstack/cinder-scheduler-0" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.015518 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0bc32938-ce78-40f6-b288-939f758103fb-scripts\") pod \"cinder-scheduler-0\" (UID: \"0bc32938-ce78-40f6-b288-939f758103fb\") " pod="openstack/cinder-scheduler-0" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.015972 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bc32938-ce78-40f6-b288-939f758103fb-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"0bc32938-ce78-40f6-b288-939f758103fb\") " pod="openstack/cinder-scheduler-0" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.028000 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.029799 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.030794 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wgrw9\" (UniqueName: \"kubernetes.io/projected/0bc32938-ce78-40f6-b288-939f758103fb-kube-api-access-wgrw9\") pod \"cinder-scheduler-0\" (UID: \"0bc32938-ce78-40f6-b288-939f758103fb\") " pod="openstack/cinder-scheduler-0" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.032690 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.053091 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.087656 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f524461b-de2f-48eb-a378-c255d35327c4-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-rjfmd\" (UID: \"f524461b-de2f-48eb-a378-c255d35327c4\") " pod="openstack/dnsmasq-dns-6578955fd5-rjfmd" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.088120 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f524461b-de2f-48eb-a378-c255d35327c4-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-rjfmd\" (UID: \"f524461b-de2f-48eb-a378-c255d35327c4\") " pod="openstack/dnsmasq-dns-6578955fd5-rjfmd" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.088179 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/36812b7a-b490-4d20-b0d4-9ab146485617-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"36812b7a-b490-4d20-b0d4-9ab146485617\") " pod="openstack/cinder-api-0" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.088208 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/36812b7a-b490-4d20-b0d4-9ab146485617-config-data\") pod \"cinder-api-0\" (UID: \"36812b7a-b490-4d20-b0d4-9ab146485617\") " pod="openstack/cinder-api-0" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.088256 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/f524461b-de2f-48eb-a378-c255d35327c4-dns-svc\") pod \"dnsmasq-dns-6578955fd5-rjfmd\" (UID: \"f524461b-de2f-48eb-a378-c255d35327c4\") " pod="openstack/dnsmasq-dns-6578955fd5-rjfmd" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.088287 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f524461b-de2f-48eb-a378-c255d35327c4-ovsdbserver-sb\") pod \"dnsmasq-dns-6578955fd5-rjfmd\" (UID: \"f524461b-de2f-48eb-a378-c255d35327c4\") " pod="openstack/dnsmasq-dns-6578955fd5-rjfmd" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.088340 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/36812b7a-b490-4d20-b0d4-9ab146485617-config-data-custom\") pod \"cinder-api-0\" (UID: \"36812b7a-b490-4d20-b0d4-9ab146485617\") " pod="openstack/cinder-api-0" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.088359 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gg9z2\" (UniqueName: \"kubernetes.io/projected/36812b7a-b490-4d20-b0d4-9ab146485617-kube-api-access-gg9z2\") pod \"cinder-api-0\" (UID: \"36812b7a-b490-4d20-b0d4-9ab146485617\") " pod="openstack/cinder-api-0" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.088374 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f524461b-de2f-48eb-a378-c255d35327c4-config\") pod \"dnsmasq-dns-6578955fd5-rjfmd\" (UID: \"f524461b-de2f-48eb-a378-c255d35327c4\") " pod="openstack/dnsmasq-dns-6578955fd5-rjfmd" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.088400 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/36812b7a-b490-4d20-b0d4-9ab146485617-logs\") pod \"cinder-api-0\" (UID: \"36812b7a-b490-4d20-b0d4-9ab146485617\") " pod="openstack/cinder-api-0" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.088442 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g4m9q\" (UniqueName: \"kubernetes.io/projected/f524461b-de2f-48eb-a378-c255d35327c4-kube-api-access-g4m9q\") pod \"dnsmasq-dns-6578955fd5-rjfmd\" (UID: \"f524461b-de2f-48eb-a378-c255d35327c4\") " pod="openstack/dnsmasq-dns-6578955fd5-rjfmd" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.088464 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/36812b7a-b490-4d20-b0d4-9ab146485617-scripts\") pod \"cinder-api-0\" (UID: \"36812b7a-b490-4d20-b0d4-9ab146485617\") " pod="openstack/cinder-api-0" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.088511 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/36812b7a-b490-4d20-b0d4-9ab146485617-etc-machine-id\") pod \"cinder-api-0\" (UID: \"36812b7a-b490-4d20-b0d4-9ab146485617\") " pod="openstack/cinder-api-0" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.164453 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.190122 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f524461b-de2f-48eb-a378-c255d35327c4-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-rjfmd\" (UID: \"f524461b-de2f-48eb-a378-c255d35327c4\") " pod="openstack/dnsmasq-dns-6578955fd5-rjfmd" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.190180 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f524461b-de2f-48eb-a378-c255d35327c4-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-rjfmd\" (UID: \"f524461b-de2f-48eb-a378-c255d35327c4\") " pod="openstack/dnsmasq-dns-6578955fd5-rjfmd" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.190215 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/36812b7a-b490-4d20-b0d4-9ab146485617-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"36812b7a-b490-4d20-b0d4-9ab146485617\") " pod="openstack/cinder-api-0" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.190261 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/36812b7a-b490-4d20-b0d4-9ab146485617-config-data\") pod \"cinder-api-0\" (UID: \"36812b7a-b490-4d20-b0d4-9ab146485617\") " pod="openstack/cinder-api-0" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.190284 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f524461b-de2f-48eb-a378-c255d35327c4-dns-svc\") pod \"dnsmasq-dns-6578955fd5-rjfmd\" (UID: \"f524461b-de2f-48eb-a378-c255d35327c4\") " pod="openstack/dnsmasq-dns-6578955fd5-rjfmd" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.190308 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f524461b-de2f-48eb-a378-c255d35327c4-ovsdbserver-sb\") pod \"dnsmasq-dns-6578955fd5-rjfmd\" (UID: \"f524461b-de2f-48eb-a378-c255d35327c4\") " pod="openstack/dnsmasq-dns-6578955fd5-rjfmd" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.190328 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/36812b7a-b490-4d20-b0d4-9ab146485617-config-data-custom\") pod \"cinder-api-0\" (UID: \"36812b7a-b490-4d20-b0d4-9ab146485617\") " pod="openstack/cinder-api-0" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.190340 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gg9z2\" (UniqueName: \"kubernetes.io/projected/36812b7a-b490-4d20-b0d4-9ab146485617-kube-api-access-gg9z2\") pod \"cinder-api-0\" (UID: \"36812b7a-b490-4d20-b0d4-9ab146485617\") " pod="openstack/cinder-api-0" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.190356 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f524461b-de2f-48eb-a378-c255d35327c4-config\") pod \"dnsmasq-dns-6578955fd5-rjfmd\" (UID: \"f524461b-de2f-48eb-a378-c255d35327c4\") " pod="openstack/dnsmasq-dns-6578955fd5-rjfmd" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.190375 4857 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/36812b7a-b490-4d20-b0d4-9ab146485617-logs\") pod \"cinder-api-0\" (UID: \"36812b7a-b490-4d20-b0d4-9ab146485617\") " pod="openstack/cinder-api-0" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.190394 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g4m9q\" (UniqueName: \"kubernetes.io/projected/f524461b-de2f-48eb-a378-c255d35327c4-kube-api-access-g4m9q\") pod \"dnsmasq-dns-6578955fd5-rjfmd\" (UID: \"f524461b-de2f-48eb-a378-c255d35327c4\") " pod="openstack/dnsmasq-dns-6578955fd5-rjfmd" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.190413 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/36812b7a-b490-4d20-b0d4-9ab146485617-scripts\") pod \"cinder-api-0\" (UID: \"36812b7a-b490-4d20-b0d4-9ab146485617\") " pod="openstack/cinder-api-0" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.190436 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/36812b7a-b490-4d20-b0d4-9ab146485617-etc-machine-id\") pod \"cinder-api-0\" (UID: \"36812b7a-b490-4d20-b0d4-9ab146485617\") " pod="openstack/cinder-api-0" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.190528 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/36812b7a-b490-4d20-b0d4-9ab146485617-etc-machine-id\") pod \"cinder-api-0\" (UID: \"36812b7a-b490-4d20-b0d4-9ab146485617\") " pod="openstack/cinder-api-0" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.191337 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f524461b-de2f-48eb-a378-c255d35327c4-dns-swift-storage-0\") pod \"dnsmasq-dns-6578955fd5-rjfmd\" (UID: \"f524461b-de2f-48eb-a378-c255d35327c4\") " pod="openstack/dnsmasq-dns-6578955fd5-rjfmd" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.191893 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f524461b-de2f-48eb-a378-c255d35327c4-ovsdbserver-nb\") pod \"dnsmasq-dns-6578955fd5-rjfmd\" (UID: \"f524461b-de2f-48eb-a378-c255d35327c4\") " pod="openstack/dnsmasq-dns-6578955fd5-rjfmd" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.195553 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/36812b7a-b490-4d20-b0d4-9ab146485617-logs\") pod \"cinder-api-0\" (UID: \"36812b7a-b490-4d20-b0d4-9ab146485617\") " pod="openstack/cinder-api-0" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.196443 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f524461b-de2f-48eb-a378-c255d35327c4-ovsdbserver-sb\") pod \"dnsmasq-dns-6578955fd5-rjfmd\" (UID: \"f524461b-de2f-48eb-a378-c255d35327c4\") " pod="openstack/dnsmasq-dns-6578955fd5-rjfmd" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.196494 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/36812b7a-b490-4d20-b0d4-9ab146485617-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"36812b7a-b490-4d20-b0d4-9ab146485617\") " pod="openstack/cinder-api-0" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 
13:51:55.196618 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f524461b-de2f-48eb-a378-c255d35327c4-config\") pod \"dnsmasq-dns-6578955fd5-rjfmd\" (UID: \"f524461b-de2f-48eb-a378-c255d35327c4\") " pod="openstack/dnsmasq-dns-6578955fd5-rjfmd" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.197053 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f524461b-de2f-48eb-a378-c255d35327c4-dns-svc\") pod \"dnsmasq-dns-6578955fd5-rjfmd\" (UID: \"f524461b-de2f-48eb-a378-c255d35327c4\") " pod="openstack/dnsmasq-dns-6578955fd5-rjfmd" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.207889 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/36812b7a-b490-4d20-b0d4-9ab146485617-config-data-custom\") pod \"cinder-api-0\" (UID: \"36812b7a-b490-4d20-b0d4-9ab146485617\") " pod="openstack/cinder-api-0" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.216054 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/36812b7a-b490-4d20-b0d4-9ab146485617-scripts\") pod \"cinder-api-0\" (UID: \"36812b7a-b490-4d20-b0d4-9ab146485617\") " pod="openstack/cinder-api-0" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.225982 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/36812b7a-b490-4d20-b0d4-9ab146485617-config-data\") pod \"cinder-api-0\" (UID: \"36812b7a-b490-4d20-b0d4-9ab146485617\") " pod="openstack/cinder-api-0" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.231696 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gg9z2\" (UniqueName: \"kubernetes.io/projected/36812b7a-b490-4d20-b0d4-9ab146485617-kube-api-access-gg9z2\") pod \"cinder-api-0\" (UID: \"36812b7a-b490-4d20-b0d4-9ab146485617\") " pod="openstack/cinder-api-0" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.258954 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g4m9q\" (UniqueName: \"kubernetes.io/projected/f524461b-de2f-48eb-a378-c255d35327c4-kube-api-access-g4m9q\") pod \"dnsmasq-dns-6578955fd5-rjfmd\" (UID: \"f524461b-de2f-48eb-a378-c255d35327c4\") " pod="openstack/dnsmasq-dns-6578955fd5-rjfmd" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.424434 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-rjfmd" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.428788 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.436074 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9b948893-d309-41ca-987c-287ee0b12ef2","Type":"ContainerStarted","Data":"dac2f631af2dd13cbefd63029290ff4e103fbb366fa60c50cb33a3e5334757da"} Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.436238 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9b948893-d309-41ca-987c-287ee0b12ef2" containerName="ceilometer-central-agent" containerID="cri-o://469ba51de67f78e9d88dca6693a397f6a6636c87ca3e4120dd8b5a9837fd6dc3" gracePeriod=30 Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.436531 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.436771 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9b948893-d309-41ca-987c-287ee0b12ef2" containerName="proxy-httpd" containerID="cri-o://dac2f631af2dd13cbefd63029290ff4e103fbb366fa60c50cb33a3e5334757da" gracePeriod=30 Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.436815 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9b948893-d309-41ca-987c-287ee0b12ef2" containerName="sg-core" containerID="cri-o://9b6c4bbe152e866f7792de341cdba73138d078a52439a73ea5c1560ffc423419" gracePeriod=30 Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.436847 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="9b948893-d309-41ca-987c-287ee0b12ef2" containerName="ceilometer-notification-agent" containerID="cri-o://c8b8591445954b9e685515c84c1d977b8750fa6e93237284c1d33ce8ceabed21" gracePeriod=30 Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.453474 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-7d59cc7587-wt4q5"] Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.484202 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-f59f6f468-scr9q"] Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.513251 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-68dfcc5468-bgz8k"] Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.518409 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.720799108 podStartE2EDuration="48.518381674s" podCreationTimestamp="2025-11-28 13:51:07 +0000 UTC" firstStartedPulling="2025-11-28 13:51:09.069884694 +0000 UTC m=+1319.193826131" lastFinishedPulling="2025-11-28 13:51:54.86746726 +0000 UTC m=+1364.991408697" observedRunningTime="2025-11-28 13:51:55.463150179 +0000 UTC m=+1365.587091616" watchObservedRunningTime="2025-11-28 13:51:55.518381674 +0000 UTC m=+1365.642323111" Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.791894 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-d85b4cc9d-lptk6"] Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.804356 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-5dlhj"] Nov 28 13:51:55 crc kubenswrapper[4857]: I1128 13:51:55.980560 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 13:51:56 crc 
kubenswrapper[4857]: I1128 13:51:56.147811 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 28 13:51:56 crc kubenswrapper[4857]: I1128 13:51:56.226763 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-rjfmd"] Nov 28 13:51:56 crc kubenswrapper[4857]: I1128 13:51:56.272960 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="678fcbdb-0c5d-4b45-8c72-0fdca4b27761" path="/var/lib/kubelet/pods/678fcbdb-0c5d-4b45-8c72-0fdca4b27761/volumes" Nov 28 13:51:56 crc kubenswrapper[4857]: I1128 13:51:56.459799 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"36812b7a-b490-4d20-b0d4-9ab146485617","Type":"ContainerStarted","Data":"06346fab270d91bab4aa88ecef28c5e25b205d65caef31d2ea41143d589bf46a"} Nov 28 13:51:56 crc kubenswrapper[4857]: I1128 13:51:56.461521 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-68dfcc5468-bgz8k" event={"ID":"63f7c690-a408-4e1f-8959-b22badb1b9dc","Type":"ContainerStarted","Data":"e64b362c95aa7cd19c2a82ea8df71ad08ba359967fcd92b776ad971e0f91060e"} Nov 28 13:51:56 crc kubenswrapper[4857]: I1128 13:51:56.463205 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7d59cc7587-wt4q5" event={"ID":"791bae3e-043c-4a91-8e8b-d1d574dcb008","Type":"ContainerStarted","Data":"0b7b9479c88e90b598b53d39451837a5437d8c6b571a24c0bbcac4880d5502d4"} Nov 28 13:51:56 crc kubenswrapper[4857]: I1128 13:51:56.464534 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-5dlhj" event={"ID":"5c8d91b6-c5e9-42f3-90e6-bd6b57446429","Type":"ContainerStarted","Data":"89fe7b6484d9b13b080dd437499db8b596aa7f0b3351af0fa60aade3723263f1"} Nov 28 13:51:56 crc kubenswrapper[4857]: I1128 13:51:56.466995 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-f59f6f468-scr9q" event={"ID":"b50ae785-eb22-4146-9a9e-9f85af67632c","Type":"ContainerStarted","Data":"fb4ec4691b50c4e88b317977963163991d47a883448c6c4d030f4ab79e69a043"} Nov 28 13:51:56 crc kubenswrapper[4857]: I1128 13:51:56.469369 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-d85b4cc9d-lptk6" event={"ID":"d38848c8-5fdb-462f-8471-a0b1d2211b82","Type":"ContainerStarted","Data":"f60d3971b99dc2a4bf805e7c987b0264b70e269e775dd4acfad310a452479169"} Nov 28 13:51:56 crc kubenswrapper[4857]: I1128 13:51:56.473190 4857 generic.go:334] "Generic (PLEG): container finished" podID="9b948893-d309-41ca-987c-287ee0b12ef2" containerID="9b6c4bbe152e866f7792de341cdba73138d078a52439a73ea5c1560ffc423419" exitCode=2 Nov 28 13:51:56 crc kubenswrapper[4857]: I1128 13:51:56.473239 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9b948893-d309-41ca-987c-287ee0b12ef2","Type":"ContainerDied","Data":"9b6c4bbe152e866f7792de341cdba73138d078a52439a73ea5c1560ffc423419"} Nov 28 13:51:56 crc kubenswrapper[4857]: I1128 13:51:56.474596 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-rjfmd" event={"ID":"f524461b-de2f-48eb-a378-c255d35327c4","Type":"ContainerStarted","Data":"7acf3b9e2dbfae45fd1e80710cb25da550f8697cddd1c54f86373c5f8512d657"} Nov 28 13:51:56 crc kubenswrapper[4857]: I1128 13:51:56.481526 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" 
event={"ID":"0bc32938-ce78-40f6-b288-939f758103fb","Type":"ContainerStarted","Data":"7313f6fc12d83404a717e915ec7f608e8660a762e895d602cb67c810b845037b"} Nov 28 13:51:57 crc kubenswrapper[4857]: I1128 13:51:57.508025 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"36812b7a-b490-4d20-b0d4-9ab146485617","Type":"ContainerStarted","Data":"ffcda9ea01e0cf537c6745419a3561d47204dc694dea5689cc4657b00f36f3fc"} Nov 28 13:51:57 crc kubenswrapper[4857]: I1128 13:51:57.521175 4857 generic.go:334] "Generic (PLEG): container finished" podID="f524461b-de2f-48eb-a378-c255d35327c4" containerID="79c9d3e44f232f36efdc92705a12480722e27afd98eb482386e33e9ef9ed060b" exitCode=0 Nov 28 13:51:57 crc kubenswrapper[4857]: I1128 13:51:57.521290 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-rjfmd" event={"ID":"f524461b-de2f-48eb-a378-c255d35327c4","Type":"ContainerDied","Data":"79c9d3e44f232f36efdc92705a12480722e27afd98eb482386e33e9ef9ed060b"} Nov 28 13:51:57 crc kubenswrapper[4857]: I1128 13:51:57.533903 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-68dfcc5468-bgz8k" event={"ID":"63f7c690-a408-4e1f-8959-b22badb1b9dc","Type":"ContainerStarted","Data":"4b9f9dd6d4c0768cd26615fd0b66fda62c0a3074685c2e3db7b9ec59c30f07ed"} Nov 28 13:51:57 crc kubenswrapper[4857]: I1128 13:51:57.536534 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-f59f6f468-scr9q" event={"ID":"b50ae785-eb22-4146-9a9e-9f85af67632c","Type":"ContainerStarted","Data":"a5c0da2b65c34b00d3fbe2b28448d1bfb0ec06ae80ce69786b464ebef8e2a25c"} Nov 28 13:51:57 crc kubenswrapper[4857]: I1128 13:51:57.540272 4857 generic.go:334] "Generic (PLEG): container finished" podID="5c8d91b6-c5e9-42f3-90e6-bd6b57446429" containerID="5382374ceafa21081a768cf3678135b0312dd539bd14ce285ecaec4ce8bddc73" exitCode=0 Nov 28 13:51:57 crc kubenswrapper[4857]: I1128 13:51:57.540316 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-5dlhj" event={"ID":"5c8d91b6-c5e9-42f3-90e6-bd6b57446429","Type":"ContainerDied","Data":"5382374ceafa21081a768cf3678135b0312dd539bd14ce285ecaec4ce8bddc73"} Nov 28 13:51:57 crc kubenswrapper[4857]: I1128 13:51:57.552074 4857 generic.go:334] "Generic (PLEG): container finished" podID="9b948893-d309-41ca-987c-287ee0b12ef2" containerID="469ba51de67f78e9d88dca6693a397f6a6636c87ca3e4120dd8b5a9837fd6dc3" exitCode=0 Nov 28 13:51:57 crc kubenswrapper[4857]: I1128 13:51:57.552127 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9b948893-d309-41ca-987c-287ee0b12ef2","Type":"ContainerDied","Data":"469ba51de67f78e9d88dca6693a397f6a6636c87ca3e4120dd8b5a9837fd6dc3"} Nov 28 13:51:57 crc kubenswrapper[4857]: I1128 13:51:57.552529 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 28 13:51:58 crc kubenswrapper[4857]: I1128 13:51:58.479519 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-5dlhj" Nov 28 13:51:58 crc kubenswrapper[4857]: I1128 13:51:58.566084 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-f59f6f468-scr9q" event={"ID":"b50ae785-eb22-4146-9a9e-9f85af67632c","Type":"ContainerStarted","Data":"80755f1ba19e862974f4ce9bd788f0b8def82c945ad81d7dcef824cfe8524303"} Nov 28 13:51:58 crc kubenswrapper[4857]: I1128 13:51:58.566136 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-f59f6f468-scr9q" Nov 28 13:51:58 crc kubenswrapper[4857]: I1128 13:51:58.566158 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-f59f6f468-scr9q" Nov 28 13:51:58 crc kubenswrapper[4857]: I1128 13:51:58.569559 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-848cf88cfc-5dlhj" event={"ID":"5c8d91b6-c5e9-42f3-90e6-bd6b57446429","Type":"ContainerDied","Data":"89fe7b6484d9b13b080dd437499db8b596aa7f0b3351af0fa60aade3723263f1"} Nov 28 13:51:58 crc kubenswrapper[4857]: I1128 13:51:58.569602 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-848cf88cfc-5dlhj" Nov 28 13:51:58 crc kubenswrapper[4857]: I1128 13:51:58.569613 4857 scope.go:117] "RemoveContainer" containerID="5382374ceafa21081a768cf3678135b0312dd539bd14ce285ecaec4ce8bddc73" Nov 28 13:51:58 crc kubenswrapper[4857]: I1128 13:51:58.571883 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"36812b7a-b490-4d20-b0d4-9ab146485617","Type":"ContainerStarted","Data":"5e104f497031a9e42208f67a011afa78da5678221f0881a47e92a152555e84ac"} Nov 28 13:51:58 crc kubenswrapper[4857]: I1128 13:51:58.572080 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="36812b7a-b490-4d20-b0d4-9ab146485617" containerName="cinder-api-log" containerID="cri-o://ffcda9ea01e0cf537c6745419a3561d47204dc694dea5689cc4657b00f36f3fc" gracePeriod=30 Nov 28 13:51:58 crc kubenswrapper[4857]: I1128 13:51:58.572134 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 28 13:51:58 crc kubenswrapper[4857]: I1128 13:51:58.572179 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="36812b7a-b490-4d20-b0d4-9ab146485617" containerName="cinder-api" containerID="cri-o://5e104f497031a9e42208f67a011afa78da5678221f0881a47e92a152555e84ac" gracePeriod=30 Nov 28 13:51:58 crc kubenswrapper[4857]: I1128 13:51:58.576349 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-68dfcc5468-bgz8k" event={"ID":"63f7c690-a408-4e1f-8959-b22badb1b9dc","Type":"ContainerStarted","Data":"32f2e88243b10a53bc37602a5a649cb5b43c66d0e3d47ca1f87ead6c5ef19c53"} Nov 28 13:51:58 crc kubenswrapper[4857]: I1128 13:51:58.576835 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5c8d91b6-c5e9-42f3-90e6-bd6b57446429-ovsdbserver-sb\") pod \"5c8d91b6-c5e9-42f3-90e6-bd6b57446429\" (UID: \"5c8d91b6-c5e9-42f3-90e6-bd6b57446429\") " Nov 28 13:51:58 crc kubenswrapper[4857]: I1128 13:51:58.576934 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bctj5\" (UniqueName: \"kubernetes.io/projected/5c8d91b6-c5e9-42f3-90e6-bd6b57446429-kube-api-access-bctj5\") pod 
\"5c8d91b6-c5e9-42f3-90e6-bd6b57446429\" (UID: \"5c8d91b6-c5e9-42f3-90e6-bd6b57446429\") " Nov 28 13:51:58 crc kubenswrapper[4857]: I1128 13:51:58.580931 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5c8d91b6-c5e9-42f3-90e6-bd6b57446429-config\") pod \"5c8d91b6-c5e9-42f3-90e6-bd6b57446429\" (UID: \"5c8d91b6-c5e9-42f3-90e6-bd6b57446429\") " Nov 28 13:51:58 crc kubenswrapper[4857]: I1128 13:51:58.581155 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5c8d91b6-c5e9-42f3-90e6-bd6b57446429-dns-svc\") pod \"5c8d91b6-c5e9-42f3-90e6-bd6b57446429\" (UID: \"5c8d91b6-c5e9-42f3-90e6-bd6b57446429\") " Nov 28 13:51:58 crc kubenswrapper[4857]: I1128 13:51:58.581335 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5c8d91b6-c5e9-42f3-90e6-bd6b57446429-ovsdbserver-nb\") pod \"5c8d91b6-c5e9-42f3-90e6-bd6b57446429\" (UID: \"5c8d91b6-c5e9-42f3-90e6-bd6b57446429\") " Nov 28 13:51:58 crc kubenswrapper[4857]: I1128 13:51:58.581521 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5c8d91b6-c5e9-42f3-90e6-bd6b57446429-dns-swift-storage-0\") pod \"5c8d91b6-c5e9-42f3-90e6-bd6b57446429\" (UID: \"5c8d91b6-c5e9-42f3-90e6-bd6b57446429\") " Nov 28 13:51:58 crc kubenswrapper[4857]: I1128 13:51:58.576526 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-68dfcc5468-bgz8k" Nov 28 13:51:58 crc kubenswrapper[4857]: I1128 13:51:58.588838 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-68dfcc5468-bgz8k" Nov 28 13:51:58 crc kubenswrapper[4857]: I1128 13:51:58.605761 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-f59f6f468-scr9q" podStartSLOduration=9.605735588 podStartE2EDuration="9.605735588s" podCreationTimestamp="2025-11-28 13:51:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:51:58.592367126 +0000 UTC m=+1368.716308573" watchObservedRunningTime="2025-11-28 13:51:58.605735588 +0000 UTC m=+1368.729677035" Nov 28 13:51:58 crc kubenswrapper[4857]: I1128 13:51:58.611475 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c8d91b6-c5e9-42f3-90e6-bd6b57446429-kube-api-access-bctj5" (OuterVolumeSpecName: "kube-api-access-bctj5") pod "5c8d91b6-c5e9-42f3-90e6-bd6b57446429" (UID: "5c8d91b6-c5e9-42f3-90e6-bd6b57446429"). InnerVolumeSpecName "kube-api-access-bctj5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:51:58 crc kubenswrapper[4857]: I1128 13:51:58.618463 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5c8d91b6-c5e9-42f3-90e6-bd6b57446429-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "5c8d91b6-c5e9-42f3-90e6-bd6b57446429" (UID: "5c8d91b6-c5e9-42f3-90e6-bd6b57446429"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:51:58 crc kubenswrapper[4857]: I1128 13:51:58.624291 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5c8d91b6-c5e9-42f3-90e6-bd6b57446429-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "5c8d91b6-c5e9-42f3-90e6-bd6b57446429" (UID: "5c8d91b6-c5e9-42f3-90e6-bd6b57446429"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:51:58 crc kubenswrapper[4857]: I1128 13:51:58.626863 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5c8d91b6-c5e9-42f3-90e6-bd6b57446429-config" (OuterVolumeSpecName: "config") pod "5c8d91b6-c5e9-42f3-90e6-bd6b57446429" (UID: "5c8d91b6-c5e9-42f3-90e6-bd6b57446429"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:51:58 crc kubenswrapper[4857]: I1128 13:51:58.635613 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5c8d91b6-c5e9-42f3-90e6-bd6b57446429-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "5c8d91b6-c5e9-42f3-90e6-bd6b57446429" (UID: "5c8d91b6-c5e9-42f3-90e6-bd6b57446429"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:51:58 crc kubenswrapper[4857]: I1128 13:51:58.659081 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5c8d91b6-c5e9-42f3-90e6-bd6b57446429-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "5c8d91b6-c5e9-42f3-90e6-bd6b57446429" (UID: "5c8d91b6-c5e9-42f3-90e6-bd6b57446429"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:51:58 crc kubenswrapper[4857]: I1128 13:51:58.666998 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.666976371 podStartE2EDuration="4.666976371s" podCreationTimestamp="2025-11-28 13:51:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:51:58.660184912 +0000 UTC m=+1368.784126349" watchObservedRunningTime="2025-11-28 13:51:58.666976371 +0000 UTC m=+1368.790917808" Nov 28 13:51:58 crc kubenswrapper[4857]: I1128 13:51:58.676208 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-68dfcc5468-bgz8k" podStartSLOduration=6.676189294 podStartE2EDuration="6.676189294s" podCreationTimestamp="2025-11-28 13:51:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:51:58.62783143 +0000 UTC m=+1368.751772867" watchObservedRunningTime="2025-11-28 13:51:58.676189294 +0000 UTC m=+1368.800130721" Nov 28 13:51:58 crc kubenswrapper[4857]: I1128 13:51:58.695302 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5c8d91b6-c5e9-42f3-90e6-bd6b57446429-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:58 crc kubenswrapper[4857]: I1128 13:51:58.695333 4857 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5c8d91b6-c5e9-42f3-90e6-bd6b57446429-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:58 crc kubenswrapper[4857]: I1128 13:51:58.695343 4857 reconciler_common.go:293] "Volume 
detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5c8d91b6-c5e9-42f3-90e6-bd6b57446429-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:58 crc kubenswrapper[4857]: I1128 13:51:58.695352 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bctj5\" (UniqueName: \"kubernetes.io/projected/5c8d91b6-c5e9-42f3-90e6-bd6b57446429-kube-api-access-bctj5\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:58 crc kubenswrapper[4857]: I1128 13:51:58.695363 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5c8d91b6-c5e9-42f3-90e6-bd6b57446429-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:58 crc kubenswrapper[4857]: I1128 13:51:58.695373 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5c8d91b6-c5e9-42f3-90e6-bd6b57446429-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:58 crc kubenswrapper[4857]: I1128 13:51:58.926338 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-5dlhj"] Nov 28 13:51:58 crc kubenswrapper[4857]: I1128 13:51:58.934483 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-848cf88cfc-5dlhj"] Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.238321 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.307806 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/36812b7a-b490-4d20-b0d4-9ab146485617-config-data-custom\") pod \"36812b7a-b490-4d20-b0d4-9ab146485617\" (UID: \"36812b7a-b490-4d20-b0d4-9ab146485617\") " Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.307857 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gg9z2\" (UniqueName: \"kubernetes.io/projected/36812b7a-b490-4d20-b0d4-9ab146485617-kube-api-access-gg9z2\") pod \"36812b7a-b490-4d20-b0d4-9ab146485617\" (UID: \"36812b7a-b490-4d20-b0d4-9ab146485617\") " Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.307934 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/36812b7a-b490-4d20-b0d4-9ab146485617-config-data\") pod \"36812b7a-b490-4d20-b0d4-9ab146485617\" (UID: \"36812b7a-b490-4d20-b0d4-9ab146485617\") " Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.308010 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/36812b7a-b490-4d20-b0d4-9ab146485617-combined-ca-bundle\") pod \"36812b7a-b490-4d20-b0d4-9ab146485617\" (UID: \"36812b7a-b490-4d20-b0d4-9ab146485617\") " Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.308037 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/36812b7a-b490-4d20-b0d4-9ab146485617-etc-machine-id\") pod \"36812b7a-b490-4d20-b0d4-9ab146485617\" (UID: \"36812b7a-b490-4d20-b0d4-9ab146485617\") " Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.308083 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/36812b7a-b490-4d20-b0d4-9ab146485617-logs\") pod \"36812b7a-b490-4d20-b0d4-9ab146485617\" (UID: 
\"36812b7a-b490-4d20-b0d4-9ab146485617\") " Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.308118 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/36812b7a-b490-4d20-b0d4-9ab146485617-scripts\") pod \"36812b7a-b490-4d20-b0d4-9ab146485617\" (UID: \"36812b7a-b490-4d20-b0d4-9ab146485617\") " Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.308354 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/36812b7a-b490-4d20-b0d4-9ab146485617-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "36812b7a-b490-4d20-b0d4-9ab146485617" (UID: "36812b7a-b490-4d20-b0d4-9ab146485617"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.308645 4857 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/36812b7a-b490-4d20-b0d4-9ab146485617-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.308688 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/36812b7a-b490-4d20-b0d4-9ab146485617-logs" (OuterVolumeSpecName: "logs") pod "36812b7a-b490-4d20-b0d4-9ab146485617" (UID: "36812b7a-b490-4d20-b0d4-9ab146485617"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.312289 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/36812b7a-b490-4d20-b0d4-9ab146485617-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "36812b7a-b490-4d20-b0d4-9ab146485617" (UID: "36812b7a-b490-4d20-b0d4-9ab146485617"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.312267 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36812b7a-b490-4d20-b0d4-9ab146485617-kube-api-access-gg9z2" (OuterVolumeSpecName: "kube-api-access-gg9z2") pod "36812b7a-b490-4d20-b0d4-9ab146485617" (UID: "36812b7a-b490-4d20-b0d4-9ab146485617"). InnerVolumeSpecName "kube-api-access-gg9z2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.312801 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/36812b7a-b490-4d20-b0d4-9ab146485617-scripts" (OuterVolumeSpecName: "scripts") pod "36812b7a-b490-4d20-b0d4-9ab146485617" (UID: "36812b7a-b490-4d20-b0d4-9ab146485617"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.344424 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/36812b7a-b490-4d20-b0d4-9ab146485617-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "36812b7a-b490-4d20-b0d4-9ab146485617" (UID: "36812b7a-b490-4d20-b0d4-9ab146485617"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.376323 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/36812b7a-b490-4d20-b0d4-9ab146485617-config-data" (OuterVolumeSpecName: "config-data") pod "36812b7a-b490-4d20-b0d4-9ab146485617" (UID: "36812b7a-b490-4d20-b0d4-9ab146485617"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.409841 4857 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/36812b7a-b490-4d20-b0d4-9ab146485617-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.409871 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gg9z2\" (UniqueName: \"kubernetes.io/projected/36812b7a-b490-4d20-b0d4-9ab146485617-kube-api-access-gg9z2\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.409882 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/36812b7a-b490-4d20-b0d4-9ab146485617-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.409891 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/36812b7a-b490-4d20-b0d4-9ab146485617-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.409901 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/36812b7a-b490-4d20-b0d4-9ab146485617-logs\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.409909 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/36812b7a-b490-4d20-b0d4-9ab146485617-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.609312 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7d59cc7587-wt4q5" event={"ID":"791bae3e-043c-4a91-8e8b-d1d574dcb008","Type":"ContainerStarted","Data":"4c9f26d142655cc105e4408ad7bf9835e75524af522502f18f09cbe79369a7e4"} Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.609366 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7d59cc7587-wt4q5" event={"ID":"791bae3e-043c-4a91-8e8b-d1d574dcb008","Type":"ContainerStarted","Data":"184fc257124031568af407b028afb09e8021dd8c63efc35a95cbe06c5d922f41"} Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.617458 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-d85b4cc9d-lptk6" event={"ID":"d38848c8-5fdb-462f-8471-a0b1d2211b82","Type":"ContainerStarted","Data":"d9962818d5fa228aba9f99a8cbf7802647856bad4f83e918d4d18ae1a9033d11"} Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.617841 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-d85b4cc9d-lptk6" event={"ID":"d38848c8-5fdb-462f-8471-a0b1d2211b82","Type":"ContainerStarted","Data":"56aaf63e257ad874a5ce8e7d3da112b7d05ea4d65d7e6e31d4807576b158a4bf"} Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.620630 4857 generic.go:334] "Generic (PLEG): container finished" podID="36812b7a-b490-4d20-b0d4-9ab146485617" 
containerID="5e104f497031a9e42208f67a011afa78da5678221f0881a47e92a152555e84ac" exitCode=0 Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.620654 4857 generic.go:334] "Generic (PLEG): container finished" podID="36812b7a-b490-4d20-b0d4-9ab146485617" containerID="ffcda9ea01e0cf537c6745419a3561d47204dc694dea5689cc4657b00f36f3fc" exitCode=143 Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.620696 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"36812b7a-b490-4d20-b0d4-9ab146485617","Type":"ContainerDied","Data":"5e104f497031a9e42208f67a011afa78da5678221f0881a47e92a152555e84ac"} Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.620720 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"36812b7a-b490-4d20-b0d4-9ab146485617","Type":"ContainerDied","Data":"ffcda9ea01e0cf537c6745419a3561d47204dc694dea5689cc4657b00f36f3fc"} Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.620734 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"36812b7a-b490-4d20-b0d4-9ab146485617","Type":"ContainerDied","Data":"06346fab270d91bab4aa88ecef28c5e25b205d65caef31d2ea41143d589bf46a"} Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.620752 4857 scope.go:117] "RemoveContainer" containerID="5e104f497031a9e42208f67a011afa78da5678221f0881a47e92a152555e84ac" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.620898 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.640214 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-rjfmd" event={"ID":"f524461b-de2f-48eb-a378-c255d35327c4","Type":"ContainerStarted","Data":"db2a79cd4b372ee99772e981a9c872cdee6f43d61ebd14f1700e6b47f5f8b6da"} Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.657253 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-7d59cc7587-wt4q5" podStartSLOduration=7.216046389 podStartE2EDuration="10.657232283s" podCreationTimestamp="2025-11-28 13:51:49 +0000 UTC" firstStartedPulling="2025-11-28 13:51:55.492349678 +0000 UTC m=+1365.616291115" lastFinishedPulling="2025-11-28 13:51:58.933535572 +0000 UTC m=+1369.057477009" observedRunningTime="2025-11-28 13:51:59.628566378 +0000 UTC m=+1369.752507815" watchObservedRunningTime="2025-11-28 13:51:59.657232283 +0000 UTC m=+1369.781173720" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.665888 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-d85b4cc9d-lptk6" podStartSLOduration=7.485738772 podStartE2EDuration="10.66586649s" podCreationTimestamp="2025-11-28 13:51:49 +0000 UTC" firstStartedPulling="2025-11-28 13:51:55.796074108 +0000 UTC m=+1365.920015545" lastFinishedPulling="2025-11-28 13:51:58.976201826 +0000 UTC m=+1369.100143263" observedRunningTime="2025-11-28 13:51:59.655407205 +0000 UTC m=+1369.779348642" watchObservedRunningTime="2025-11-28 13:51:59.66586649 +0000 UTC m=+1369.789807927" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.691477 4857 scope.go:117] "RemoveContainer" containerID="ffcda9ea01e0cf537c6745419a3561d47204dc694dea5689cc4657b00f36f3fc" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.711210 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6578955fd5-rjfmd" 
podStartSLOduration=5.711184614 podStartE2EDuration="5.711184614s" podCreationTimestamp="2025-11-28 13:51:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:51:59.691645339 +0000 UTC m=+1369.815586776" watchObservedRunningTime="2025-11-28 13:51:59.711184614 +0000 UTC m=+1369.835126051" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.713152 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.725314 4857 scope.go:117] "RemoveContainer" containerID="5e104f497031a9e42208f67a011afa78da5678221f0881a47e92a152555e84ac" Nov 28 13:51:59 crc kubenswrapper[4857]: E1128 13:51:59.725957 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5e104f497031a9e42208f67a011afa78da5678221f0881a47e92a152555e84ac\": container with ID starting with 5e104f497031a9e42208f67a011afa78da5678221f0881a47e92a152555e84ac not found: ID does not exist" containerID="5e104f497031a9e42208f67a011afa78da5678221f0881a47e92a152555e84ac" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.726001 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5e104f497031a9e42208f67a011afa78da5678221f0881a47e92a152555e84ac"} err="failed to get container status \"5e104f497031a9e42208f67a011afa78da5678221f0881a47e92a152555e84ac\": rpc error: code = NotFound desc = could not find container \"5e104f497031a9e42208f67a011afa78da5678221f0881a47e92a152555e84ac\": container with ID starting with 5e104f497031a9e42208f67a011afa78da5678221f0881a47e92a152555e84ac not found: ID does not exist" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.726147 4857 scope.go:117] "RemoveContainer" containerID="ffcda9ea01e0cf537c6745419a3561d47204dc694dea5689cc4657b00f36f3fc" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.727739 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 28 13:51:59 crc kubenswrapper[4857]: E1128 13:51:59.739123 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ffcda9ea01e0cf537c6745419a3561d47204dc694dea5689cc4657b00f36f3fc\": container with ID starting with ffcda9ea01e0cf537c6745419a3561d47204dc694dea5689cc4657b00f36f3fc not found: ID does not exist" containerID="ffcda9ea01e0cf537c6745419a3561d47204dc694dea5689cc4657b00f36f3fc" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.739172 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ffcda9ea01e0cf537c6745419a3561d47204dc694dea5689cc4657b00f36f3fc"} err="failed to get container status \"ffcda9ea01e0cf537c6745419a3561d47204dc694dea5689cc4657b00f36f3fc\": rpc error: code = NotFound desc = could not find container \"ffcda9ea01e0cf537c6745419a3561d47204dc694dea5689cc4657b00f36f3fc\": container with ID starting with ffcda9ea01e0cf537c6745419a3561d47204dc694dea5689cc4657b00f36f3fc not found: ID does not exist" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.739209 4857 scope.go:117] "RemoveContainer" containerID="5e104f497031a9e42208f67a011afa78da5678221f0881a47e92a152555e84ac" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.744104 4857 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"5e104f497031a9e42208f67a011afa78da5678221f0881a47e92a152555e84ac"} err="failed to get container status \"5e104f497031a9e42208f67a011afa78da5678221f0881a47e92a152555e84ac\": rpc error: code = NotFound desc = could not find container \"5e104f497031a9e42208f67a011afa78da5678221f0881a47e92a152555e84ac\": container with ID starting with 5e104f497031a9e42208f67a011afa78da5678221f0881a47e92a152555e84ac not found: ID does not exist" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.744156 4857 scope.go:117] "RemoveContainer" containerID="ffcda9ea01e0cf537c6745419a3561d47204dc694dea5689cc4657b00f36f3fc" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.744510 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ffcda9ea01e0cf537c6745419a3561d47204dc694dea5689cc4657b00f36f3fc"} err="failed to get container status \"ffcda9ea01e0cf537c6745419a3561d47204dc694dea5689cc4657b00f36f3fc\": rpc error: code = NotFound desc = could not find container \"ffcda9ea01e0cf537c6745419a3561d47204dc694dea5689cc4657b00f36f3fc\": container with ID starting with ffcda9ea01e0cf537c6745419a3561d47204dc694dea5689cc4657b00f36f3fc not found: ID does not exist" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.774340 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 28 13:51:59 crc kubenswrapper[4857]: E1128 13:51:59.774740 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36812b7a-b490-4d20-b0d4-9ab146485617" containerName="cinder-api-log" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.774758 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="36812b7a-b490-4d20-b0d4-9ab146485617" containerName="cinder-api-log" Nov 28 13:51:59 crc kubenswrapper[4857]: E1128 13:51:59.774775 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36812b7a-b490-4d20-b0d4-9ab146485617" containerName="cinder-api" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.774781 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="36812b7a-b490-4d20-b0d4-9ab146485617" containerName="cinder-api" Nov 28 13:51:59 crc kubenswrapper[4857]: E1128 13:51:59.774803 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c8d91b6-c5e9-42f3-90e6-bd6b57446429" containerName="init" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.774808 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c8d91b6-c5e9-42f3-90e6-bd6b57446429" containerName="init" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.774995 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="36812b7a-b490-4d20-b0d4-9ab146485617" containerName="cinder-api" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.775010 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c8d91b6-c5e9-42f3-90e6-bd6b57446429" containerName="init" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.775027 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="36812b7a-b490-4d20-b0d4-9ab146485617" containerName="cinder-api-log" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.775986 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.782434 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.782609 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.782796 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 28 13:51:59 crc kubenswrapper[4857]: E1128 13:51:59.790844 4857 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4fd8fd83_9147_4bbd_80f4_1eb001e2673b.slice/crio-4723d92a5cff83c24a63924037be0671f86e6be917ad119264f1539ec104b7b7\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4fd8fd83_9147_4bbd_80f4_1eb001e2673b.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod36812b7a_b490_4d20_b0d4_9ab146485617.slice/crio-06346fab270d91bab4aa88ecef28c5e25b205d65caef31d2ea41143d589bf46a\": RecentStats: unable to find data in memory cache]" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.792068 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.929633 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d973a56d-fc8e-4cef-a590-d21d32242dc4-etc-machine-id\") pod \"cinder-api-0\" (UID: \"d973a56d-fc8e-4cef-a590-d21d32242dc4\") " pod="openstack/cinder-api-0" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.929703 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d973a56d-fc8e-4cef-a590-d21d32242dc4-config-data\") pod \"cinder-api-0\" (UID: \"d973a56d-fc8e-4cef-a590-d21d32242dc4\") " pod="openstack/cinder-api-0" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.929723 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d973a56d-fc8e-4cef-a590-d21d32242dc4-public-tls-certs\") pod \"cinder-api-0\" (UID: \"d973a56d-fc8e-4cef-a590-d21d32242dc4\") " pod="openstack/cinder-api-0" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.929869 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lbqm7\" (UniqueName: \"kubernetes.io/projected/d973a56d-fc8e-4cef-a590-d21d32242dc4-kube-api-access-lbqm7\") pod \"cinder-api-0\" (UID: \"d973a56d-fc8e-4cef-a590-d21d32242dc4\") " pod="openstack/cinder-api-0" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.929914 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d973a56d-fc8e-4cef-a590-d21d32242dc4-logs\") pod \"cinder-api-0\" (UID: \"d973a56d-fc8e-4cef-a590-d21d32242dc4\") " pod="openstack/cinder-api-0" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.929965 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/d973a56d-fc8e-4cef-a590-d21d32242dc4-scripts\") pod \"cinder-api-0\" (UID: \"d973a56d-fc8e-4cef-a590-d21d32242dc4\") " pod="openstack/cinder-api-0" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.929981 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d973a56d-fc8e-4cef-a590-d21d32242dc4-config-data-custom\") pod \"cinder-api-0\" (UID: \"d973a56d-fc8e-4cef-a590-d21d32242dc4\") " pod="openstack/cinder-api-0" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.930006 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d973a56d-fc8e-4cef-a590-d21d32242dc4-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"d973a56d-fc8e-4cef-a590-d21d32242dc4\") " pod="openstack/cinder-api-0" Nov 28 13:51:59 crc kubenswrapper[4857]: I1128 13:51:59.930198 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d973a56d-fc8e-4cef-a590-d21d32242dc4-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"d973a56d-fc8e-4cef-a590-d21d32242dc4\") " pod="openstack/cinder-api-0" Nov 28 13:52:00 crc kubenswrapper[4857]: I1128 13:52:00.032416 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d973a56d-fc8e-4cef-a590-d21d32242dc4-logs\") pod \"cinder-api-0\" (UID: \"d973a56d-fc8e-4cef-a590-d21d32242dc4\") " pod="openstack/cinder-api-0" Nov 28 13:52:00 crc kubenswrapper[4857]: I1128 13:52:00.032769 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d973a56d-fc8e-4cef-a590-d21d32242dc4-scripts\") pod \"cinder-api-0\" (UID: \"d973a56d-fc8e-4cef-a590-d21d32242dc4\") " pod="openstack/cinder-api-0" Nov 28 13:52:00 crc kubenswrapper[4857]: I1128 13:52:00.032787 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d973a56d-fc8e-4cef-a590-d21d32242dc4-config-data-custom\") pod \"cinder-api-0\" (UID: \"d973a56d-fc8e-4cef-a590-d21d32242dc4\") " pod="openstack/cinder-api-0" Nov 28 13:52:00 crc kubenswrapper[4857]: I1128 13:52:00.032803 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d973a56d-fc8e-4cef-a590-d21d32242dc4-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"d973a56d-fc8e-4cef-a590-d21d32242dc4\") " pod="openstack/cinder-api-0" Nov 28 13:52:00 crc kubenswrapper[4857]: I1128 13:52:00.032836 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d973a56d-fc8e-4cef-a590-d21d32242dc4-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"d973a56d-fc8e-4cef-a590-d21d32242dc4\") " pod="openstack/cinder-api-0" Nov 28 13:52:00 crc kubenswrapper[4857]: I1128 13:52:00.032872 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d973a56d-fc8e-4cef-a590-d21d32242dc4-logs\") pod \"cinder-api-0\" (UID: \"d973a56d-fc8e-4cef-a590-d21d32242dc4\") " pod="openstack/cinder-api-0" Nov 28 13:52:00 crc kubenswrapper[4857]: I1128 13:52:00.032939 4857 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d973a56d-fc8e-4cef-a590-d21d32242dc4-etc-machine-id\") pod \"cinder-api-0\" (UID: \"d973a56d-fc8e-4cef-a590-d21d32242dc4\") " pod="openstack/cinder-api-0" Nov 28 13:52:00 crc kubenswrapper[4857]: I1128 13:52:00.032995 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d973a56d-fc8e-4cef-a590-d21d32242dc4-config-data\") pod \"cinder-api-0\" (UID: \"d973a56d-fc8e-4cef-a590-d21d32242dc4\") " pod="openstack/cinder-api-0" Nov 28 13:52:00 crc kubenswrapper[4857]: I1128 13:52:00.033015 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d973a56d-fc8e-4cef-a590-d21d32242dc4-public-tls-certs\") pod \"cinder-api-0\" (UID: \"d973a56d-fc8e-4cef-a590-d21d32242dc4\") " pod="openstack/cinder-api-0" Nov 28 13:52:00 crc kubenswrapper[4857]: I1128 13:52:00.033049 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lbqm7\" (UniqueName: \"kubernetes.io/projected/d973a56d-fc8e-4cef-a590-d21d32242dc4-kube-api-access-lbqm7\") pod \"cinder-api-0\" (UID: \"d973a56d-fc8e-4cef-a590-d21d32242dc4\") " pod="openstack/cinder-api-0" Nov 28 13:52:00 crc kubenswrapper[4857]: I1128 13:52:00.033627 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d973a56d-fc8e-4cef-a590-d21d32242dc4-etc-machine-id\") pod \"cinder-api-0\" (UID: \"d973a56d-fc8e-4cef-a590-d21d32242dc4\") " pod="openstack/cinder-api-0" Nov 28 13:52:00 crc kubenswrapper[4857]: I1128 13:52:00.037082 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d973a56d-fc8e-4cef-a590-d21d32242dc4-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"d973a56d-fc8e-4cef-a590-d21d32242dc4\") " pod="openstack/cinder-api-0" Nov 28 13:52:00 crc kubenswrapper[4857]: I1128 13:52:00.037478 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d973a56d-fc8e-4cef-a590-d21d32242dc4-scripts\") pod \"cinder-api-0\" (UID: \"d973a56d-fc8e-4cef-a590-d21d32242dc4\") " pod="openstack/cinder-api-0" Nov 28 13:52:00 crc kubenswrapper[4857]: I1128 13:52:00.037979 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d973a56d-fc8e-4cef-a590-d21d32242dc4-config-data\") pod \"cinder-api-0\" (UID: \"d973a56d-fc8e-4cef-a590-d21d32242dc4\") " pod="openstack/cinder-api-0" Nov 28 13:52:00 crc kubenswrapper[4857]: I1128 13:52:00.038658 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d973a56d-fc8e-4cef-a590-d21d32242dc4-public-tls-certs\") pod \"cinder-api-0\" (UID: \"d973a56d-fc8e-4cef-a590-d21d32242dc4\") " pod="openstack/cinder-api-0" Nov 28 13:52:00 crc kubenswrapper[4857]: I1128 13:52:00.038864 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d973a56d-fc8e-4cef-a590-d21d32242dc4-config-data-custom\") pod \"cinder-api-0\" (UID: \"d973a56d-fc8e-4cef-a590-d21d32242dc4\") " pod="openstack/cinder-api-0" Nov 28 13:52:00 crc kubenswrapper[4857]: I1128 13:52:00.042919 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/d973a56d-fc8e-4cef-a590-d21d32242dc4-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"d973a56d-fc8e-4cef-a590-d21d32242dc4\") " pod="openstack/cinder-api-0" Nov 28 13:52:00 crc kubenswrapper[4857]: I1128 13:52:00.054458 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lbqm7\" (UniqueName: \"kubernetes.io/projected/d973a56d-fc8e-4cef-a590-d21d32242dc4-kube-api-access-lbqm7\") pod \"cinder-api-0\" (UID: \"d973a56d-fc8e-4cef-a590-d21d32242dc4\") " pod="openstack/cinder-api-0" Nov 28 13:52:00 crc kubenswrapper[4857]: I1128 13:52:00.244140 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36812b7a-b490-4d20-b0d4-9ab146485617" path="/var/lib/kubelet/pods/36812b7a-b490-4d20-b0d4-9ab146485617/volumes" Nov 28 13:52:00 crc kubenswrapper[4857]: I1128 13:52:00.244772 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 13:52:00 crc kubenswrapper[4857]: I1128 13:52:00.246386 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5c8d91b6-c5e9-42f3-90e6-bd6b57446429" path="/var/lib/kubelet/pods/5c8d91b6-c5e9-42f3-90e6-bd6b57446429/volumes" Nov 28 13:52:00 crc kubenswrapper[4857]: I1128 13:52:00.425272 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6578955fd5-rjfmd" Nov 28 13:52:00 crc kubenswrapper[4857]: I1128 13:52:00.656320 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0bc32938-ce78-40f6-b288-939f758103fb","Type":"ContainerStarted","Data":"9cfdecf7caf62c080c8cfd85adc522a1a171aa64e83ba6e3f857dabfa18a1e30"} Nov 28 13:52:00 crc kubenswrapper[4857]: I1128 13:52:00.656359 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0bc32938-ce78-40f6-b288-939f758103fb","Type":"ContainerStarted","Data":"e0bb363016e6f4cbc982924e7be917fee5e3be447b9969c215fd22c123388d00"} Nov 28 13:52:00 crc kubenswrapper[4857]: I1128 13:52:00.689118 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.735320224 podStartE2EDuration="6.68909748s" podCreationTimestamp="2025-11-28 13:51:54 +0000 UTC" firstStartedPulling="2025-11-28 13:51:55.979760036 +0000 UTC m=+1366.103701463" lastFinishedPulling="2025-11-28 13:51:58.933537282 +0000 UTC m=+1369.057478719" observedRunningTime="2025-11-28 13:52:00.687085808 +0000 UTC m=+1370.811027245" watchObservedRunningTime="2025-11-28 13:52:00.68909748 +0000 UTC m=+1370.813038917" Nov 28 13:52:00 crc kubenswrapper[4857]: I1128 13:52:00.718214 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 28 13:52:01 crc kubenswrapper[4857]: I1128 13:52:01.683994 4857 generic.go:334] "Generic (PLEG): container finished" podID="9b948893-d309-41ca-987c-287ee0b12ef2" containerID="c8b8591445954b9e685515c84c1d977b8750fa6e93237284c1d33ce8ceabed21" exitCode=0 Nov 28 13:52:01 crc kubenswrapper[4857]: I1128 13:52:01.684174 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9b948893-d309-41ca-987c-287ee0b12ef2","Type":"ContainerDied","Data":"c8b8591445954b9e685515c84c1d977b8750fa6e93237284c1d33ce8ceabed21"} Nov 28 13:52:01 crc kubenswrapper[4857]: I1128 13:52:01.693270 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" 
event={"ID":"d973a56d-fc8e-4cef-a590-d21d32242dc4","Type":"ContainerStarted","Data":"2393368f1d7599e575bd2865623f799eecabedba72c2c1b25e2a5dd440954069"} Nov 28 13:52:01 crc kubenswrapper[4857]: I1128 13:52:01.693319 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"d973a56d-fc8e-4cef-a590-d21d32242dc4","Type":"ContainerStarted","Data":"a5e7d1f145a9ce55b38a7c1ad7a592dcbaca7df0911162ddeba3a09a4c960457"} Nov 28 13:52:02 crc kubenswrapper[4857]: I1128 13:52:02.707493 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"d973a56d-fc8e-4cef-a590-d21d32242dc4","Type":"ContainerStarted","Data":"64a76382fda6f51faa4f808e7cb3cf14ee31449fa967229b15df56ca85a66806"} Nov 28 13:52:02 crc kubenswrapper[4857]: I1128 13:52:02.709822 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 28 13:52:02 crc kubenswrapper[4857]: I1128 13:52:02.746671 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.746639991 podStartE2EDuration="3.746639991s" podCreationTimestamp="2025-11-28 13:51:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:52:02.739993436 +0000 UTC m=+1372.863934913" watchObservedRunningTime="2025-11-28 13:52:02.746639991 +0000 UTC m=+1372.870581468" Nov 28 13:52:03 crc kubenswrapper[4857]: I1128 13:52:03.892441 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-68dfcc5468-bgz8k" Nov 28 13:52:03 crc kubenswrapper[4857]: I1128 13:52:03.920803 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-68dfcc5468-bgz8k" Nov 28 13:52:03 crc kubenswrapper[4857]: I1128 13:52:03.977079 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-f59f6f468-scr9q"] Nov 28 13:52:03 crc kubenswrapper[4857]: I1128 13:52:03.977421 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-f59f6f468-scr9q" podUID="b50ae785-eb22-4146-9a9e-9f85af67632c" containerName="barbican-api-log" containerID="cri-o://a5c0da2b65c34b00d3fbe2b28448d1bfb0ec06ae80ce69786b464ebef8e2a25c" gracePeriod=30 Nov 28 13:52:03 crc kubenswrapper[4857]: I1128 13:52:03.978431 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-f59f6f468-scr9q" podUID="b50ae785-eb22-4146-9a9e-9f85af67632c" containerName="barbican-api" containerID="cri-o://80755f1ba19e862974f4ce9bd788f0b8def82c945ad81d7dcef824cfe8524303" gracePeriod=30 Nov 28 13:52:03 crc kubenswrapper[4857]: I1128 13:52:03.989922 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-f59f6f468-scr9q" podUID="b50ae785-eb22-4146-9a9e-9f85af67632c" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.159:9311/healthcheck\": EOF" Nov 28 13:52:03 crc kubenswrapper[4857]: I1128 13:52:03.990209 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-f59f6f468-scr9q" podUID="b50ae785-eb22-4146-9a9e-9f85af67632c" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.159:9311/healthcheck\": EOF" Nov 28 13:52:04 crc kubenswrapper[4857]: I1128 13:52:04.748456 4857 generic.go:334] "Generic (PLEG): container finished" podID="b50ae785-eb22-4146-9a9e-9f85af67632c" 
containerID="a5c0da2b65c34b00d3fbe2b28448d1bfb0ec06ae80ce69786b464ebef8e2a25c" exitCode=143 Nov 28 13:52:04 crc kubenswrapper[4857]: I1128 13:52:04.748532 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-f59f6f468-scr9q" event={"ID":"b50ae785-eb22-4146-9a9e-9f85af67632c","Type":"ContainerDied","Data":"a5c0da2b65c34b00d3fbe2b28448d1bfb0ec06ae80ce69786b464ebef8e2a25c"} Nov 28 13:52:05 crc kubenswrapper[4857]: I1128 13:52:05.165248 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 28 13:52:05 crc kubenswrapper[4857]: I1128 13:52:05.400277 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 28 13:52:05 crc kubenswrapper[4857]: I1128 13:52:05.427162 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6578955fd5-rjfmd" Nov 28 13:52:05 crc kubenswrapper[4857]: I1128 13:52:05.488516 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-ngbtv"] Nov 28 13:52:05 crc kubenswrapper[4857]: I1128 13:52:05.488748 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-56df8fb6b7-ngbtv" podUID="1536ae25-51ba-49cc-af6f-7c7b9ceff289" containerName="dnsmasq-dns" containerID="cri-o://e3de1d602e550769f65ce7b67f332e8faf3bf620ee41df192da5a83fde0c9cea" gracePeriod=10 Nov 28 13:52:05 crc kubenswrapper[4857]: I1128 13:52:05.763784 4857 generic.go:334] "Generic (PLEG): container finished" podID="1536ae25-51ba-49cc-af6f-7c7b9ceff289" containerID="e3de1d602e550769f65ce7b67f332e8faf3bf620ee41df192da5a83fde0c9cea" exitCode=0 Nov 28 13:52:05 crc kubenswrapper[4857]: I1128 13:52:05.764610 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-ngbtv" event={"ID":"1536ae25-51ba-49cc-af6f-7c7b9ceff289","Type":"ContainerDied","Data":"e3de1d602e550769f65ce7b67f332e8faf3bf620ee41df192da5a83fde0c9cea"} Nov 28 13:52:05 crc kubenswrapper[4857]: I1128 13:52:05.905848 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 13:52:06 crc kubenswrapper[4857]: I1128 13:52:06.114036 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-ngbtv" Nov 28 13:52:06 crc kubenswrapper[4857]: I1128 13:52:06.275144 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1536ae25-51ba-49cc-af6f-7c7b9ceff289-dns-swift-storage-0\") pod \"1536ae25-51ba-49cc-af6f-7c7b9ceff289\" (UID: \"1536ae25-51ba-49cc-af6f-7c7b9ceff289\") " Nov 28 13:52:06 crc kubenswrapper[4857]: I1128 13:52:06.275205 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1536ae25-51ba-49cc-af6f-7c7b9ceff289-config\") pod \"1536ae25-51ba-49cc-af6f-7c7b9ceff289\" (UID: \"1536ae25-51ba-49cc-af6f-7c7b9ceff289\") " Nov 28 13:52:06 crc kubenswrapper[4857]: I1128 13:52:06.275317 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1536ae25-51ba-49cc-af6f-7c7b9ceff289-ovsdbserver-nb\") pod \"1536ae25-51ba-49cc-af6f-7c7b9ceff289\" (UID: \"1536ae25-51ba-49cc-af6f-7c7b9ceff289\") " Nov 28 13:52:06 crc kubenswrapper[4857]: I1128 13:52:06.275352 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1536ae25-51ba-49cc-af6f-7c7b9ceff289-dns-svc\") pod \"1536ae25-51ba-49cc-af6f-7c7b9ceff289\" (UID: \"1536ae25-51ba-49cc-af6f-7c7b9ceff289\") " Nov 28 13:52:06 crc kubenswrapper[4857]: I1128 13:52:06.275396 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t57sm\" (UniqueName: \"kubernetes.io/projected/1536ae25-51ba-49cc-af6f-7c7b9ceff289-kube-api-access-t57sm\") pod \"1536ae25-51ba-49cc-af6f-7c7b9ceff289\" (UID: \"1536ae25-51ba-49cc-af6f-7c7b9ceff289\") " Nov 28 13:52:06 crc kubenswrapper[4857]: I1128 13:52:06.275427 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1536ae25-51ba-49cc-af6f-7c7b9ceff289-ovsdbserver-sb\") pod \"1536ae25-51ba-49cc-af6f-7c7b9ceff289\" (UID: \"1536ae25-51ba-49cc-af6f-7c7b9ceff289\") " Nov 28 13:52:06 crc kubenswrapper[4857]: I1128 13:52:06.283286 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1536ae25-51ba-49cc-af6f-7c7b9ceff289-kube-api-access-t57sm" (OuterVolumeSpecName: "kube-api-access-t57sm") pod "1536ae25-51ba-49cc-af6f-7c7b9ceff289" (UID: "1536ae25-51ba-49cc-af6f-7c7b9ceff289"). InnerVolumeSpecName "kube-api-access-t57sm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:52:06 crc kubenswrapper[4857]: I1128 13:52:06.331547 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1536ae25-51ba-49cc-af6f-7c7b9ceff289-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1536ae25-51ba-49cc-af6f-7c7b9ceff289" (UID: "1536ae25-51ba-49cc-af6f-7c7b9ceff289"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:52:06 crc kubenswrapper[4857]: I1128 13:52:06.333205 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1536ae25-51ba-49cc-af6f-7c7b9ceff289-config" (OuterVolumeSpecName: "config") pod "1536ae25-51ba-49cc-af6f-7c7b9ceff289" (UID: "1536ae25-51ba-49cc-af6f-7c7b9ceff289"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:52:06 crc kubenswrapper[4857]: I1128 13:52:06.333390 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1536ae25-51ba-49cc-af6f-7c7b9ceff289-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "1536ae25-51ba-49cc-af6f-7c7b9ceff289" (UID: "1536ae25-51ba-49cc-af6f-7c7b9ceff289"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:52:06 crc kubenswrapper[4857]: I1128 13:52:06.338338 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1536ae25-51ba-49cc-af6f-7c7b9ceff289-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "1536ae25-51ba-49cc-af6f-7c7b9ceff289" (UID: "1536ae25-51ba-49cc-af6f-7c7b9ceff289"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:52:06 crc kubenswrapper[4857]: I1128 13:52:06.347598 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1536ae25-51ba-49cc-af6f-7c7b9ceff289-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "1536ae25-51ba-49cc-af6f-7c7b9ceff289" (UID: "1536ae25-51ba-49cc-af6f-7c7b9ceff289"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:52:06 crc kubenswrapper[4857]: I1128 13:52:06.383201 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1536ae25-51ba-49cc-af6f-7c7b9ceff289-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:06 crc kubenswrapper[4857]: I1128 13:52:06.383231 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1536ae25-51ba-49cc-af6f-7c7b9ceff289-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:06 crc kubenswrapper[4857]: I1128 13:52:06.383242 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t57sm\" (UniqueName: \"kubernetes.io/projected/1536ae25-51ba-49cc-af6f-7c7b9ceff289-kube-api-access-t57sm\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:06 crc kubenswrapper[4857]: I1128 13:52:06.383251 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1536ae25-51ba-49cc-af6f-7c7b9ceff289-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:06 crc kubenswrapper[4857]: I1128 13:52:06.383260 4857 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1536ae25-51ba-49cc-af6f-7c7b9ceff289-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:06 crc kubenswrapper[4857]: I1128 13:52:06.383269 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1536ae25-51ba-49cc-af6f-7c7b9ceff289-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:06 crc kubenswrapper[4857]: I1128 13:52:06.779253 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-ngbtv" event={"ID":"1536ae25-51ba-49cc-af6f-7c7b9ceff289","Type":"ContainerDied","Data":"422f7639f75cb11ba3e72c65f5d4ca9421c7fc1318e945c04382cd02ec50598d"} Nov 28 13:52:06 crc kubenswrapper[4857]: I1128 13:52:06.779330 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-ngbtv" Nov 28 13:52:06 crc kubenswrapper[4857]: I1128 13:52:06.779352 4857 scope.go:117] "RemoveContainer" containerID="e3de1d602e550769f65ce7b67f332e8faf3bf620ee41df192da5a83fde0c9cea" Nov 28 13:52:06 crc kubenswrapper[4857]: I1128 13:52:06.779442 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="0bc32938-ce78-40f6-b288-939f758103fb" containerName="cinder-scheduler" containerID="cri-o://e0bb363016e6f4cbc982924e7be917fee5e3be447b9969c215fd22c123388d00" gracePeriod=30 Nov 28 13:52:06 crc kubenswrapper[4857]: I1128 13:52:06.779588 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="0bc32938-ce78-40f6-b288-939f758103fb" containerName="probe" containerID="cri-o://9cfdecf7caf62c080c8cfd85adc522a1a171aa64e83ba6e3f857dabfa18a1e30" gracePeriod=30 Nov 28 13:52:06 crc kubenswrapper[4857]: I1128 13:52:06.817861 4857 scope.go:117] "RemoveContainer" containerID="7d2bd8e29d3a7fa4f57343292d152ea8bddb7c80c8eb3acb6f72875fcb9b851d" Nov 28 13:52:06 crc kubenswrapper[4857]: I1128 13:52:06.827093 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-ngbtv"] Nov 28 13:52:06 crc kubenswrapper[4857]: I1128 13:52:06.835973 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-ngbtv"] Nov 28 13:52:07 crc kubenswrapper[4857]: I1128 13:52:07.794225 4857 generic.go:334] "Generic (PLEG): container finished" podID="0bc32938-ce78-40f6-b288-939f758103fb" containerID="9cfdecf7caf62c080c8cfd85adc522a1a171aa64e83ba6e3f857dabfa18a1e30" exitCode=0 Nov 28 13:52:07 crc kubenswrapper[4857]: I1128 13:52:07.794337 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0bc32938-ce78-40f6-b288-939f758103fb","Type":"ContainerDied","Data":"9cfdecf7caf62c080c8cfd85adc522a1a171aa64e83ba6e3f857dabfa18a1e30"} Nov 28 13:52:08 crc kubenswrapper[4857]: I1128 13:52:08.241998 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1536ae25-51ba-49cc-af6f-7c7b9ceff289" path="/var/lib/kubelet/pods/1536ae25-51ba-49cc-af6f-7c7b9ceff289/volumes" Nov 28 13:52:08 crc kubenswrapper[4857]: I1128 13:52:08.274588 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="9b948893-d309-41ca-987c-287ee0b12ef2" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Nov 28 13:52:09 crc kubenswrapper[4857]: I1128 13:52:09.425217 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-f59f6f468-scr9q" podUID="b50ae785-eb22-4146-9a9e-9f85af67632c" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.159:9311/healthcheck\": read tcp 10.217.0.2:33638->10.217.0.159:9311: read: connection reset by peer" Nov 28 13:52:09 crc kubenswrapper[4857]: I1128 13:52:09.425261 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-f59f6f468-scr9q" podUID="b50ae785-eb22-4146-9a9e-9f85af67632c" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.159:9311/healthcheck\": read tcp 10.217.0.2:33640->10.217.0.159:9311: read: connection reset by peer" Nov 28 13:52:09 crc kubenswrapper[4857]: I1128 13:52:09.817406 4857 generic.go:334] "Generic (PLEG): container finished" podID="b50ae785-eb22-4146-9a9e-9f85af67632c" 
containerID="80755f1ba19e862974f4ce9bd788f0b8def82c945ad81d7dcef824cfe8524303" exitCode=0 Nov 28 13:52:09 crc kubenswrapper[4857]: I1128 13:52:09.817518 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-f59f6f468-scr9q" event={"ID":"b50ae785-eb22-4146-9a9e-9f85af67632c","Type":"ContainerDied","Data":"80755f1ba19e862974f4ce9bd788f0b8def82c945ad81d7dcef824cfe8524303"} Nov 28 13:52:09 crc kubenswrapper[4857]: I1128 13:52:09.905408 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-f59f6f468-scr9q" Nov 28 13:52:10 crc kubenswrapper[4857]: E1128 13:52:10.053919 4857 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4fd8fd83_9147_4bbd_80f4_1eb001e2673b.slice/crio-4723d92a5cff83c24a63924037be0671f86e6be917ad119264f1539ec104b7b7\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4fd8fd83_9147_4bbd_80f4_1eb001e2673b.slice\": RecentStats: unable to find data in memory cache]" Nov 28 13:52:10 crc kubenswrapper[4857]: I1128 13:52:10.060809 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dpp2v\" (UniqueName: \"kubernetes.io/projected/b50ae785-eb22-4146-9a9e-9f85af67632c-kube-api-access-dpp2v\") pod \"b50ae785-eb22-4146-9a9e-9f85af67632c\" (UID: \"b50ae785-eb22-4146-9a9e-9f85af67632c\") " Nov 28 13:52:10 crc kubenswrapper[4857]: I1128 13:52:10.061022 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b50ae785-eb22-4146-9a9e-9f85af67632c-config-data\") pod \"b50ae785-eb22-4146-9a9e-9f85af67632c\" (UID: \"b50ae785-eb22-4146-9a9e-9f85af67632c\") " Nov 28 13:52:10 crc kubenswrapper[4857]: I1128 13:52:10.061090 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b50ae785-eb22-4146-9a9e-9f85af67632c-logs\") pod \"b50ae785-eb22-4146-9a9e-9f85af67632c\" (UID: \"b50ae785-eb22-4146-9a9e-9f85af67632c\") " Nov 28 13:52:10 crc kubenswrapper[4857]: I1128 13:52:10.061142 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b50ae785-eb22-4146-9a9e-9f85af67632c-combined-ca-bundle\") pod \"b50ae785-eb22-4146-9a9e-9f85af67632c\" (UID: \"b50ae785-eb22-4146-9a9e-9f85af67632c\") " Nov 28 13:52:10 crc kubenswrapper[4857]: I1128 13:52:10.061238 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b50ae785-eb22-4146-9a9e-9f85af67632c-config-data-custom\") pod \"b50ae785-eb22-4146-9a9e-9f85af67632c\" (UID: \"b50ae785-eb22-4146-9a9e-9f85af67632c\") " Nov 28 13:52:10 crc kubenswrapper[4857]: I1128 13:52:10.061524 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b50ae785-eb22-4146-9a9e-9f85af67632c-logs" (OuterVolumeSpecName: "logs") pod "b50ae785-eb22-4146-9a9e-9f85af67632c" (UID: "b50ae785-eb22-4146-9a9e-9f85af67632c"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:52:10 crc kubenswrapper[4857]: I1128 13:52:10.063090 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b50ae785-eb22-4146-9a9e-9f85af67632c-logs\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:10 crc kubenswrapper[4857]: I1128 13:52:10.067308 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b50ae785-eb22-4146-9a9e-9f85af67632c-kube-api-access-dpp2v" (OuterVolumeSpecName: "kube-api-access-dpp2v") pod "b50ae785-eb22-4146-9a9e-9f85af67632c" (UID: "b50ae785-eb22-4146-9a9e-9f85af67632c"). InnerVolumeSpecName "kube-api-access-dpp2v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:52:10 crc kubenswrapper[4857]: I1128 13:52:10.091163 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b50ae785-eb22-4146-9a9e-9f85af67632c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b50ae785-eb22-4146-9a9e-9f85af67632c" (UID: "b50ae785-eb22-4146-9a9e-9f85af67632c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:52:10 crc kubenswrapper[4857]: I1128 13:52:10.091620 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b50ae785-eb22-4146-9a9e-9f85af67632c-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "b50ae785-eb22-4146-9a9e-9f85af67632c" (UID: "b50ae785-eb22-4146-9a9e-9f85af67632c"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:52:10 crc kubenswrapper[4857]: I1128 13:52:10.135516 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b50ae785-eb22-4146-9a9e-9f85af67632c-config-data" (OuterVolumeSpecName: "config-data") pod "b50ae785-eb22-4146-9a9e-9f85af67632c" (UID: "b50ae785-eb22-4146-9a9e-9f85af67632c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:52:10 crc kubenswrapper[4857]: I1128 13:52:10.164879 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dpp2v\" (UniqueName: \"kubernetes.io/projected/b50ae785-eb22-4146-9a9e-9f85af67632c-kube-api-access-dpp2v\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:10 crc kubenswrapper[4857]: I1128 13:52:10.165324 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b50ae785-eb22-4146-9a9e-9f85af67632c-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:10 crc kubenswrapper[4857]: I1128 13:52:10.165423 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b50ae785-eb22-4146-9a9e-9f85af67632c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:10 crc kubenswrapper[4857]: I1128 13:52:10.165509 4857 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b50ae785-eb22-4146-9a9e-9f85af67632c-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:10 crc kubenswrapper[4857]: I1128 13:52:10.834720 4857 generic.go:334] "Generic (PLEG): container finished" podID="0bc32938-ce78-40f6-b288-939f758103fb" containerID="e0bb363016e6f4cbc982924e7be917fee5e3be447b9969c215fd22c123388d00" exitCode=0 Nov 28 13:52:10 crc kubenswrapper[4857]: I1128 13:52:10.834794 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0bc32938-ce78-40f6-b288-939f758103fb","Type":"ContainerDied","Data":"e0bb363016e6f4cbc982924e7be917fee5e3be447b9969c215fd22c123388d00"} Nov 28 13:52:10 crc kubenswrapper[4857]: I1128 13:52:10.837071 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-f59f6f468-scr9q" event={"ID":"b50ae785-eb22-4146-9a9e-9f85af67632c","Type":"ContainerDied","Data":"fb4ec4691b50c4e88b317977963163991d47a883448c6c4d030f4ab79e69a043"} Nov 28 13:52:10 crc kubenswrapper[4857]: I1128 13:52:10.837104 4857 scope.go:117] "RemoveContainer" containerID="80755f1ba19e862974f4ce9bd788f0b8def82c945ad81d7dcef824cfe8524303" Nov 28 13:52:10 crc kubenswrapper[4857]: I1128 13:52:10.837129 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-f59f6f468-scr9q" Nov 28 13:52:10 crc kubenswrapper[4857]: I1128 13:52:10.867230 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-f59f6f468-scr9q"] Nov 28 13:52:10 crc kubenswrapper[4857]: I1128 13:52:10.878036 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-f59f6f468-scr9q"] Nov 28 13:52:10 crc kubenswrapper[4857]: I1128 13:52:10.880812 4857 scope.go:117] "RemoveContainer" containerID="a5c0da2b65c34b00d3fbe2b28448d1bfb0ec06ae80ce69786b464ebef8e2a25c" Nov 28 13:52:10 crc kubenswrapper[4857]: I1128 13:52:10.981689 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.079614 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0bc32938-ce78-40f6-b288-939f758103fb-scripts\") pod \"0bc32938-ce78-40f6-b288-939f758103fb\" (UID: \"0bc32938-ce78-40f6-b288-939f758103fb\") " Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.079849 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bc32938-ce78-40f6-b288-939f758103fb-config-data\") pod \"0bc32938-ce78-40f6-b288-939f758103fb\" (UID: \"0bc32938-ce78-40f6-b288-939f758103fb\") " Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.079905 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0bc32938-ce78-40f6-b288-939f758103fb-config-data-custom\") pod \"0bc32938-ce78-40f6-b288-939f758103fb\" (UID: \"0bc32938-ce78-40f6-b288-939f758103fb\") " Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.079937 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wgrw9\" (UniqueName: \"kubernetes.io/projected/0bc32938-ce78-40f6-b288-939f758103fb-kube-api-access-wgrw9\") pod \"0bc32938-ce78-40f6-b288-939f758103fb\" (UID: \"0bc32938-ce78-40f6-b288-939f758103fb\") " Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.079999 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bc32938-ce78-40f6-b288-939f758103fb-combined-ca-bundle\") pod \"0bc32938-ce78-40f6-b288-939f758103fb\" (UID: \"0bc32938-ce78-40f6-b288-939f758103fb\") " Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.080033 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0bc32938-ce78-40f6-b288-939f758103fb-etc-machine-id\") pod \"0bc32938-ce78-40f6-b288-939f758103fb\" (UID: \"0bc32938-ce78-40f6-b288-939f758103fb\") " Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.080511 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0bc32938-ce78-40f6-b288-939f758103fb-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "0bc32938-ce78-40f6-b288-939f758103fb" (UID: "0bc32938-ce78-40f6-b288-939f758103fb"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.087291 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0bc32938-ce78-40f6-b288-939f758103fb-kube-api-access-wgrw9" (OuterVolumeSpecName: "kube-api-access-wgrw9") pod "0bc32938-ce78-40f6-b288-939f758103fb" (UID: "0bc32938-ce78-40f6-b288-939f758103fb"). InnerVolumeSpecName "kube-api-access-wgrw9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.087390 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bc32938-ce78-40f6-b288-939f758103fb-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "0bc32938-ce78-40f6-b288-939f758103fb" (UID: "0bc32938-ce78-40f6-b288-939f758103fb"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.098782 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bc32938-ce78-40f6-b288-939f758103fb-scripts" (OuterVolumeSpecName: "scripts") pod "0bc32938-ce78-40f6-b288-939f758103fb" (UID: "0bc32938-ce78-40f6-b288-939f758103fb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.150921 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bc32938-ce78-40f6-b288-939f758103fb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0bc32938-ce78-40f6-b288-939f758103fb" (UID: "0bc32938-ce78-40f6-b288-939f758103fb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.181638 4857 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0bc32938-ce78-40f6-b288-939f758103fb-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.181663 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wgrw9\" (UniqueName: \"kubernetes.io/projected/0bc32938-ce78-40f6-b288-939f758103fb-kube-api-access-wgrw9\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.181674 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bc32938-ce78-40f6-b288-939f758103fb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.181682 4857 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0bc32938-ce78-40f6-b288-939f758103fb-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.181692 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0bc32938-ce78-40f6-b288-939f758103fb-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.249351 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bc32938-ce78-40f6-b288-939f758103fb-config-data" (OuterVolumeSpecName: "config-data") pod "0bc32938-ce78-40f6-b288-939f758103fb" (UID: "0bc32938-ce78-40f6-b288-939f758103fb"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.283333 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0bc32938-ce78-40f6-b288-939f758103fb-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.308684 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.308753 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.848195 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0bc32938-ce78-40f6-b288-939f758103fb","Type":"ContainerDied","Data":"7313f6fc12d83404a717e915ec7f608e8660a762e895d602cb67c810b845037b"} Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.848258 4857 scope.go:117] "RemoveContainer" containerID="9cfdecf7caf62c080c8cfd85adc522a1a171aa64e83ba6e3f857dabfa18a1e30" Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.848269 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.873610 4857 scope.go:117] "RemoveContainer" containerID="e0bb363016e6f4cbc982924e7be917fee5e3be447b9969c215fd22c123388d00" Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.888858 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.901076 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.914508 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 13:52:11 crc kubenswrapper[4857]: E1128 13:52:11.915076 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b50ae785-eb22-4146-9a9e-9f85af67632c" containerName="barbican-api" Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.915095 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="b50ae785-eb22-4146-9a9e-9f85af67632c" containerName="barbican-api" Nov 28 13:52:11 crc kubenswrapper[4857]: E1128 13:52:11.915115 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0bc32938-ce78-40f6-b288-939f758103fb" containerName="probe" Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.915123 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="0bc32938-ce78-40f6-b288-939f758103fb" containerName="probe" Nov 28 13:52:11 crc kubenswrapper[4857]: E1128 13:52:11.915155 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b50ae785-eb22-4146-9a9e-9f85af67632c" containerName="barbican-api-log" Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.915163 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="b50ae785-eb22-4146-9a9e-9f85af67632c" containerName="barbican-api-log" Nov 28 13:52:11 crc 
kubenswrapper[4857]: E1128 13:52:11.915176 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0bc32938-ce78-40f6-b288-939f758103fb" containerName="cinder-scheduler" Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.915183 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="0bc32938-ce78-40f6-b288-939f758103fb" containerName="cinder-scheduler" Nov 28 13:52:11 crc kubenswrapper[4857]: E1128 13:52:11.915199 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1536ae25-51ba-49cc-af6f-7c7b9ceff289" containerName="init" Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.915206 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="1536ae25-51ba-49cc-af6f-7c7b9ceff289" containerName="init" Nov 28 13:52:11 crc kubenswrapper[4857]: E1128 13:52:11.915220 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1536ae25-51ba-49cc-af6f-7c7b9ceff289" containerName="dnsmasq-dns" Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.915227 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="1536ae25-51ba-49cc-af6f-7c7b9ceff289" containerName="dnsmasq-dns" Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.915445 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="b50ae785-eb22-4146-9a9e-9f85af67632c" containerName="barbican-api-log" Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.915477 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="1536ae25-51ba-49cc-af6f-7c7b9ceff289" containerName="dnsmasq-dns" Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.915490 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="0bc32938-ce78-40f6-b288-939f758103fb" containerName="cinder-scheduler" Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.915498 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="b50ae785-eb22-4146-9a9e-9f85af67632c" containerName="barbican-api" Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.915513 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="0bc32938-ce78-40f6-b288-939f758103fb" containerName="probe" Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.926598 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.930797 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 13:52:11 crc kubenswrapper[4857]: I1128 13:52:11.934513 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 28 13:52:12 crc kubenswrapper[4857]: I1128 13:52:12.099044 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c\") " pod="openstack/cinder-scheduler-0" Nov 28 13:52:12 crc kubenswrapper[4857]: I1128 13:52:12.099084 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c-config-data\") pod \"cinder-scheduler-0\" (UID: \"9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c\") " pod="openstack/cinder-scheduler-0" Nov 28 13:52:12 crc kubenswrapper[4857]: I1128 13:52:12.099119 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c\") " pod="openstack/cinder-scheduler-0" Nov 28 13:52:12 crc kubenswrapper[4857]: I1128 13:52:12.099152 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c-scripts\") pod \"cinder-scheduler-0\" (UID: \"9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c\") " pod="openstack/cinder-scheduler-0" Nov 28 13:52:12 crc kubenswrapper[4857]: I1128 13:52:12.099170 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fpvdq\" (UniqueName: \"kubernetes.io/projected/9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c-kube-api-access-fpvdq\") pod \"cinder-scheduler-0\" (UID: \"9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c\") " pod="openstack/cinder-scheduler-0" Nov 28 13:52:12 crc kubenswrapper[4857]: I1128 13:52:12.099236 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c\") " pod="openstack/cinder-scheduler-0" Nov 28 13:52:12 crc kubenswrapper[4857]: I1128 13:52:12.201098 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c\") " pod="openstack/cinder-scheduler-0" Nov 28 13:52:12 crc kubenswrapper[4857]: I1128 13:52:12.201143 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c-config-data\") pod \"cinder-scheduler-0\" (UID: \"9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c\") " pod="openstack/cinder-scheduler-0" Nov 28 13:52:12 crc kubenswrapper[4857]: I1128 13:52:12.201180 4857 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c\") " pod="openstack/cinder-scheduler-0" Nov 28 13:52:12 crc kubenswrapper[4857]: I1128 13:52:12.201215 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c-scripts\") pod \"cinder-scheduler-0\" (UID: \"9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c\") " pod="openstack/cinder-scheduler-0" Nov 28 13:52:12 crc kubenswrapper[4857]: I1128 13:52:12.201233 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fpvdq\" (UniqueName: \"kubernetes.io/projected/9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c-kube-api-access-fpvdq\") pod \"cinder-scheduler-0\" (UID: \"9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c\") " pod="openstack/cinder-scheduler-0" Nov 28 13:52:12 crc kubenswrapper[4857]: I1128 13:52:12.201298 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c\") " pod="openstack/cinder-scheduler-0" Nov 28 13:52:12 crc kubenswrapper[4857]: I1128 13:52:12.201329 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c\") " pod="openstack/cinder-scheduler-0" Nov 28 13:52:12 crc kubenswrapper[4857]: I1128 13:52:12.207003 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c-config-data\") pod \"cinder-scheduler-0\" (UID: \"9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c\") " pod="openstack/cinder-scheduler-0" Nov 28 13:52:12 crc kubenswrapper[4857]: I1128 13:52:12.209476 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c\") " pod="openstack/cinder-scheduler-0" Nov 28 13:52:12 crc kubenswrapper[4857]: I1128 13:52:12.215448 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c-scripts\") pod \"cinder-scheduler-0\" (UID: \"9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c\") " pod="openstack/cinder-scheduler-0" Nov 28 13:52:12 crc kubenswrapper[4857]: I1128 13:52:12.216367 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c\") " pod="openstack/cinder-scheduler-0" Nov 28 13:52:12 crc kubenswrapper[4857]: I1128 13:52:12.219754 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fpvdq\" (UniqueName: \"kubernetes.io/projected/9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c-kube-api-access-fpvdq\") pod \"cinder-scheduler-0\" (UID: \"9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c\") " pod="openstack/cinder-scheduler-0" Nov 28 
13:52:12 crc kubenswrapper[4857]: I1128 13:52:12.240983 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0bc32938-ce78-40f6-b288-939f758103fb" path="/var/lib/kubelet/pods/0bc32938-ce78-40f6-b288-939f758103fb/volumes" Nov 28 13:52:12 crc kubenswrapper[4857]: I1128 13:52:12.241984 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b50ae785-eb22-4146-9a9e-9f85af67632c" path="/var/lib/kubelet/pods/b50ae785-eb22-4146-9a9e-9f85af67632c/volumes" Nov 28 13:52:12 crc kubenswrapper[4857]: I1128 13:52:12.250476 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 28 13:52:12 crc kubenswrapper[4857]: I1128 13:52:12.262353 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 13:52:12 crc kubenswrapper[4857]: I1128 13:52:12.785456 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 13:52:12 crc kubenswrapper[4857]: W1128 13:52:12.787157 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9ee51cc1_1ef7_4b41_92a4_eeb50ee0247c.slice/crio-e67a29c57992976bef66f65980c56527ff2dccd86514f6a38ee24b1cf6b1a217 WatchSource:0}: Error finding container e67a29c57992976bef66f65980c56527ff2dccd86514f6a38ee24b1cf6b1a217: Status 404 returned error can't find the container with id e67a29c57992976bef66f65980c56527ff2dccd86514f6a38ee24b1cf6b1a217 Nov 28 13:52:12 crc kubenswrapper[4857]: I1128 13:52:12.857351 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c","Type":"ContainerStarted","Data":"e67a29c57992976bef66f65980c56527ff2dccd86514f6a38ee24b1cf6b1a217"} Nov 28 13:52:13 crc kubenswrapper[4857]: I1128 13:52:13.871163 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c","Type":"ContainerStarted","Data":"e34bc2b2baed17dc94ca9dc3ed8f2bf2da5b6e0bf67630dbb959a6581e276f47"} Nov 28 13:52:13 crc kubenswrapper[4857]: I1128 13:52:13.893254 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-794cbbfc48-m96jr" Nov 28 13:52:13 crc kubenswrapper[4857]: I1128 13:52:13.894068 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-794cbbfc48-m96jr" Nov 28 13:52:14 crc kubenswrapper[4857]: I1128 13:52:14.707799 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-5b64c5866d-mkt8b" Nov 28 13:52:14 crc kubenswrapper[4857]: I1128 13:52:14.814780 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-b48bfd44-jstx9" Nov 28 13:52:14 crc kubenswrapper[4857]: I1128 13:52:14.892442 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c","Type":"ContainerStarted","Data":"949cd8e48f0fc8953dd188f71049a00ec864bfbb7da60444a68266e465fb949e"} Nov 28 13:52:14 crc kubenswrapper[4857]: I1128 13:52:14.919979 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.919938781 podStartE2EDuration="3.919938781s" podCreationTimestamp="2025-11-28 13:52:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-11-28 13:52:14.905351077 +0000 UTC m=+1385.029292524" watchObservedRunningTime="2025-11-28 13:52:14.919938781 +0000 UTC m=+1385.043880228" Nov 28 13:52:17 crc kubenswrapper[4857]: I1128 13:52:17.262775 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 28 13:52:17 crc kubenswrapper[4857]: I1128 13:52:17.665131 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-6b7846d5d5-ddbqf" Nov 28 13:52:17 crc kubenswrapper[4857]: I1128 13:52:17.720704 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-b48bfd44-jstx9"] Nov 28 13:52:17 crc kubenswrapper[4857]: I1128 13:52:17.720909 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-b48bfd44-jstx9" podUID="e88c2b4b-8abf-4d38-a17f-b5231a0a254e" containerName="neutron-api" containerID="cri-o://54a7726b2191651fd67bcae0c2e74b8b56597f703b356cdffa64c067aea302e9" gracePeriod=30 Nov 28 13:52:17 crc kubenswrapper[4857]: I1128 13:52:17.721346 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-b48bfd44-jstx9" podUID="e88c2b4b-8abf-4d38-a17f-b5231a0a254e" containerName="neutron-httpd" containerID="cri-o://f6d1f70b6b825af6a00c0b03c85ab2fbf69835476846543de4e466d40c7f0e73" gracePeriod=30 Nov 28 13:52:17 crc kubenswrapper[4857]: I1128 13:52:17.920708 4857 generic.go:334] "Generic (PLEG): container finished" podID="e88c2b4b-8abf-4d38-a17f-b5231a0a254e" containerID="f6d1f70b6b825af6a00c0b03c85ab2fbf69835476846543de4e466d40c7f0e73" exitCode=0 Nov 28 13:52:17 crc kubenswrapper[4857]: I1128 13:52:17.920777 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-b48bfd44-jstx9" event={"ID":"e88c2b4b-8abf-4d38-a17f-b5231a0a254e","Type":"ContainerDied","Data":"f6d1f70b6b825af6a00c0b03c85ab2fbf69835476846543de4e466d40c7f0e73"} Nov 28 13:52:18 crc kubenswrapper[4857]: I1128 13:52:18.399208 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 28 13:52:18 crc kubenswrapper[4857]: I1128 13:52:18.400255 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 28 13:52:18 crc kubenswrapper[4857]: I1128 13:52:18.403973 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 28 13:52:18 crc kubenswrapper[4857]: I1128 13:52:18.404121 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-mf4wf" Nov 28 13:52:18 crc kubenswrapper[4857]: I1128 13:52:18.405097 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 28 13:52:18 crc kubenswrapper[4857]: I1128 13:52:18.418040 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 28 13:52:18 crc kubenswrapper[4857]: I1128 13:52:18.421725 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/631cbfc6-824a-4c9f-bbae-34da2ce2f8b3-combined-ca-bundle\") pod \"openstackclient\" (UID: \"631cbfc6-824a-4c9f-bbae-34da2ce2f8b3\") " pod="openstack/openstackclient" Nov 28 13:52:18 crc kubenswrapper[4857]: I1128 13:52:18.421835 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jx6cq\" (UniqueName: \"kubernetes.io/projected/631cbfc6-824a-4c9f-bbae-34da2ce2f8b3-kube-api-access-jx6cq\") pod \"openstackclient\" (UID: \"631cbfc6-824a-4c9f-bbae-34da2ce2f8b3\") " pod="openstack/openstackclient" Nov 28 13:52:18 crc kubenswrapper[4857]: I1128 13:52:18.422000 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/631cbfc6-824a-4c9f-bbae-34da2ce2f8b3-openstack-config-secret\") pod \"openstackclient\" (UID: \"631cbfc6-824a-4c9f-bbae-34da2ce2f8b3\") " pod="openstack/openstackclient" Nov 28 13:52:18 crc kubenswrapper[4857]: I1128 13:52:18.422066 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/631cbfc6-824a-4c9f-bbae-34da2ce2f8b3-openstack-config\") pod \"openstackclient\" (UID: \"631cbfc6-824a-4c9f-bbae-34da2ce2f8b3\") " pod="openstack/openstackclient" Nov 28 13:52:18 crc kubenswrapper[4857]: I1128 13:52:18.523051 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/631cbfc6-824a-4c9f-bbae-34da2ce2f8b3-combined-ca-bundle\") pod \"openstackclient\" (UID: \"631cbfc6-824a-4c9f-bbae-34da2ce2f8b3\") " pod="openstack/openstackclient" Nov 28 13:52:18 crc kubenswrapper[4857]: I1128 13:52:18.523154 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jx6cq\" (UniqueName: \"kubernetes.io/projected/631cbfc6-824a-4c9f-bbae-34da2ce2f8b3-kube-api-access-jx6cq\") pod \"openstackclient\" (UID: \"631cbfc6-824a-4c9f-bbae-34da2ce2f8b3\") " pod="openstack/openstackclient" Nov 28 13:52:18 crc kubenswrapper[4857]: I1128 13:52:18.523197 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/631cbfc6-824a-4c9f-bbae-34da2ce2f8b3-openstack-config-secret\") pod \"openstackclient\" (UID: \"631cbfc6-824a-4c9f-bbae-34da2ce2f8b3\") " pod="openstack/openstackclient" Nov 28 13:52:18 crc kubenswrapper[4857]: I1128 13:52:18.523215 4857 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/631cbfc6-824a-4c9f-bbae-34da2ce2f8b3-openstack-config\") pod \"openstackclient\" (UID: \"631cbfc6-824a-4c9f-bbae-34da2ce2f8b3\") " pod="openstack/openstackclient" Nov 28 13:52:18 crc kubenswrapper[4857]: I1128 13:52:18.523988 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/631cbfc6-824a-4c9f-bbae-34da2ce2f8b3-openstack-config\") pod \"openstackclient\" (UID: \"631cbfc6-824a-4c9f-bbae-34da2ce2f8b3\") " pod="openstack/openstackclient" Nov 28 13:52:18 crc kubenswrapper[4857]: I1128 13:52:18.529934 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/631cbfc6-824a-4c9f-bbae-34da2ce2f8b3-combined-ca-bundle\") pod \"openstackclient\" (UID: \"631cbfc6-824a-4c9f-bbae-34da2ce2f8b3\") " pod="openstack/openstackclient" Nov 28 13:52:18 crc kubenswrapper[4857]: I1128 13:52:18.532334 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/631cbfc6-824a-4c9f-bbae-34da2ce2f8b3-openstack-config-secret\") pod \"openstackclient\" (UID: \"631cbfc6-824a-4c9f-bbae-34da2ce2f8b3\") " pod="openstack/openstackclient" Nov 28 13:52:18 crc kubenswrapper[4857]: I1128 13:52:18.539714 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jx6cq\" (UniqueName: \"kubernetes.io/projected/631cbfc6-824a-4c9f-bbae-34da2ce2f8b3-kube-api-access-jx6cq\") pod \"openstackclient\" (UID: \"631cbfc6-824a-4c9f-bbae-34da2ce2f8b3\") " pod="openstack/openstackclient" Nov 28 13:52:18 crc kubenswrapper[4857]: I1128 13:52:18.719481 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 28 13:52:18 crc kubenswrapper[4857]: I1128 13:52:18.819371 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Nov 28 13:52:18 crc kubenswrapper[4857]: I1128 13:52:18.858462 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Nov 28 13:52:18 crc kubenswrapper[4857]: I1128 13:52:18.874596 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 28 13:52:18 crc kubenswrapper[4857]: I1128 13:52:18.884183 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 28 13:52:18 crc kubenswrapper[4857]: I1128 13:52:18.901017 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 28 13:52:19 crc kubenswrapper[4857]: I1128 13:52:19.039234 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/c3d9d9f7-3d10-4677-aaef-60d731a33857-openstack-config-secret\") pod \"openstackclient\" (UID: \"c3d9d9f7-3d10-4677-aaef-60d731a33857\") " pod="openstack/openstackclient" Nov 28 13:52:19 crc kubenswrapper[4857]: I1128 13:52:19.039303 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3d9d9f7-3d10-4677-aaef-60d731a33857-combined-ca-bundle\") pod \"openstackclient\" (UID: \"c3d9d9f7-3d10-4677-aaef-60d731a33857\") " pod="openstack/openstackclient" Nov 28 13:52:19 crc kubenswrapper[4857]: I1128 13:52:19.039328 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/c3d9d9f7-3d10-4677-aaef-60d731a33857-openstack-config\") pod \"openstackclient\" (UID: \"c3d9d9f7-3d10-4677-aaef-60d731a33857\") " pod="openstack/openstackclient" Nov 28 13:52:19 crc kubenswrapper[4857]: I1128 13:52:19.039357 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h8kqf\" (UniqueName: \"kubernetes.io/projected/c3d9d9f7-3d10-4677-aaef-60d731a33857-kube-api-access-h8kqf\") pod \"openstackclient\" (UID: \"c3d9d9f7-3d10-4677-aaef-60d731a33857\") " pod="openstack/openstackclient" Nov 28 13:52:19 crc kubenswrapper[4857]: E1128 13:52:19.057695 4857 log.go:32] "RunPodSandbox from runtime service failed" err=< Nov 28 13:52:19 crc kubenswrapper[4857]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_openstackclient_openstack_631cbfc6-824a-4c9f-bbae-34da2ce2f8b3_0(37205f511b3543fc33d5f6cd9e1abf3955d5e0dbd7093ac2134d59510df0beb9): error adding pod openstack_openstackclient to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"37205f511b3543fc33d5f6cd9e1abf3955d5e0dbd7093ac2134d59510df0beb9" Netns:"/var/run/netns/e5659324-e7b2-4c5e-b476-cb12955cba3a" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openstack;K8S_POD_NAME=openstackclient;K8S_POD_INFRA_CONTAINER_ID=37205f511b3543fc33d5f6cd9e1abf3955d5e0dbd7093ac2134d59510df0beb9;K8S_POD_UID=631cbfc6-824a-4c9f-bbae-34da2ce2f8b3" Path:"" ERRORED: error configuring pod [openstack/openstackclient] networking: [openstack/openstackclient/631cbfc6-824a-4c9f-bbae-34da2ce2f8b3:ovn-kubernetes]: error adding container to network "ovn-kubernetes": CNI request failed with status 400: '[openstack/openstackclient 37205f511b3543fc33d5f6cd9e1abf3955d5e0dbd7093ac2134d59510df0beb9 network default NAD default] [openstack/openstackclient 37205f511b3543fc33d5f6cd9e1abf3955d5e0dbd7093ac2134d59510df0beb9 network default NAD default] pod deleted before sandbox ADD operation began Nov 28 13:52:19 crc kubenswrapper[4857]: ' Nov 28 13:52:19 crc kubenswrapper[4857]: ': StdinData: 
{"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 28 13:52:19 crc kubenswrapper[4857]: > Nov 28 13:52:19 crc kubenswrapper[4857]: E1128 13:52:19.057765 4857 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err=< Nov 28 13:52:19 crc kubenswrapper[4857]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_openstackclient_openstack_631cbfc6-824a-4c9f-bbae-34da2ce2f8b3_0(37205f511b3543fc33d5f6cd9e1abf3955d5e0dbd7093ac2134d59510df0beb9): error adding pod openstack_openstackclient to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"37205f511b3543fc33d5f6cd9e1abf3955d5e0dbd7093ac2134d59510df0beb9" Netns:"/var/run/netns/e5659324-e7b2-4c5e-b476-cb12955cba3a" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openstack;K8S_POD_NAME=openstackclient;K8S_POD_INFRA_CONTAINER_ID=37205f511b3543fc33d5f6cd9e1abf3955d5e0dbd7093ac2134d59510df0beb9;K8S_POD_UID=631cbfc6-824a-4c9f-bbae-34da2ce2f8b3" Path:"" ERRORED: error configuring pod [openstack/openstackclient] networking: [openstack/openstackclient/631cbfc6-824a-4c9f-bbae-34da2ce2f8b3:ovn-kubernetes]: error adding container to network "ovn-kubernetes": CNI request failed with status 400: '[openstack/openstackclient 37205f511b3543fc33d5f6cd9e1abf3955d5e0dbd7093ac2134d59510df0beb9 network default NAD default] [openstack/openstackclient 37205f511b3543fc33d5f6cd9e1abf3955d5e0dbd7093ac2134d59510df0beb9 network default NAD default] pod deleted before sandbox ADD operation began Nov 28 13:52:19 crc kubenswrapper[4857]: ' Nov 28 13:52:19 crc kubenswrapper[4857]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 28 13:52:19 crc kubenswrapper[4857]: > pod="openstack/openstackclient" Nov 28 13:52:19 crc kubenswrapper[4857]: I1128 13:52:19.141652 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/c3d9d9f7-3d10-4677-aaef-60d731a33857-openstack-config-secret\") pod \"openstackclient\" (UID: \"c3d9d9f7-3d10-4677-aaef-60d731a33857\") " pod="openstack/openstackclient" Nov 28 13:52:19 crc kubenswrapper[4857]: I1128 13:52:19.141719 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3d9d9f7-3d10-4677-aaef-60d731a33857-combined-ca-bundle\") pod \"openstackclient\" (UID: \"c3d9d9f7-3d10-4677-aaef-60d731a33857\") " pod="openstack/openstackclient" Nov 28 13:52:19 crc kubenswrapper[4857]: I1128 13:52:19.141755 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/c3d9d9f7-3d10-4677-aaef-60d731a33857-openstack-config\") pod \"openstackclient\" (UID: \"c3d9d9f7-3d10-4677-aaef-60d731a33857\") " 
pod="openstack/openstackclient" Nov 28 13:52:19 crc kubenswrapper[4857]: I1128 13:52:19.141794 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8kqf\" (UniqueName: \"kubernetes.io/projected/c3d9d9f7-3d10-4677-aaef-60d731a33857-kube-api-access-h8kqf\") pod \"openstackclient\" (UID: \"c3d9d9f7-3d10-4677-aaef-60d731a33857\") " pod="openstack/openstackclient" Nov 28 13:52:19 crc kubenswrapper[4857]: I1128 13:52:19.142768 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/c3d9d9f7-3d10-4677-aaef-60d731a33857-openstack-config\") pod \"openstackclient\" (UID: \"c3d9d9f7-3d10-4677-aaef-60d731a33857\") " pod="openstack/openstackclient" Nov 28 13:52:19 crc kubenswrapper[4857]: I1128 13:52:19.146619 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/c3d9d9f7-3d10-4677-aaef-60d731a33857-openstack-config-secret\") pod \"openstackclient\" (UID: \"c3d9d9f7-3d10-4677-aaef-60d731a33857\") " pod="openstack/openstackclient" Nov 28 13:52:19 crc kubenswrapper[4857]: I1128 13:52:19.148324 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3d9d9f7-3d10-4677-aaef-60d731a33857-combined-ca-bundle\") pod \"openstackclient\" (UID: \"c3d9d9f7-3d10-4677-aaef-60d731a33857\") " pod="openstack/openstackclient" Nov 28 13:52:19 crc kubenswrapper[4857]: I1128 13:52:19.158978 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h8kqf\" (UniqueName: \"kubernetes.io/projected/c3d9d9f7-3d10-4677-aaef-60d731a33857-kube-api-access-h8kqf\") pod \"openstackclient\" (UID: \"c3d9d9f7-3d10-4677-aaef-60d731a33857\") " pod="openstack/openstackclient" Nov 28 13:52:19 crc kubenswrapper[4857]: I1128 13:52:19.211747 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 28 13:52:19 crc kubenswrapper[4857]: I1128 13:52:19.656046 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 28 13:52:19 crc kubenswrapper[4857]: I1128 13:52:19.950264 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"c3d9d9f7-3d10-4677-aaef-60d731a33857","Type":"ContainerStarted","Data":"72d602483baf5c720b4ab7eca6292675325ad5c01f68425a99133b9d1400d212"} Nov 28 13:52:19 crc kubenswrapper[4857]: I1128 13:52:19.950290 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 28 13:52:19 crc kubenswrapper[4857]: I1128 13:52:19.955630 4857 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="631cbfc6-824a-4c9f-bbae-34da2ce2f8b3" podUID="c3d9d9f7-3d10-4677-aaef-60d731a33857" Nov 28 13:52:20 crc kubenswrapper[4857]: I1128 13:52:20.612747 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 28 13:52:20 crc kubenswrapper[4857]: I1128 13:52:20.617189 4857 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="631cbfc6-824a-4c9f-bbae-34da2ce2f8b3" podUID="c3d9d9f7-3d10-4677-aaef-60d731a33857" Nov 28 13:52:20 crc kubenswrapper[4857]: I1128 13:52:20.795379 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/631cbfc6-824a-4c9f-bbae-34da2ce2f8b3-openstack-config-secret\") pod \"631cbfc6-824a-4c9f-bbae-34da2ce2f8b3\" (UID: \"631cbfc6-824a-4c9f-bbae-34da2ce2f8b3\") " Nov 28 13:52:20 crc kubenswrapper[4857]: I1128 13:52:20.796327 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jx6cq\" (UniqueName: \"kubernetes.io/projected/631cbfc6-824a-4c9f-bbae-34da2ce2f8b3-kube-api-access-jx6cq\") pod \"631cbfc6-824a-4c9f-bbae-34da2ce2f8b3\" (UID: \"631cbfc6-824a-4c9f-bbae-34da2ce2f8b3\") " Nov 28 13:52:20 crc kubenswrapper[4857]: I1128 13:52:20.796499 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/631cbfc6-824a-4c9f-bbae-34da2ce2f8b3-combined-ca-bundle\") pod \"631cbfc6-824a-4c9f-bbae-34da2ce2f8b3\" (UID: \"631cbfc6-824a-4c9f-bbae-34da2ce2f8b3\") " Nov 28 13:52:20 crc kubenswrapper[4857]: I1128 13:52:20.796673 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/631cbfc6-824a-4c9f-bbae-34da2ce2f8b3-openstack-config\") pod \"631cbfc6-824a-4c9f-bbae-34da2ce2f8b3\" (UID: \"631cbfc6-824a-4c9f-bbae-34da2ce2f8b3\") " Nov 28 13:52:20 crc kubenswrapper[4857]: I1128 13:52:20.797129 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/631cbfc6-824a-4c9f-bbae-34da2ce2f8b3-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "631cbfc6-824a-4c9f-bbae-34da2ce2f8b3" (UID: "631cbfc6-824a-4c9f-bbae-34da2ce2f8b3"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:52:20 crc kubenswrapper[4857]: I1128 13:52:20.797434 4857 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/631cbfc6-824a-4c9f-bbae-34da2ce2f8b3-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:20 crc kubenswrapper[4857]: I1128 13:52:20.800814 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/631cbfc6-824a-4c9f-bbae-34da2ce2f8b3-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "631cbfc6-824a-4c9f-bbae-34da2ce2f8b3" (UID: "631cbfc6-824a-4c9f-bbae-34da2ce2f8b3"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:52:20 crc kubenswrapper[4857]: I1128 13:52:20.808140 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/631cbfc6-824a-4c9f-bbae-34da2ce2f8b3-kube-api-access-jx6cq" (OuterVolumeSpecName: "kube-api-access-jx6cq") pod "631cbfc6-824a-4c9f-bbae-34da2ce2f8b3" (UID: "631cbfc6-824a-4c9f-bbae-34da2ce2f8b3"). InnerVolumeSpecName "kube-api-access-jx6cq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:52:20 crc kubenswrapper[4857]: I1128 13:52:20.820231 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/631cbfc6-824a-4c9f-bbae-34da2ce2f8b3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "631cbfc6-824a-4c9f-bbae-34da2ce2f8b3" (UID: "631cbfc6-824a-4c9f-bbae-34da2ce2f8b3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:52:20 crc kubenswrapper[4857]: I1128 13:52:20.901308 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jx6cq\" (UniqueName: \"kubernetes.io/projected/631cbfc6-824a-4c9f-bbae-34da2ce2f8b3-kube-api-access-jx6cq\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:20 crc kubenswrapper[4857]: I1128 13:52:20.901676 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/631cbfc6-824a-4c9f-bbae-34da2ce2f8b3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:20 crc kubenswrapper[4857]: I1128 13:52:20.901687 4857 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/631cbfc6-824a-4c9f-bbae-34da2ce2f8b3-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:20 crc kubenswrapper[4857]: I1128 13:52:20.960753 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 28 13:52:20 crc kubenswrapper[4857]: I1128 13:52:20.963782 4857 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="631cbfc6-824a-4c9f-bbae-34da2ce2f8b3" podUID="c3d9d9f7-3d10-4677-aaef-60d731a33857" Nov 28 13:52:20 crc kubenswrapper[4857]: I1128 13:52:20.976122 4857 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="631cbfc6-824a-4c9f-bbae-34da2ce2f8b3" podUID="c3d9d9f7-3d10-4677-aaef-60d731a33857" Nov 28 13:52:22 crc kubenswrapper[4857]: I1128 13:52:22.241810 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="631cbfc6-824a-4c9f-bbae-34da2ce2f8b3" path="/var/lib/kubelet/pods/631cbfc6-824a-4c9f-bbae-34da2ce2f8b3/volumes" Nov 28 13:52:22 crc kubenswrapper[4857]: I1128 13:52:22.456292 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-7ddf6b88b7-6dfnw"] Nov 28 13:52:22 crc kubenswrapper[4857]: I1128 13:52:22.459809 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" Nov 28 13:52:22 crc kubenswrapper[4857]: I1128 13:52:22.462714 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 28 13:52:22 crc kubenswrapper[4857]: I1128 13:52:22.462997 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Nov 28 13:52:22 crc kubenswrapper[4857]: I1128 13:52:22.463016 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Nov 28 13:52:22 crc kubenswrapper[4857]: I1128 13:52:22.470236 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-7ddf6b88b7-6dfnw"] Nov 28 13:52:22 crc kubenswrapper[4857]: I1128 13:52:22.581056 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 28 13:52:22 crc kubenswrapper[4857]: I1128 13:52:22.628909 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f3c4b68-eb9c-466a-accc-51a99bcdac06-combined-ca-bundle\") pod \"swift-proxy-7ddf6b88b7-6dfnw\" (UID: \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\") " pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" Nov 28 13:52:22 crc kubenswrapper[4857]: I1128 13:52:22.629028 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2m84z\" (UniqueName: \"kubernetes.io/projected/3f3c4b68-eb9c-466a-accc-51a99bcdac06-kube-api-access-2m84z\") pod \"swift-proxy-7ddf6b88b7-6dfnw\" (UID: \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\") " pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" Nov 28 13:52:22 crc kubenswrapper[4857]: I1128 13:52:22.629084 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f3c4b68-eb9c-466a-accc-51a99bcdac06-config-data\") pod \"swift-proxy-7ddf6b88b7-6dfnw\" (UID: \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\") " pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" Nov 28 13:52:22 crc kubenswrapper[4857]: I1128 13:52:22.629124 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f3c4b68-eb9c-466a-accc-51a99bcdac06-public-tls-certs\") pod \"swift-proxy-7ddf6b88b7-6dfnw\" (UID: \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\") " pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" Nov 28 13:52:22 crc kubenswrapper[4857]: I1128 13:52:22.629150 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f3c4b68-eb9c-466a-accc-51a99bcdac06-internal-tls-certs\") pod \"swift-proxy-7ddf6b88b7-6dfnw\" (UID: \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\") " pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" Nov 28 13:52:22 crc kubenswrapper[4857]: I1128 13:52:22.629172 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3f3c4b68-eb9c-466a-accc-51a99bcdac06-run-httpd\") pod \"swift-proxy-7ddf6b88b7-6dfnw\" (UID: \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\") " pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" Nov 28 13:52:22 crc kubenswrapper[4857]: I1128 13:52:22.629189 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/3f3c4b68-eb9c-466a-accc-51a99bcdac06-log-httpd\") pod \"swift-proxy-7ddf6b88b7-6dfnw\" (UID: \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\") " pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" Nov 28 13:52:22 crc kubenswrapper[4857]: I1128 13:52:22.629583 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3f3c4b68-eb9c-466a-accc-51a99bcdac06-etc-swift\") pod \"swift-proxy-7ddf6b88b7-6dfnw\" (UID: \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\") " pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" Nov 28 13:52:22 crc kubenswrapper[4857]: I1128 13:52:22.731040 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f3c4b68-eb9c-466a-accc-51a99bcdac06-config-data\") pod \"swift-proxy-7ddf6b88b7-6dfnw\" (UID: \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\") " pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" Nov 28 13:52:22 crc kubenswrapper[4857]: I1128 13:52:22.731102 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f3c4b68-eb9c-466a-accc-51a99bcdac06-public-tls-certs\") pod \"swift-proxy-7ddf6b88b7-6dfnw\" (UID: \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\") " pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" Nov 28 13:52:22 crc kubenswrapper[4857]: I1128 13:52:22.731130 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f3c4b68-eb9c-466a-accc-51a99bcdac06-internal-tls-certs\") pod \"swift-proxy-7ddf6b88b7-6dfnw\" (UID: \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\") " pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" Nov 28 13:52:22 crc kubenswrapper[4857]: I1128 13:52:22.731149 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3f3c4b68-eb9c-466a-accc-51a99bcdac06-run-httpd\") pod \"swift-proxy-7ddf6b88b7-6dfnw\" (UID: \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\") " pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" Nov 28 13:52:22 crc kubenswrapper[4857]: I1128 13:52:22.731168 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3f3c4b68-eb9c-466a-accc-51a99bcdac06-log-httpd\") pod \"swift-proxy-7ddf6b88b7-6dfnw\" (UID: \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\") " pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" Nov 28 13:52:22 crc kubenswrapper[4857]: I1128 13:52:22.731279 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3f3c4b68-eb9c-466a-accc-51a99bcdac06-etc-swift\") pod \"swift-proxy-7ddf6b88b7-6dfnw\" (UID: \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\") " pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" Nov 28 13:52:22 crc kubenswrapper[4857]: I1128 13:52:22.731306 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f3c4b68-eb9c-466a-accc-51a99bcdac06-combined-ca-bundle\") pod \"swift-proxy-7ddf6b88b7-6dfnw\" (UID: \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\") " pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" Nov 28 13:52:22 crc kubenswrapper[4857]: I1128 13:52:22.731338 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2m84z\" (UniqueName: 
\"kubernetes.io/projected/3f3c4b68-eb9c-466a-accc-51a99bcdac06-kube-api-access-2m84z\") pod \"swift-proxy-7ddf6b88b7-6dfnw\" (UID: \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\") " pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" Nov 28 13:52:22 crc kubenswrapper[4857]: I1128 13:52:22.731908 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3f3c4b68-eb9c-466a-accc-51a99bcdac06-run-httpd\") pod \"swift-proxy-7ddf6b88b7-6dfnw\" (UID: \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\") " pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" Nov 28 13:52:22 crc kubenswrapper[4857]: I1128 13:52:22.732290 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3f3c4b68-eb9c-466a-accc-51a99bcdac06-log-httpd\") pod \"swift-proxy-7ddf6b88b7-6dfnw\" (UID: \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\") " pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" Nov 28 13:52:22 crc kubenswrapper[4857]: I1128 13:52:22.737355 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f3c4b68-eb9c-466a-accc-51a99bcdac06-internal-tls-certs\") pod \"swift-proxy-7ddf6b88b7-6dfnw\" (UID: \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\") " pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" Nov 28 13:52:22 crc kubenswrapper[4857]: I1128 13:52:22.738324 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f3c4b68-eb9c-466a-accc-51a99bcdac06-public-tls-certs\") pod \"swift-proxy-7ddf6b88b7-6dfnw\" (UID: \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\") " pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" Nov 28 13:52:22 crc kubenswrapper[4857]: I1128 13:52:22.740890 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f3c4b68-eb9c-466a-accc-51a99bcdac06-combined-ca-bundle\") pod \"swift-proxy-7ddf6b88b7-6dfnw\" (UID: \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\") " pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" Nov 28 13:52:22 crc kubenswrapper[4857]: I1128 13:52:22.741676 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f3c4b68-eb9c-466a-accc-51a99bcdac06-config-data\") pod \"swift-proxy-7ddf6b88b7-6dfnw\" (UID: \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\") " pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" Nov 28 13:52:22 crc kubenswrapper[4857]: I1128 13:52:22.743786 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3f3c4b68-eb9c-466a-accc-51a99bcdac06-etc-swift\") pod \"swift-proxy-7ddf6b88b7-6dfnw\" (UID: \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\") " pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" Nov 28 13:52:22 crc kubenswrapper[4857]: I1128 13:52:22.750866 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2m84z\" (UniqueName: \"kubernetes.io/projected/3f3c4b68-eb9c-466a-accc-51a99bcdac06-kube-api-access-2m84z\") pod \"swift-proxy-7ddf6b88b7-6dfnw\" (UID: \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\") " pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" Nov 28 13:52:22 crc kubenswrapper[4857]: I1128 13:52:22.786582 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" Nov 28 13:52:23 crc kubenswrapper[4857]: I1128 13:52:23.354105 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-7ddf6b88b7-6dfnw"] Nov 28 13:52:23 crc kubenswrapper[4857]: W1128 13:52:23.361648 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3f3c4b68_eb9c_466a_accc_51a99bcdac06.slice/crio-12e218fa5f6f136b5238b54192e01e3874d3fc29f382727bd19e226597b2d40c WatchSource:0}: Error finding container 12e218fa5f6f136b5238b54192e01e3874d3fc29f382727bd19e226597b2d40c: Status 404 returned error can't find the container with id 12e218fa5f6f136b5238b54192e01e3874d3fc29f382727bd19e226597b2d40c Nov 28 13:52:23 crc kubenswrapper[4857]: I1128 13:52:23.986489 4857 generic.go:334] "Generic (PLEG): container finished" podID="e88c2b4b-8abf-4d38-a17f-b5231a0a254e" containerID="54a7726b2191651fd67bcae0c2e74b8b56597f703b356cdffa64c067aea302e9" exitCode=0 Nov 28 13:52:23 crc kubenswrapper[4857]: I1128 13:52:23.986697 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-b48bfd44-jstx9" event={"ID":"e88c2b4b-8abf-4d38-a17f-b5231a0a254e","Type":"ContainerDied","Data":"54a7726b2191651fd67bcae0c2e74b8b56597f703b356cdffa64c067aea302e9"} Nov 28 13:52:23 crc kubenswrapper[4857]: I1128 13:52:23.991117 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" event={"ID":"3f3c4b68-eb9c-466a-accc-51a99bcdac06","Type":"ContainerStarted","Data":"cf34788b7144b8ed3875b49b5d2b5b6c3a9ecc3f59a04c12ee7eae6c0054ca0c"} Nov 28 13:52:23 crc kubenswrapper[4857]: I1128 13:52:23.992109 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" Nov 28 13:52:23 crc kubenswrapper[4857]: I1128 13:52:23.992122 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" event={"ID":"3f3c4b68-eb9c-466a-accc-51a99bcdac06","Type":"ContainerStarted","Data":"51f6521ceec6d8813ef4ec98da770b7f10abace9ce85b5db6afc3223f7041703"} Nov 28 13:52:23 crc kubenswrapper[4857]: I1128 13:52:23.992132 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" event={"ID":"3f3c4b68-eb9c-466a-accc-51a99bcdac06","Type":"ContainerStarted","Data":"12e218fa5f6f136b5238b54192e01e3874d3fc29f382727bd19e226597b2d40c"} Nov 28 13:52:24 crc kubenswrapper[4857]: I1128 13:52:24.034532 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" podStartSLOduration=2.03451074 podStartE2EDuration="2.03451074s" podCreationTimestamp="2025-11-28 13:52:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:52:24.01705575 +0000 UTC m=+1394.140997187" watchObservedRunningTime="2025-11-28 13:52:24.03451074 +0000 UTC m=+1394.158452167" Nov 28 13:52:24 crc kubenswrapper[4857]: I1128 13:52:24.465163 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-b48bfd44-jstx9" Nov 28 13:52:24 crc kubenswrapper[4857]: I1128 13:52:24.567240 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ctjjl\" (UniqueName: \"kubernetes.io/projected/e88c2b4b-8abf-4d38-a17f-b5231a0a254e-kube-api-access-ctjjl\") pod \"e88c2b4b-8abf-4d38-a17f-b5231a0a254e\" (UID: \"e88c2b4b-8abf-4d38-a17f-b5231a0a254e\") " Nov 28 13:52:24 crc kubenswrapper[4857]: I1128 13:52:24.567313 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e88c2b4b-8abf-4d38-a17f-b5231a0a254e-ovndb-tls-certs\") pod \"e88c2b4b-8abf-4d38-a17f-b5231a0a254e\" (UID: \"e88c2b4b-8abf-4d38-a17f-b5231a0a254e\") " Nov 28 13:52:24 crc kubenswrapper[4857]: I1128 13:52:24.567411 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/e88c2b4b-8abf-4d38-a17f-b5231a0a254e-config\") pod \"e88c2b4b-8abf-4d38-a17f-b5231a0a254e\" (UID: \"e88c2b4b-8abf-4d38-a17f-b5231a0a254e\") " Nov 28 13:52:24 crc kubenswrapper[4857]: I1128 13:52:24.567463 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e88c2b4b-8abf-4d38-a17f-b5231a0a254e-combined-ca-bundle\") pod \"e88c2b4b-8abf-4d38-a17f-b5231a0a254e\" (UID: \"e88c2b4b-8abf-4d38-a17f-b5231a0a254e\") " Nov 28 13:52:24 crc kubenswrapper[4857]: I1128 13:52:24.567499 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/e88c2b4b-8abf-4d38-a17f-b5231a0a254e-httpd-config\") pod \"e88c2b4b-8abf-4d38-a17f-b5231a0a254e\" (UID: \"e88c2b4b-8abf-4d38-a17f-b5231a0a254e\") " Nov 28 13:52:24 crc kubenswrapper[4857]: I1128 13:52:24.577986 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e88c2b4b-8abf-4d38-a17f-b5231a0a254e-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "e88c2b4b-8abf-4d38-a17f-b5231a0a254e" (UID: "e88c2b4b-8abf-4d38-a17f-b5231a0a254e"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:52:24 crc kubenswrapper[4857]: I1128 13:52:24.580009 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e88c2b4b-8abf-4d38-a17f-b5231a0a254e-kube-api-access-ctjjl" (OuterVolumeSpecName: "kube-api-access-ctjjl") pod "e88c2b4b-8abf-4d38-a17f-b5231a0a254e" (UID: "e88c2b4b-8abf-4d38-a17f-b5231a0a254e"). InnerVolumeSpecName "kube-api-access-ctjjl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:52:24 crc kubenswrapper[4857]: I1128 13:52:24.627482 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e88c2b4b-8abf-4d38-a17f-b5231a0a254e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e88c2b4b-8abf-4d38-a17f-b5231a0a254e" (UID: "e88c2b4b-8abf-4d38-a17f-b5231a0a254e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:52:24 crc kubenswrapper[4857]: I1128 13:52:24.652632 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e88c2b4b-8abf-4d38-a17f-b5231a0a254e-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "e88c2b4b-8abf-4d38-a17f-b5231a0a254e" (UID: "e88c2b4b-8abf-4d38-a17f-b5231a0a254e"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:52:24 crc kubenswrapper[4857]: I1128 13:52:24.660215 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e88c2b4b-8abf-4d38-a17f-b5231a0a254e-config" (OuterVolumeSpecName: "config") pod "e88c2b4b-8abf-4d38-a17f-b5231a0a254e" (UID: "e88c2b4b-8abf-4d38-a17f-b5231a0a254e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:52:24 crc kubenswrapper[4857]: I1128 13:52:24.675650 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ctjjl\" (UniqueName: \"kubernetes.io/projected/e88c2b4b-8abf-4d38-a17f-b5231a0a254e-kube-api-access-ctjjl\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:24 crc kubenswrapper[4857]: I1128 13:52:24.675684 4857 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/e88c2b4b-8abf-4d38-a17f-b5231a0a254e-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:24 crc kubenswrapper[4857]: I1128 13:52:24.675694 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/e88c2b4b-8abf-4d38-a17f-b5231a0a254e-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:24 crc kubenswrapper[4857]: I1128 13:52:24.675703 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e88c2b4b-8abf-4d38-a17f-b5231a0a254e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:24 crc kubenswrapper[4857]: I1128 13:52:24.675712 4857 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/e88c2b4b-8abf-4d38-a17f-b5231a0a254e-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:25 crc kubenswrapper[4857]: I1128 13:52:25.025380 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-b48bfd44-jstx9" Nov 28 13:52:25 crc kubenswrapper[4857]: I1128 13:52:25.026250 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-b48bfd44-jstx9" event={"ID":"e88c2b4b-8abf-4d38-a17f-b5231a0a254e","Type":"ContainerDied","Data":"65c99d3abfad19cb4fc518b3c1a3f975ed0531ce0753b9c86263895e31aecedc"} Nov 28 13:52:25 crc kubenswrapper[4857]: I1128 13:52:25.026318 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" Nov 28 13:52:25 crc kubenswrapper[4857]: I1128 13:52:25.026339 4857 scope.go:117] "RemoveContainer" containerID="f6d1f70b6b825af6a00c0b03c85ab2fbf69835476846543de4e466d40c7f0e73" Nov 28 13:52:25 crc kubenswrapper[4857]: I1128 13:52:25.067187 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-b48bfd44-jstx9"] Nov 28 13:52:25 crc kubenswrapper[4857]: I1128 13:52:25.082577 4857 scope.go:117] "RemoveContainer" containerID="54a7726b2191651fd67bcae0c2e74b8b56597f703b356cdffa64c067aea302e9" Nov 28 13:52:25 crc kubenswrapper[4857]: I1128 13:52:25.084774 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-b48bfd44-jstx9"] Nov 28 13:52:26 crc kubenswrapper[4857]: I1128 13:52:26.041586 4857 generic.go:334] "Generic (PLEG): container finished" podID="9b948893-d309-41ca-987c-287ee0b12ef2" containerID="dac2f631af2dd13cbefd63029290ff4e103fbb366fa60c50cb33a3e5334757da" exitCode=137 Nov 28 13:52:26 crc kubenswrapper[4857]: I1128 13:52:26.041665 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9b948893-d309-41ca-987c-287ee0b12ef2","Type":"ContainerDied","Data":"dac2f631af2dd13cbefd63029290ff4e103fbb366fa60c50cb33a3e5334757da"} Nov 28 13:52:26 crc kubenswrapper[4857]: I1128 13:52:26.244566 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e88c2b4b-8abf-4d38-a17f-b5231a0a254e" path="/var/lib/kubelet/pods/e88c2b4b-8abf-4d38-a17f-b5231a0a254e/volumes" Nov 28 13:52:27 crc kubenswrapper[4857]: I1128 13:52:27.629107 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 13:52:27 crc kubenswrapper[4857]: I1128 13:52:27.629390 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="f5ed9ca1-bc80-4c08-b90b-ee00cfd05645" containerName="glance-log" containerID="cri-o://856c9326ccc530d5676d0bd8dbb59c6ee4506778c164fddd33bb31ea3053126b" gracePeriod=30 Nov 28 13:52:27 crc kubenswrapper[4857]: I1128 13:52:27.629437 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="f5ed9ca1-bc80-4c08-b90b-ee00cfd05645" containerName="glance-httpd" containerID="cri-o://c64c13d5543f9321a6a584d223b8b188f62ddf7016e791563e13727f25e12270" gracePeriod=30 Nov 28 13:52:28 crc kubenswrapper[4857]: I1128 13:52:28.080018 4857 generic.go:334] "Generic (PLEG): container finished" podID="f5ed9ca1-bc80-4c08-b90b-ee00cfd05645" containerID="856c9326ccc530d5676d0bd8dbb59c6ee4506778c164fddd33bb31ea3053126b" exitCode=143 Nov 28 13:52:28 crc kubenswrapper[4857]: I1128 13:52:28.080124 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645","Type":"ContainerDied","Data":"856c9326ccc530d5676d0bd8dbb59c6ee4506778c164fddd33bb31ea3053126b"} Nov 28 13:52:29 crc kubenswrapper[4857]: I1128 13:52:29.857612 4857 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 13:52:29 crc kubenswrapper[4857]: I1128 13:52:29.858046 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="37e5bb25-0df5-40da-996d-b4e23120822b" containerName="glance-log" containerID="cri-o://ce095129f118f2af696e99064550cdd5c3f0cc311b63fd0d6fe09c258eebd664" gracePeriod=30 Nov 28 13:52:29 crc kubenswrapper[4857]: I1128 13:52:29.858250 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="37e5bb25-0df5-40da-996d-b4e23120822b" containerName="glance-httpd" containerID="cri-o://49ac69b05eee5d86114afb435aa0c9f9c1d54e38fdb7fd9f71f2ab23e6cf8c2f" gracePeriod=30 Nov 28 13:52:30 crc kubenswrapper[4857]: I1128 13:52:30.102432 4857 generic.go:334] "Generic (PLEG): container finished" podID="37e5bb25-0df5-40da-996d-b4e23120822b" containerID="ce095129f118f2af696e99064550cdd5c3f0cc311b63fd0d6fe09c258eebd664" exitCode=143 Nov 28 13:52:30 crc kubenswrapper[4857]: I1128 13:52:30.102822 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"37e5bb25-0df5-40da-996d-b4e23120822b","Type":"ContainerDied","Data":"ce095129f118f2af696e99064550cdd5c3f0cc311b63fd0d6fe09c258eebd664"} Nov 28 13:52:31 crc kubenswrapper[4857]: I1128 13:52:31.115099 4857 generic.go:334] "Generic (PLEG): container finished" podID="f5ed9ca1-bc80-4c08-b90b-ee00cfd05645" containerID="c64c13d5543f9321a6a584d223b8b188f62ddf7016e791563e13727f25e12270" exitCode=0 Nov 28 13:52:31 crc kubenswrapper[4857]: I1128 13:52:31.115197 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645","Type":"ContainerDied","Data":"c64c13d5543f9321a6a584d223b8b188f62ddf7016e791563e13727f25e12270"} Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.136294 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"9b948893-d309-41ca-987c-287ee0b12ef2","Type":"ContainerDied","Data":"206e4d3af24f3f25b2d8df96d6b2cfb6b40b3fd31c43b1ffdd0ae90252851500"} Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.136648 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="206e4d3af24f3f25b2d8df96d6b2cfb6b40b3fd31c43b1ffdd0ae90252851500" Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.305102 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.409093 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.470684 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9b948893-d309-41ca-987c-287ee0b12ef2-log-httpd\") pod \"9b948893-d309-41ca-987c-287ee0b12ef2\" (UID: \"9b948893-d309-41ca-987c-287ee0b12ef2\") " Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.470787 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-httpd-run\") pod \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\" (UID: \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\") " Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.470811 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-logs\") pod \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\" (UID: \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\") " Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.471530 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-logs" (OuterVolumeSpecName: "logs") pod "f5ed9ca1-bc80-4c08-b90b-ee00cfd05645" (UID: "f5ed9ca1-bc80-4c08-b90b-ee00cfd05645"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.471559 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b948893-d309-41ca-987c-287ee0b12ef2-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "9b948893-d309-41ca-987c-287ee0b12ef2" (UID: "9b948893-d309-41ca-987c-287ee0b12ef2"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.472212 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "f5ed9ca1-bc80-4c08-b90b-ee00cfd05645" (UID: "f5ed9ca1-bc80-4c08-b90b-ee00cfd05645"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.474127 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hh8w2\" (UniqueName: \"kubernetes.io/projected/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-kube-api-access-hh8w2\") pod \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\" (UID: \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\") " Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.474186 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\" (UID: \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\") " Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.474227 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-config-data\") pod \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\" (UID: \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\") " Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.474305 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9b948893-d309-41ca-987c-287ee0b12ef2-run-httpd\") pod \"9b948893-d309-41ca-987c-287ee0b12ef2\" (UID: \"9b948893-d309-41ca-987c-287ee0b12ef2\") " Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.474322 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-combined-ca-bundle\") pod \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\" (UID: \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\") " Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.474369 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b948893-d309-41ca-987c-287ee0b12ef2-combined-ca-bundle\") pod \"9b948893-d309-41ca-987c-287ee0b12ef2\" (UID: \"9b948893-d309-41ca-987c-287ee0b12ef2\") " Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.474386 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pr9fn\" (UniqueName: \"kubernetes.io/projected/9b948893-d309-41ca-987c-287ee0b12ef2-kube-api-access-pr9fn\") pod \"9b948893-d309-41ca-987c-287ee0b12ef2\" (UID: \"9b948893-d309-41ca-987c-287ee0b12ef2\") " Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.474405 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b948893-d309-41ca-987c-287ee0b12ef2-config-data\") pod \"9b948893-d309-41ca-987c-287ee0b12ef2\" (UID: \"9b948893-d309-41ca-987c-287ee0b12ef2\") " Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.474472 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-scripts\") pod \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\" (UID: \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\") " Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.474489 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-public-tls-certs\") pod \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\" (UID: \"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645\") " 
Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.474547 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9b948893-d309-41ca-987c-287ee0b12ef2-sg-core-conf-yaml\") pod \"9b948893-d309-41ca-987c-287ee0b12ef2\" (UID: \"9b948893-d309-41ca-987c-287ee0b12ef2\") " Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.474577 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b948893-d309-41ca-987c-287ee0b12ef2-scripts\") pod \"9b948893-d309-41ca-987c-287ee0b12ef2\" (UID: \"9b948893-d309-41ca-987c-287ee0b12ef2\") " Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.475274 4857 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9b948893-d309-41ca-987c-287ee0b12ef2-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.475294 4857 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.475304 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-logs\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.479059 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "glance") pod "f5ed9ca1-bc80-4c08-b90b-ee00cfd05645" (UID: "f5ed9ca1-bc80-4c08-b90b-ee00cfd05645"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.479116 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b948893-d309-41ca-987c-287ee0b12ef2-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "9b948893-d309-41ca-987c-287ee0b12ef2" (UID: "9b948893-d309-41ca-987c-287ee0b12ef2"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.479212 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b948893-d309-41ca-987c-287ee0b12ef2-kube-api-access-pr9fn" (OuterVolumeSpecName: "kube-api-access-pr9fn") pod "9b948893-d309-41ca-987c-287ee0b12ef2" (UID: "9b948893-d309-41ca-987c-287ee0b12ef2"). InnerVolumeSpecName "kube-api-access-pr9fn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.481652 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-scripts" (OuterVolumeSpecName: "scripts") pod "f5ed9ca1-bc80-4c08-b90b-ee00cfd05645" (UID: "f5ed9ca1-bc80-4c08-b90b-ee00cfd05645"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.482516 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-kube-api-access-hh8w2" (OuterVolumeSpecName: "kube-api-access-hh8w2") pod "f5ed9ca1-bc80-4c08-b90b-ee00cfd05645" (UID: "f5ed9ca1-bc80-4c08-b90b-ee00cfd05645"). 
InnerVolumeSpecName "kube-api-access-hh8w2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.486075 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b948893-d309-41ca-987c-287ee0b12ef2-scripts" (OuterVolumeSpecName: "scripts") pod "9b948893-d309-41ca-987c-287ee0b12ef2" (UID: "9b948893-d309-41ca-987c-287ee0b12ef2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.515096 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b948893-d309-41ca-987c-287ee0b12ef2-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "9b948893-d309-41ca-987c-287ee0b12ef2" (UID: "9b948893-d309-41ca-987c-287ee0b12ef2"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.527631 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f5ed9ca1-bc80-4c08-b90b-ee00cfd05645" (UID: "f5ed9ca1-bc80-4c08-b90b-ee00cfd05645"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.567870 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "f5ed9ca1-bc80-4c08-b90b-ee00cfd05645" (UID: "f5ed9ca1-bc80-4c08-b90b-ee00cfd05645"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.576480 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hh8w2\" (UniqueName: \"kubernetes.io/projected/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-kube-api-access-hh8w2\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.576528 4857 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.576541 4857 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/9b948893-d309-41ca-987c-287ee0b12ef2-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.576550 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.576559 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pr9fn\" (UniqueName: \"kubernetes.io/projected/9b948893-d309-41ca-987c-287ee0b12ef2-kube-api-access-pr9fn\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.576567 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.576575 4857 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.576583 4857 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/9b948893-d309-41ca-987c-287ee0b12ef2-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.576592 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b948893-d309-41ca-987c-287ee0b12ef2-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.577854 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-config-data" (OuterVolumeSpecName: "config-data") pod "f5ed9ca1-bc80-4c08-b90b-ee00cfd05645" (UID: "f5ed9ca1-bc80-4c08-b90b-ee00cfd05645"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.581131 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b948893-d309-41ca-987c-287ee0b12ef2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9b948893-d309-41ca-987c-287ee0b12ef2" (UID: "9b948893-d309-41ca-987c-287ee0b12ef2"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.599129 4857 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.617040 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b948893-d309-41ca-987c-287ee0b12ef2-config-data" (OuterVolumeSpecName: "config-data") pod "9b948893-d309-41ca-987c-287ee0b12ef2" (UID: "9b948893-d309-41ca-987c-287ee0b12ef2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.678562 4857 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.678611 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.678637 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b948893-d309-41ca-987c-287ee0b12ef2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.678648 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b948893-d309-41ca-987c-287ee0b12ef2-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.797630 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" Nov 28 13:52:32 crc kubenswrapper[4857]: I1128 13:52:32.803453 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.150994 4857 generic.go:334] "Generic (PLEG): container finished" podID="37e5bb25-0df5-40da-996d-b4e23120822b" containerID="49ac69b05eee5d86114afb435aa0c9f9c1d54e38fdb7fd9f71f2ab23e6cf8c2f" exitCode=0 Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.151110 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"37e5bb25-0df5-40da-996d-b4e23120822b","Type":"ContainerDied","Data":"49ac69b05eee5d86114afb435aa0c9f9c1d54e38fdb7fd9f71f2ab23e6cf8c2f"} Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.155480 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f5ed9ca1-bc80-4c08-b90b-ee00cfd05645","Type":"ContainerDied","Data":"0961de5fcd8570f79f704e21bb51e23997d1754b8e25281a801cda46f108bdf1"} Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.155509 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.155533 4857 scope.go:117] "RemoveContainer" containerID="c64c13d5543f9321a6a584d223b8b188f62ddf7016e791563e13727f25e12270" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.159618 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"c3d9d9f7-3d10-4677-aaef-60d731a33857","Type":"ContainerStarted","Data":"c29237331f4b473cdae2e6d65bf7f88564f2faca53b3e0624b2143a3d5d0c546"} Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.159693 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.181786 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.682235088 podStartE2EDuration="15.18176284s" podCreationTimestamp="2025-11-28 13:52:18 +0000 UTC" firstStartedPulling="2025-11-28 13:52:19.661970196 +0000 UTC m=+1389.785911633" lastFinishedPulling="2025-11-28 13:52:32.161497948 +0000 UTC m=+1402.285439385" observedRunningTime="2025-11-28 13:52:33.173527503 +0000 UTC m=+1403.297468940" watchObservedRunningTime="2025-11-28 13:52:33.18176284 +0000 UTC m=+1403.305704277" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.225698 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.232186 4857 scope.go:117] "RemoveContainer" containerID="856c9326ccc530d5676d0bd8dbb59c6ee4506778c164fddd33bb31ea3053126b" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.239374 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.250037 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.258085 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.280010 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:52:33 crc kubenswrapper[4857]: E1128 13:52:33.280419 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5ed9ca1-bc80-4c08-b90b-ee00cfd05645" containerName="glance-log" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.280435 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5ed9ca1-bc80-4c08-b90b-ee00cfd05645" containerName="glance-log" Nov 28 13:52:33 crc kubenswrapper[4857]: E1128 13:52:33.280452 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5ed9ca1-bc80-4c08-b90b-ee00cfd05645" containerName="glance-httpd" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.280458 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5ed9ca1-bc80-4c08-b90b-ee00cfd05645" containerName="glance-httpd" Nov 28 13:52:33 crc kubenswrapper[4857]: E1128 13:52:33.280476 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b948893-d309-41ca-987c-287ee0b12ef2" containerName="ceilometer-central-agent" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.280482 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b948893-d309-41ca-987c-287ee0b12ef2" containerName="ceilometer-central-agent" Nov 28 13:52:33 crc kubenswrapper[4857]: E1128 13:52:33.280492 4857 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e88c2b4b-8abf-4d38-a17f-b5231a0a254e" containerName="neutron-api" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.280498 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="e88c2b4b-8abf-4d38-a17f-b5231a0a254e" containerName="neutron-api" Nov 28 13:52:33 crc kubenswrapper[4857]: E1128 13:52:33.280510 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e88c2b4b-8abf-4d38-a17f-b5231a0a254e" containerName="neutron-httpd" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.280516 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="e88c2b4b-8abf-4d38-a17f-b5231a0a254e" containerName="neutron-httpd" Nov 28 13:52:33 crc kubenswrapper[4857]: E1128 13:52:33.280533 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b948893-d309-41ca-987c-287ee0b12ef2" containerName="proxy-httpd" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.280539 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b948893-d309-41ca-987c-287ee0b12ef2" containerName="proxy-httpd" Nov 28 13:52:33 crc kubenswrapper[4857]: E1128 13:52:33.280549 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b948893-d309-41ca-987c-287ee0b12ef2" containerName="sg-core" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.280554 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b948893-d309-41ca-987c-287ee0b12ef2" containerName="sg-core" Nov 28 13:52:33 crc kubenswrapper[4857]: E1128 13:52:33.280566 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b948893-d309-41ca-987c-287ee0b12ef2" containerName="ceilometer-notification-agent" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.280571 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b948893-d309-41ca-987c-287ee0b12ef2" containerName="ceilometer-notification-agent" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.280728 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b948893-d309-41ca-987c-287ee0b12ef2" containerName="sg-core" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.280740 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5ed9ca1-bc80-4c08-b90b-ee00cfd05645" containerName="glance-httpd" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.280751 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b948893-d309-41ca-987c-287ee0b12ef2" containerName="proxy-httpd" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.280758 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="e88c2b4b-8abf-4d38-a17f-b5231a0a254e" containerName="neutron-httpd" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.280767 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="e88c2b4b-8abf-4d38-a17f-b5231a0a254e" containerName="neutron-api" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.280777 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b948893-d309-41ca-987c-287ee0b12ef2" containerName="ceilometer-central-agent" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.280789 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5ed9ca1-bc80-4c08-b90b-ee00cfd05645" containerName="glance-log" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.280802 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b948893-d309-41ca-987c-287ee0b12ef2" containerName="ceilometer-notification-agent" Nov 28 13:52:33 crc 
kubenswrapper[4857]: I1128 13:52:33.282468 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.287700 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.292640 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.292822 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.296364 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.298355 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.298669 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.305836 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.317768 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.398663 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\") " pod="openstack/glance-default-external-api-0" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.398707 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-logs\") pod \"glance-default-external-api-0\" (UID: \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\") " pod="openstack/glance-default-external-api-0" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.398734 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\") " pod="openstack/glance-default-external-api-0" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.398762 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a\") " pod="openstack/ceilometer-0" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.398790 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vktvk\" (UniqueName: \"kubernetes.io/projected/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-kube-api-access-vktvk\") pod \"glance-default-external-api-0\" (UID: \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\") " pod="openstack/glance-default-external-api-0" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 
Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.398839 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.398864 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-config-data\") pod \"glance-default-external-api-0\" (UID: \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.398893 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-run-httpd\") pod \"ceilometer-0\" (UID: \"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a\") " pod="openstack/ceilometer-0"
Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.398917 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-config-data\") pod \"ceilometer-0\" (UID: \"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a\") " pod="openstack/ceilometer-0"
Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.398939 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-scripts\") pod \"ceilometer-0\" (UID: \"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a\") " pod="openstack/ceilometer-0"
Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.398975 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.398993 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r2g8t\" (UniqueName: \"kubernetes.io/projected/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-kube-api-access-r2g8t\") pod \"ceilometer-0\" (UID: \"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a\") " pod="openstack/ceilometer-0"
Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.399013 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a\") " pod="openstack/ceilometer-0"
Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.399036 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-log-httpd\") pod \"ceilometer-0\" (UID: \"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a\") " pod="openstack/ceilometer-0"
Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.502742 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.502788 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-logs\") pod \"glance-default-external-api-0\" (UID: \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.502816 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.502851 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a\") " pod="openstack/ceilometer-0"
Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.502876 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vktvk\" (UniqueName: \"kubernetes.io/projected/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-kube-api-access-vktvk\") pod \"glance-default-external-api-0\" (UID: \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.502897 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-scripts\") pod \"glance-default-external-api-0\" (UID: \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.502926 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.502973 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-config-data\") pod \"glance-default-external-api-0\" (UID: \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.502992 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-run-httpd\") pod \"ceilometer-0\" (UID: \"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a\") " pod="openstack/ceilometer-0"
Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.503015 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-config-data\") pod \"ceilometer-0\" (UID: \"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a\") " pod="openstack/ceilometer-0"
Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.503036 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-scripts\") pod \"ceilometer-0\" (UID: \"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a\") " pod="openstack/ceilometer-0"
Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.503061 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.503076 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r2g8t\" (UniqueName: \"kubernetes.io/projected/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-kube-api-access-r2g8t\") pod \"ceilometer-0\" (UID: \"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a\") " pod="openstack/ceilometer-0"
Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.503098 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a\") " pod="openstack/ceilometer-0"
Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.503122 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-log-httpd\") pod \"ceilometer-0\" (UID: \"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a\") " pod="openstack/ceilometer-0"
Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.504254 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-log-httpd\") pod \"ceilometer-0\" (UID: \"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a\") " pod="openstack/ceilometer-0"
Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.504257 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-external-api-0"
Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.504688 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\") " pod="openstack/glance-default-external-api-0"
Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.504967 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-logs\") pod \"glance-default-external-api-0\" (UID: \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\") " pod="openstack/glance-default-external-api-0"
\"kubernetes.io/empty-dir/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-logs\") pod \"glance-default-external-api-0\" (UID: \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\") " pod="openstack/glance-default-external-api-0" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.505420 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-run-httpd\") pod \"ceilometer-0\" (UID: \"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a\") " pod="openstack/ceilometer-0" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.514306 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\") " pod="openstack/glance-default-external-api-0" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.516058 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-config-data\") pod \"glance-default-external-api-0\" (UID: \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\") " pod="openstack/glance-default-external-api-0" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.517600 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-scripts\") pod \"ceilometer-0\" (UID: \"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a\") " pod="openstack/ceilometer-0" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.526506 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-scripts\") pod \"glance-default-external-api-0\" (UID: \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\") " pod="openstack/glance-default-external-api-0" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.526646 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\") " pod="openstack/glance-default-external-api-0" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.527381 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a\") " pod="openstack/ceilometer-0" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.527531 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a\") " pod="openstack/ceilometer-0" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.528152 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-config-data\") pod \"ceilometer-0\" (UID: \"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a\") " pod="openstack/ceilometer-0" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.528609 4857 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-vktvk\" (UniqueName: \"kubernetes.io/projected/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-kube-api-access-vktvk\") pod \"glance-default-external-api-0\" (UID: \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\") " pod="openstack/glance-default-external-api-0" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.529274 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r2g8t\" (UniqueName: \"kubernetes.io/projected/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-kube-api-access-r2g8t\") pod \"ceilometer-0\" (UID: \"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a\") " pod="openstack/ceilometer-0" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.547080 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\") " pod="openstack/glance-default-external-api-0" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.609627 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.643621 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.687062 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.807788 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37e5bb25-0df5-40da-996d-b4e23120822b-combined-ca-bundle\") pod \"37e5bb25-0df5-40da-996d-b4e23120822b\" (UID: \"37e5bb25-0df5-40da-996d-b4e23120822b\") " Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.808451 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gnbmc\" (UniqueName: \"kubernetes.io/projected/37e5bb25-0df5-40da-996d-b4e23120822b-kube-api-access-gnbmc\") pod \"37e5bb25-0df5-40da-996d-b4e23120822b\" (UID: \"37e5bb25-0df5-40da-996d-b4e23120822b\") " Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.808503 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37e5bb25-0df5-40da-996d-b4e23120822b-config-data\") pod \"37e5bb25-0df5-40da-996d-b4e23120822b\" (UID: \"37e5bb25-0df5-40da-996d-b4e23120822b\") " Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.808532 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/37e5bb25-0df5-40da-996d-b4e23120822b-httpd-run\") pod \"37e5bb25-0df5-40da-996d-b4e23120822b\" (UID: \"37e5bb25-0df5-40da-996d-b4e23120822b\") " Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.808585 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37e5bb25-0df5-40da-996d-b4e23120822b-scripts\") pod \"37e5bb25-0df5-40da-996d-b4e23120822b\" (UID: \"37e5bb25-0df5-40da-996d-b4e23120822b\") " Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.808639 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/37e5bb25-0df5-40da-996d-b4e23120822b-logs\") pod 
\"37e5bb25-0df5-40da-996d-b4e23120822b\" (UID: \"37e5bb25-0df5-40da-996d-b4e23120822b\") " Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.808685 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/37e5bb25-0df5-40da-996d-b4e23120822b-internal-tls-certs\") pod \"37e5bb25-0df5-40da-996d-b4e23120822b\" (UID: \"37e5bb25-0df5-40da-996d-b4e23120822b\") " Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.808733 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"37e5bb25-0df5-40da-996d-b4e23120822b\" (UID: \"37e5bb25-0df5-40da-996d-b4e23120822b\") " Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.812021 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/37e5bb25-0df5-40da-996d-b4e23120822b-logs" (OuterVolumeSpecName: "logs") pod "37e5bb25-0df5-40da-996d-b4e23120822b" (UID: "37e5bb25-0df5-40da-996d-b4e23120822b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.812758 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/37e5bb25-0df5-40da-996d-b4e23120822b-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "37e5bb25-0df5-40da-996d-b4e23120822b" (UID: "37e5bb25-0df5-40da-996d-b4e23120822b"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.812983 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "glance") pod "37e5bb25-0df5-40da-996d-b4e23120822b" (UID: "37e5bb25-0df5-40da-996d-b4e23120822b"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.815198 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37e5bb25-0df5-40da-996d-b4e23120822b-kube-api-access-gnbmc" (OuterVolumeSpecName: "kube-api-access-gnbmc") pod "37e5bb25-0df5-40da-996d-b4e23120822b" (UID: "37e5bb25-0df5-40da-996d-b4e23120822b"). InnerVolumeSpecName "kube-api-access-gnbmc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.815763 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37e5bb25-0df5-40da-996d-b4e23120822b-scripts" (OuterVolumeSpecName: "scripts") pod "37e5bb25-0df5-40da-996d-b4e23120822b" (UID: "37e5bb25-0df5-40da-996d-b4e23120822b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.859121 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37e5bb25-0df5-40da-996d-b4e23120822b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "37e5bb25-0df5-40da-996d-b4e23120822b" (UID: "37e5bb25-0df5-40da-996d-b4e23120822b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.876443 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-w6xq8"] Nov 28 13:52:33 crc kubenswrapper[4857]: E1128 13:52:33.876848 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37e5bb25-0df5-40da-996d-b4e23120822b" containerName="glance-httpd" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.876863 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="37e5bb25-0df5-40da-996d-b4e23120822b" containerName="glance-httpd" Nov 28 13:52:33 crc kubenswrapper[4857]: E1128 13:52:33.876878 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37e5bb25-0df5-40da-996d-b4e23120822b" containerName="glance-log" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.876884 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="37e5bb25-0df5-40da-996d-b4e23120822b" containerName="glance-log" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.877492 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="37e5bb25-0df5-40da-996d-b4e23120822b" containerName="glance-httpd" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.877508 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="37e5bb25-0df5-40da-996d-b4e23120822b" containerName="glance-log" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.878464 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-w6xq8" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.895490 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-w6xq8"] Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.911114 4857 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.911147 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37e5bb25-0df5-40da-996d-b4e23120822b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.911159 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gnbmc\" (UniqueName: \"kubernetes.io/projected/37e5bb25-0df5-40da-996d-b4e23120822b-kube-api-access-gnbmc\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.911185 4857 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/37e5bb25-0df5-40da-996d-b4e23120822b-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.911195 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37e5bb25-0df5-40da-996d-b4e23120822b-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.911204 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/37e5bb25-0df5-40da-996d-b4e23120822b-logs\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.920155 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37e5bb25-0df5-40da-996d-b4e23120822b-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") 
pod "37e5bb25-0df5-40da-996d-b4e23120822b" (UID: "37e5bb25-0df5-40da-996d-b4e23120822b"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.938126 4857 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Nov 28 13:52:33 crc kubenswrapper[4857]: I1128 13:52:33.963701 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37e5bb25-0df5-40da-996d-b4e23120822b-config-data" (OuterVolumeSpecName: "config-data") pod "37e5bb25-0df5-40da-996d-b4e23120822b" (UID: "37e5bb25-0df5-40da-996d-b4e23120822b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.007965 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-cj65m"] Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.009192 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-cj65m" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.012297 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cwz4d\" (UniqueName: \"kubernetes.io/projected/6747722c-3764-4c1f-a2fb-5e604ccf27da-kube-api-access-cwz4d\") pod \"nova-api-db-create-w6xq8\" (UID: \"6747722c-3764-4c1f-a2fb-5e604ccf27da\") " pod="openstack/nova-api-db-create-w6xq8" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.012367 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6747722c-3764-4c1f-a2fb-5e604ccf27da-operator-scripts\") pod \"nova-api-db-create-w6xq8\" (UID: \"6747722c-3764-4c1f-a2fb-5e604ccf27da\") " pod="openstack/nova-api-db-create-w6xq8" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.012826 4857 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/37e5bb25-0df5-40da-996d-b4e23120822b-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.012848 4857 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.012858 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37e5bb25-0df5-40da-996d-b4e23120822b-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.019045 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-379c-account-create-update-b9q87"] Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.020669 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-379c-account-create-update-b9q87" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.031308 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.035089 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-cj65m"] Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.047322 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-379c-account-create-update-b9q87"] Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.125810 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6747722c-3764-4c1f-a2fb-5e604ccf27da-operator-scripts\") pod \"nova-api-db-create-w6xq8\" (UID: \"6747722c-3764-4c1f-a2fb-5e604ccf27da\") " pod="openstack/nova-api-db-create-w6xq8" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.125918 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fp44p\" (UniqueName: \"kubernetes.io/projected/c52ccccd-b22d-48e7-a20f-a612751942e5-kube-api-access-fp44p\") pod \"nova-cell0-db-create-cj65m\" (UID: \"c52ccccd-b22d-48e7-a20f-a612751942e5\") " pod="openstack/nova-cell0-db-create-cj65m" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.126175 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/28ddb94c-f564-4ac2-b665-02f5c1b7d96d-operator-scripts\") pod \"nova-api-379c-account-create-update-b9q87\" (UID: \"28ddb94c-f564-4ac2-b665-02f5c1b7d96d\") " pod="openstack/nova-api-379c-account-create-update-b9q87" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.126268 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c52ccccd-b22d-48e7-a20f-a612751942e5-operator-scripts\") pod \"nova-cell0-db-create-cj65m\" (UID: \"c52ccccd-b22d-48e7-a20f-a612751942e5\") " pod="openstack/nova-cell0-db-create-cj65m" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.126303 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hnl5z\" (UniqueName: \"kubernetes.io/projected/28ddb94c-f564-4ac2-b665-02f5c1b7d96d-kube-api-access-hnl5z\") pod \"nova-api-379c-account-create-update-b9q87\" (UID: \"28ddb94c-f564-4ac2-b665-02f5c1b7d96d\") " pod="openstack/nova-api-379c-account-create-update-b9q87" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.126363 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cwz4d\" (UniqueName: \"kubernetes.io/projected/6747722c-3764-4c1f-a2fb-5e604ccf27da-kube-api-access-cwz4d\") pod \"nova-api-db-create-w6xq8\" (UID: \"6747722c-3764-4c1f-a2fb-5e604ccf27da\") " pod="openstack/nova-api-db-create-w6xq8" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.127825 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6747722c-3764-4c1f-a2fb-5e604ccf27da-operator-scripts\") pod \"nova-api-db-create-w6xq8\" (UID: \"6747722c-3764-4c1f-a2fb-5e604ccf27da\") " pod="openstack/nova-api-db-create-w6xq8" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.146067 4857 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-cwz4d\" (UniqueName: \"kubernetes.io/projected/6747722c-3764-4c1f-a2fb-5e604ccf27da-kube-api-access-cwz4d\") pod \"nova-api-db-create-w6xq8\" (UID: \"6747722c-3764-4c1f-a2fb-5e604ccf27da\") " pod="openstack/nova-api-db-create-w6xq8" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.159987 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-xwr28"] Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.211164 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-w6xq8" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.235771 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-xwr28"] Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.236145 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-xwr28" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.238025 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/28ddb94c-f564-4ac2-b665-02f5c1b7d96d-operator-scripts\") pod \"nova-api-379c-account-create-update-b9q87\" (UID: \"28ddb94c-f564-4ac2-b665-02f5c1b7d96d\") " pod="openstack/nova-api-379c-account-create-update-b9q87" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.238105 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c52ccccd-b22d-48e7-a20f-a612751942e5-operator-scripts\") pod \"nova-cell0-db-create-cj65m\" (UID: \"c52ccccd-b22d-48e7-a20f-a612751942e5\") " pod="openstack/nova-cell0-db-create-cj65m" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.238131 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hnl5z\" (UniqueName: \"kubernetes.io/projected/28ddb94c-f564-4ac2-b665-02f5c1b7d96d-kube-api-access-hnl5z\") pod \"nova-api-379c-account-create-update-b9q87\" (UID: \"28ddb94c-f564-4ac2-b665-02f5c1b7d96d\") " pod="openstack/nova-api-379c-account-create-update-b9q87" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.238192 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fp44p\" (UniqueName: \"kubernetes.io/projected/c52ccccd-b22d-48e7-a20f-a612751942e5-kube-api-access-fp44p\") pod \"nova-cell0-db-create-cj65m\" (UID: \"c52ccccd-b22d-48e7-a20f-a612751942e5\") " pod="openstack/nova-cell0-db-create-cj65m" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.240315 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c52ccccd-b22d-48e7-a20f-a612751942e5-operator-scripts\") pod \"nova-cell0-db-create-cj65m\" (UID: \"c52ccccd-b22d-48e7-a20f-a612751942e5\") " pod="openstack/nova-cell0-db-create-cj65m" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.240516 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/28ddb94c-f564-4ac2-b665-02f5c1b7d96d-operator-scripts\") pod \"nova-api-379c-account-create-update-b9q87\" (UID: \"28ddb94c-f564-4ac2-b665-02f5c1b7d96d\") " pod="openstack/nova-api-379c-account-create-update-b9q87" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.255478 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.258541 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b948893-d309-41ca-987c-287ee0b12ef2" path="/var/lib/kubelet/pods/9b948893-d309-41ca-987c-287ee0b12ef2/volumes" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.259615 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f5ed9ca1-bc80-4c08-b90b-ee00cfd05645" path="/var/lib/kubelet/pods/f5ed9ca1-bc80-4c08-b90b-ee00cfd05645/volumes" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.260080 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fp44p\" (UniqueName: \"kubernetes.io/projected/c52ccccd-b22d-48e7-a20f-a612751942e5-kube-api-access-fp44p\") pod \"nova-cell0-db-create-cj65m\" (UID: \"c52ccccd-b22d-48e7-a20f-a612751942e5\") " pod="openstack/nova-cell0-db-create-cj65m" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.260486 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"37e5bb25-0df5-40da-996d-b4e23120822b","Type":"ContainerDied","Data":"1e894c5e9e72d4692d6fc1218e7ce3ff629efdc35020ce6b741fc17e72214e0e"} Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.260521 4857 scope.go:117] "RemoveContainer" containerID="49ac69b05eee5d86114afb435aa0c9f9c1d54e38fdb7fd9f71f2ab23e6cf8c2f" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.263392 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.265147 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hnl5z\" (UniqueName: \"kubernetes.io/projected/28ddb94c-f564-4ac2-b665-02f5c1b7d96d-kube-api-access-hnl5z\") pod \"nova-api-379c-account-create-update-b9q87\" (UID: \"28ddb94c-f564-4ac2-b665-02f5c1b7d96d\") " pod="openstack/nova-api-379c-account-create-update-b9q87" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.273181 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-837f-account-create-update-x4tph"] Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.278824 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-837f-account-create-update-x4tph" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.281308 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.290178 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-837f-account-create-update-x4tph"] Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.340821 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-89kxj\" (UniqueName: \"kubernetes.io/projected/df3949de-0120-4289-8dfa-71e0ea70deaf-kube-api-access-89kxj\") pod \"nova-cell1-db-create-xwr28\" (UID: \"df3949de-0120-4289-8dfa-71e0ea70deaf\") " pod="openstack/nova-cell1-db-create-xwr28" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.340900 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f95fe79f-e849-42ef-bb76-1fe84548c3ae-operator-scripts\") pod \"nova-cell0-837f-account-create-update-x4tph\" (UID: \"f95fe79f-e849-42ef-bb76-1fe84548c3ae\") " pod="openstack/nova-cell0-837f-account-create-update-x4tph" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.341005 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-22c9s\" (UniqueName: \"kubernetes.io/projected/f95fe79f-e849-42ef-bb76-1fe84548c3ae-kube-api-access-22c9s\") pod \"nova-cell0-837f-account-create-update-x4tph\" (UID: \"f95fe79f-e849-42ef-bb76-1fe84548c3ae\") " pod="openstack/nova-cell0-837f-account-create-update-x4tph" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.341050 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/df3949de-0120-4289-8dfa-71e0ea70deaf-operator-scripts\") pod \"nova-cell1-db-create-xwr28\" (UID: \"df3949de-0120-4289-8dfa-71e0ea70deaf\") " pod="openstack/nova-cell1-db-create-xwr28" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.362710 4857 scope.go:117] "RemoveContainer" containerID="ce095129f118f2af696e99064550cdd5c3f0cc311b63fd0d6fe09c258eebd664" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.366123 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.387015 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.392064 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-cj65m" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.408348 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.410312 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-379c-account-create-update-b9q87" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.411530 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.422463 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.422637 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.428034 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-96aa-account-create-update-2bzpb"] Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.429340 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-96aa-account-create-update-2bzpb" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.431406 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.443997 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.452896 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-96aa-account-create-update-2bzpb"] Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.452921 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-89kxj\" (UniqueName: \"kubernetes.io/projected/df3949de-0120-4289-8dfa-71e0ea70deaf-kube-api-access-89kxj\") pod \"nova-cell1-db-create-xwr28\" (UID: \"df3949de-0120-4289-8dfa-71e0ea70deaf\") " pod="openstack/nova-cell1-db-create-xwr28" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.453249 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f95fe79f-e849-42ef-bb76-1fe84548c3ae-operator-scripts\") pod \"nova-cell0-837f-account-create-update-x4tph\" (UID: \"f95fe79f-e849-42ef-bb76-1fe84548c3ae\") " pod="openstack/nova-cell0-837f-account-create-update-x4tph" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.453348 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-22c9s\" (UniqueName: \"kubernetes.io/projected/f95fe79f-e849-42ef-bb76-1fe84548c3ae-kube-api-access-22c9s\") pod \"nova-cell0-837f-account-create-update-x4tph\" (UID: \"f95fe79f-e849-42ef-bb76-1fe84548c3ae\") " pod="openstack/nova-cell0-837f-account-create-update-x4tph" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.453382 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/df3949de-0120-4289-8dfa-71e0ea70deaf-operator-scripts\") pod \"nova-cell1-db-create-xwr28\" (UID: \"df3949de-0120-4289-8dfa-71e0ea70deaf\") " pod="openstack/nova-cell1-db-create-xwr28" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.454447 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f95fe79f-e849-42ef-bb76-1fe84548c3ae-operator-scripts\") pod \"nova-cell0-837f-account-create-update-x4tph\" (UID: \"f95fe79f-e849-42ef-bb76-1fe84548c3ae\") " pod="openstack/nova-cell0-837f-account-create-update-x4tph" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.458071 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/df3949de-0120-4289-8dfa-71e0ea70deaf-operator-scripts\") pod \"nova-cell1-db-create-xwr28\" (UID: \"df3949de-0120-4289-8dfa-71e0ea70deaf\") " pod="openstack/nova-cell1-db-create-xwr28" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.500304 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-89kxj\" (UniqueName: \"kubernetes.io/projected/df3949de-0120-4289-8dfa-71e0ea70deaf-kube-api-access-89kxj\") pod \"nova-cell1-db-create-xwr28\" (UID: \"df3949de-0120-4289-8dfa-71e0ea70deaf\") " pod="openstack/nova-cell1-db-create-xwr28" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.503990 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-22c9s\" (UniqueName: \"kubernetes.io/projected/f95fe79f-e849-42ef-bb76-1fe84548c3ae-kube-api-access-22c9s\") pod \"nova-cell0-837f-account-create-update-x4tph\" (UID: \"f95fe79f-e849-42ef-bb76-1fe84548c3ae\") " pod="openstack/nova-cell0-837f-account-create-update-x4tph" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.542869 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.559963 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.560047 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7bqjf\" (UniqueName: \"kubernetes.io/projected/c0cffe75-b02c-411f-8f5c-3eec6d36659d-kube-api-access-7bqjf\") pod \"nova-cell1-96aa-account-create-update-2bzpb\" (UID: \"c0cffe75-b02c-411f-8f5c-3eec6d36659d\") " pod="openstack/nova-cell1-96aa-account-create-update-2bzpb" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.560071 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.560087 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.560135 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.560169 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fvswz\" (UniqueName: \"kubernetes.io/projected/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-kube-api-access-fvswz\") pod 
\"glance-default-internal-api-0\" (UID: \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.560186 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-logs\") pod \"glance-default-internal-api-0\" (UID: \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.560203 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c0cffe75-b02c-411f-8f5c-3eec6d36659d-operator-scripts\") pod \"nova-cell1-96aa-account-create-update-2bzpb\" (UID: \"c0cffe75-b02c-411f-8f5c-3eec6d36659d\") " pod="openstack/nova-cell1-96aa-account-create-update-2bzpb" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.560247 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.560270 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.657164 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-xwr28" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.665909 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.666042 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.666104 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7bqjf\" (UniqueName: \"kubernetes.io/projected/c0cffe75-b02c-411f-8f5c-3eec6d36659d-kube-api-access-7bqjf\") pod \"nova-cell1-96aa-account-create-update-2bzpb\" (UID: \"c0cffe75-b02c-411f-8f5c-3eec6d36659d\") " pod="openstack/nova-cell1-96aa-account-create-update-2bzpb" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.666133 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.666158 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.666332 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.666429 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fvswz\" (UniqueName: \"kubernetes.io/projected/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-kube-api-access-fvswz\") pod \"glance-default-internal-api-0\" (UID: \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.666491 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-logs\") pod \"glance-default-internal-api-0\" (UID: \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.666515 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c0cffe75-b02c-411f-8f5c-3eec6d36659d-operator-scripts\") pod \"nova-cell1-96aa-account-create-update-2bzpb\" (UID: 
\"c0cffe75-b02c-411f-8f5c-3eec6d36659d\") " pod="openstack/nova-cell1-96aa-account-create-update-2bzpb" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.666594 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.667164 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/glance-default-internal-api-0" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.677808 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-logs\") pod \"glance-default-internal-api-0\" (UID: \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.677991 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.679926 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c0cffe75-b02c-411f-8f5c-3eec6d36659d-operator-scripts\") pod \"nova-cell1-96aa-account-create-update-2bzpb\" (UID: \"c0cffe75-b02c-411f-8f5c-3eec6d36659d\") " pod="openstack/nova-cell1-96aa-account-create-update-2bzpb" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.680484 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.680804 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-837f-account-create-update-x4tph" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.681089 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.682490 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.701792 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7bqjf\" (UniqueName: \"kubernetes.io/projected/c0cffe75-b02c-411f-8f5c-3eec6d36659d-kube-api-access-7bqjf\") pod \"nova-cell1-96aa-account-create-update-2bzpb\" (UID: \"c0cffe75-b02c-411f-8f5c-3eec6d36659d\") " pod="openstack/nova-cell1-96aa-account-create-update-2bzpb" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.706354 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.717580 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fvswz\" (UniqueName: \"kubernetes.io/projected/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-kube-api-access-fvswz\") pod \"glance-default-internal-api-0\" (UID: \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.726590 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-internal-api-0\" (UID: \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\") " pod="openstack/glance-default-internal-api-0" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.801383 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.802549 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-96aa-account-create-update-2bzpb" Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.830696 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-w6xq8"] Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.979404 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-cj65m"] Nov 28 13:52:34 crc kubenswrapper[4857]: I1128 13:52:34.997172 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-379c-account-create-update-b9q87"] Nov 28 13:52:35 crc kubenswrapper[4857]: I1128 13:52:35.271317 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-379c-account-create-update-b9q87" event={"ID":"28ddb94c-f564-4ac2-b665-02f5c1b7d96d","Type":"ContainerStarted","Data":"5765bb631fec44001d7e3963516105a594fb87a8b602d5622c7b4aaa0ffc2f74"} Nov 28 13:52:35 crc kubenswrapper[4857]: I1128 13:52:35.273425 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd","Type":"ContainerStarted","Data":"b9b1ae6b7705e6c16c24f2a3693230e80524ec07683afc7a556022f8669e5aaf"} Nov 28 13:52:35 crc kubenswrapper[4857]: I1128 13:52:35.275384 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a","Type":"ContainerStarted","Data":"ebb845d601c4a74f8af6d019352a46b6ea1e1380ef39ead8c8aff0023824c262"} Nov 28 13:52:35 crc kubenswrapper[4857]: I1128 13:52:35.276629 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-cj65m" event={"ID":"c52ccccd-b22d-48e7-a20f-a612751942e5","Type":"ContainerStarted","Data":"d38fec2a5db2cf41ea84951b41619bd08c3aa4368f52987336d9eaceee55555b"} Nov 28 13:52:35 crc kubenswrapper[4857]: I1128 13:52:35.278417 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-w6xq8" event={"ID":"6747722c-3764-4c1f-a2fb-5e604ccf27da","Type":"ContainerStarted","Data":"8034039ed401a4147c86a674f88cc06d86e7a31f09e512a6694fb6cc49c260f4"} Nov 28 13:52:35 crc kubenswrapper[4857]: I1128 13:52:35.452472 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-xwr28"] Nov 28 13:52:35 crc kubenswrapper[4857]: I1128 13:52:35.591288 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-96aa-account-create-update-2bzpb"] Nov 28 13:52:35 crc kubenswrapper[4857]: I1128 13:52:35.598870 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-837f-account-create-update-x4tph"] Nov 28 13:52:35 crc kubenswrapper[4857]: I1128 13:52:35.825246 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 13:52:35 crc kubenswrapper[4857]: W1128 13:52:35.949920 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda2d5c05c_573d_441d_bc1d_9c2b3b8fd92e.slice/crio-8e94c359ab01e9b810ccced94896ac9ef45f1b3a60a3963ce6a7e7bb3a2ad954 WatchSource:0}: Error finding container 8e94c359ab01e9b810ccced94896ac9ef45f1b3a60a3963ce6a7e7bb3a2ad954: Status 404 returned error can't find the container with id 8e94c359ab01e9b810ccced94896ac9ef45f1b3a60a3963ce6a7e7bb3a2ad954 Nov 28 13:52:36 crc kubenswrapper[4857]: I1128 13:52:36.059814 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:52:36 crc 
kubenswrapper[4857]: I1128 13:52:36.262926 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37e5bb25-0df5-40da-996d-b4e23120822b" path="/var/lib/kubelet/pods/37e5bb25-0df5-40da-996d-b4e23120822b/volumes" Nov 28 13:52:36 crc kubenswrapper[4857]: I1128 13:52:36.303528 4857 generic.go:334] "Generic (PLEG): container finished" podID="c52ccccd-b22d-48e7-a20f-a612751942e5" containerID="8bf280eb12748bb49a64625337d8c5ff217351191d99af8e84f34162408eb879" exitCode=0 Nov 28 13:52:36 crc kubenswrapper[4857]: I1128 13:52:36.303592 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-cj65m" event={"ID":"c52ccccd-b22d-48e7-a20f-a612751942e5","Type":"ContainerDied","Data":"8bf280eb12748bb49a64625337d8c5ff217351191d99af8e84f34162408eb879"} Nov 28 13:52:36 crc kubenswrapper[4857]: I1128 13:52:36.305068 4857 generic.go:334] "Generic (PLEG): container finished" podID="df3949de-0120-4289-8dfa-71e0ea70deaf" containerID="361d22577f867f73846943f2f7d6ad8aa5bad3ecfd7152d2ce7d1676a728ad7b" exitCode=0 Nov 28 13:52:36 crc kubenswrapper[4857]: I1128 13:52:36.305110 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-xwr28" event={"ID":"df3949de-0120-4289-8dfa-71e0ea70deaf","Type":"ContainerDied","Data":"361d22577f867f73846943f2f7d6ad8aa5bad3ecfd7152d2ce7d1676a728ad7b"} Nov 28 13:52:36 crc kubenswrapper[4857]: I1128 13:52:36.305126 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-xwr28" event={"ID":"df3949de-0120-4289-8dfa-71e0ea70deaf","Type":"ContainerStarted","Data":"70b456f4d6906c318a0f01320af43157838fd76ad092119655ab30860cf4a39e"} Nov 28 13:52:36 crc kubenswrapper[4857]: I1128 13:52:36.305919 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e","Type":"ContainerStarted","Data":"8e94c359ab01e9b810ccced94896ac9ef45f1b3a60a3963ce6a7e7bb3a2ad954"} Nov 28 13:52:36 crc kubenswrapper[4857]: I1128 13:52:36.314516 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-96aa-account-create-update-2bzpb" event={"ID":"c0cffe75-b02c-411f-8f5c-3eec6d36659d","Type":"ContainerStarted","Data":"e27595f02f253a15d73f31e94dd3ca259868157d7312b06629d00ea7f038667e"} Nov 28 13:52:36 crc kubenswrapper[4857]: I1128 13:52:36.314563 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-96aa-account-create-update-2bzpb" event={"ID":"c0cffe75-b02c-411f-8f5c-3eec6d36659d","Type":"ContainerStarted","Data":"f04e4b01885af6c5f28b6bf14651c005ca93a92f1bba19b29cb2f0e273755e0a"} Nov 28 13:52:36 crc kubenswrapper[4857]: I1128 13:52:36.318144 4857 generic.go:334] "Generic (PLEG): container finished" podID="28ddb94c-f564-4ac2-b665-02f5c1b7d96d" containerID="2216da7127d601e0f56f42ae7929937c3281f01c4aa95f88133a482b0170b592" exitCode=0 Nov 28 13:52:36 crc kubenswrapper[4857]: I1128 13:52:36.320316 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-379c-account-create-update-b9q87" event={"ID":"28ddb94c-f564-4ac2-b665-02f5c1b7d96d","Type":"ContainerDied","Data":"2216da7127d601e0f56f42ae7929937c3281f01c4aa95f88133a482b0170b592"} Nov 28 13:52:36 crc kubenswrapper[4857]: I1128 13:52:36.340175 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd","Type":"ContainerStarted","Data":"9d3d6c09860350f192015fe259ed4a4581cfb3b28583b924e843bda2835e7fb6"} Nov 28 
13:52:36 crc kubenswrapper[4857]: I1128 13:52:36.356547 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-837f-account-create-update-x4tph" event={"ID":"f95fe79f-e849-42ef-bb76-1fe84548c3ae","Type":"ContainerStarted","Data":"287f76eba3eb82eb4f37609b40da2cd1b1de08892bd1e15facb804dd6851a8c1"} Nov 28 13:52:36 crc kubenswrapper[4857]: I1128 13:52:36.356593 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-837f-account-create-update-x4tph" event={"ID":"f95fe79f-e849-42ef-bb76-1fe84548c3ae","Type":"ContainerStarted","Data":"25aca510d749cd27a3a83ee5d133bc25d658c1fd67e8f149f7c3875d040e3a8c"} Nov 28 13:52:36 crc kubenswrapper[4857]: I1128 13:52:36.373116 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-96aa-account-create-update-2bzpb" podStartSLOduration=2.373092983 podStartE2EDuration="2.373092983s" podCreationTimestamp="2025-11-28 13:52:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:52:36.344858229 +0000 UTC m=+1406.468799676" watchObservedRunningTime="2025-11-28 13:52:36.373092983 +0000 UTC m=+1406.497034420" Nov 28 13:52:36 crc kubenswrapper[4857]: I1128 13:52:36.389519 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a","Type":"ContainerStarted","Data":"fbb7d8f0bf5ce14f12aa752f5ba397a1ce9352ef7e7c6d9d000ec8414cd5fa73"} Nov 28 13:52:36 crc kubenswrapper[4857]: I1128 13:52:36.394664 4857 generic.go:334] "Generic (PLEG): container finished" podID="6747722c-3764-4c1f-a2fb-5e604ccf27da" containerID="b5bdcc04015076d00440bc794607b51a9b0e42405828b71f2c17a0fc67c38cfd" exitCode=0 Nov 28 13:52:36 crc kubenswrapper[4857]: I1128 13:52:36.394788 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-w6xq8" event={"ID":"6747722c-3764-4c1f-a2fb-5e604ccf27da","Type":"ContainerDied","Data":"b5bdcc04015076d00440bc794607b51a9b0e42405828b71f2c17a0fc67c38cfd"} Nov 28 13:52:37 crc kubenswrapper[4857]: I1128 13:52:37.416149 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd","Type":"ContainerStarted","Data":"7eaa667398c3e224c563e132d00cba92457ba8acc83d7c4ef18995674225d29a"} Nov 28 13:52:37 crc kubenswrapper[4857]: I1128 13:52:37.418984 4857 generic.go:334] "Generic (PLEG): container finished" podID="f95fe79f-e849-42ef-bb76-1fe84548c3ae" containerID="287f76eba3eb82eb4f37609b40da2cd1b1de08892bd1e15facb804dd6851a8c1" exitCode=0 Nov 28 13:52:37 crc kubenswrapper[4857]: I1128 13:52:37.419062 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-837f-account-create-update-x4tph" event={"ID":"f95fe79f-e849-42ef-bb76-1fe84548c3ae","Type":"ContainerDied","Data":"287f76eba3eb82eb4f37609b40da2cd1b1de08892bd1e15facb804dd6851a8c1"} Nov 28 13:52:37 crc kubenswrapper[4857]: I1128 13:52:37.424208 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a","Type":"ContainerStarted","Data":"a005b3e5458e4a1153902e3eceef5303f2ff864baaeae1cbe944d1dc7cd29024"} Nov 28 13:52:37 crc kubenswrapper[4857]: I1128 13:52:37.424253 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a","Type":"ContainerStarted","Data":"b226d001d2f99f1d3c486d0c7912339f57fb7482458cd7d270ff2987378d287a"} Nov 28 13:52:37 crc kubenswrapper[4857]: I1128 13:52:37.429755 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e","Type":"ContainerStarted","Data":"d99386d5fb6210356b4912e9ae342dc21823ae6279091264ad6b9328047690e7"} Nov 28 13:52:37 crc kubenswrapper[4857]: I1128 13:52:37.429794 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e","Type":"ContainerStarted","Data":"0e9047254aa31e8c764eaf4e0c00c00b8b889bb9a2e0ba225c71bf05a09cdad0"} Nov 28 13:52:37 crc kubenswrapper[4857]: I1128 13:52:37.435335 4857 generic.go:334] "Generic (PLEG): container finished" podID="c0cffe75-b02c-411f-8f5c-3eec6d36659d" containerID="e27595f02f253a15d73f31e94dd3ca259868157d7312b06629d00ea7f038667e" exitCode=0 Nov 28 13:52:37 crc kubenswrapper[4857]: I1128 13:52:37.435425 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-96aa-account-create-update-2bzpb" event={"ID":"c0cffe75-b02c-411f-8f5c-3eec6d36659d","Type":"ContainerDied","Data":"e27595f02f253a15d73f31e94dd3ca259868157d7312b06629d00ea7f038667e"} Nov 28 13:52:37 crc kubenswrapper[4857]: I1128 13:52:37.449480 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.449454163 podStartE2EDuration="4.449454163s" podCreationTimestamp="2025-11-28 13:52:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:52:37.438702409 +0000 UTC m=+1407.562643856" watchObservedRunningTime="2025-11-28 13:52:37.449454163 +0000 UTC m=+1407.573395600" Nov 28 13:52:37 crc kubenswrapper[4857]: I1128 13:52:37.470855 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.470816575 podStartE2EDuration="3.470816575s" podCreationTimestamp="2025-11-28 13:52:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:52:37.464377696 +0000 UTC m=+1407.588319133" watchObservedRunningTime="2025-11-28 13:52:37.470816575 +0000 UTC m=+1407.594758012" Nov 28 13:52:37 crc kubenswrapper[4857]: I1128 13:52:37.863541 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-xwr28" Nov 28 13:52:37 crc kubenswrapper[4857]: I1128 13:52:37.948117 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/df3949de-0120-4289-8dfa-71e0ea70deaf-operator-scripts\") pod \"df3949de-0120-4289-8dfa-71e0ea70deaf\" (UID: \"df3949de-0120-4289-8dfa-71e0ea70deaf\") " Nov 28 13:52:37 crc kubenswrapper[4857]: I1128 13:52:37.948218 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-89kxj\" (UniqueName: \"kubernetes.io/projected/df3949de-0120-4289-8dfa-71e0ea70deaf-kube-api-access-89kxj\") pod \"df3949de-0120-4289-8dfa-71e0ea70deaf\" (UID: \"df3949de-0120-4289-8dfa-71e0ea70deaf\") " Nov 28 13:52:37 crc kubenswrapper[4857]: I1128 13:52:37.949261 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/df3949de-0120-4289-8dfa-71e0ea70deaf-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "df3949de-0120-4289-8dfa-71e0ea70deaf" (UID: "df3949de-0120-4289-8dfa-71e0ea70deaf"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:52:37 crc kubenswrapper[4857]: I1128 13:52:37.958074 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df3949de-0120-4289-8dfa-71e0ea70deaf-kube-api-access-89kxj" (OuterVolumeSpecName: "kube-api-access-89kxj") pod "df3949de-0120-4289-8dfa-71e0ea70deaf" (UID: "df3949de-0120-4289-8dfa-71e0ea70deaf"). InnerVolumeSpecName "kube-api-access-89kxj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.045699 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-837f-account-create-update-x4tph" Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.050826 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/df3949de-0120-4289-8dfa-71e0ea70deaf-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.050901 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-89kxj\" (UniqueName: \"kubernetes.io/projected/df3949de-0120-4289-8dfa-71e0ea70deaf-kube-api-access-89kxj\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.059171 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-cj65m" Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.152601 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fp44p\" (UniqueName: \"kubernetes.io/projected/c52ccccd-b22d-48e7-a20f-a612751942e5-kube-api-access-fp44p\") pod \"c52ccccd-b22d-48e7-a20f-a612751942e5\" (UID: \"c52ccccd-b22d-48e7-a20f-a612751942e5\") " Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.152768 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c52ccccd-b22d-48e7-a20f-a612751942e5-operator-scripts\") pod \"c52ccccd-b22d-48e7-a20f-a612751942e5\" (UID: \"c52ccccd-b22d-48e7-a20f-a612751942e5\") " Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.152801 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f95fe79f-e849-42ef-bb76-1fe84548c3ae-operator-scripts\") pod \"f95fe79f-e849-42ef-bb76-1fe84548c3ae\" (UID: \"f95fe79f-e849-42ef-bb76-1fe84548c3ae\") " Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.152903 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-22c9s\" (UniqueName: \"kubernetes.io/projected/f95fe79f-e849-42ef-bb76-1fe84548c3ae-kube-api-access-22c9s\") pod \"f95fe79f-e849-42ef-bb76-1fe84548c3ae\" (UID: \"f95fe79f-e849-42ef-bb76-1fe84548c3ae\") " Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.155468 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c52ccccd-b22d-48e7-a20f-a612751942e5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c52ccccd-b22d-48e7-a20f-a612751942e5" (UID: "c52ccccd-b22d-48e7-a20f-a612751942e5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.156366 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f95fe79f-e849-42ef-bb76-1fe84548c3ae-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f95fe79f-e849-42ef-bb76-1fe84548c3ae" (UID: "f95fe79f-e849-42ef-bb76-1fe84548c3ae"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.162121 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f95fe79f-e849-42ef-bb76-1fe84548c3ae-kube-api-access-22c9s" (OuterVolumeSpecName: "kube-api-access-22c9s") pod "f95fe79f-e849-42ef-bb76-1fe84548c3ae" (UID: "f95fe79f-e849-42ef-bb76-1fe84548c3ae"). InnerVolumeSpecName "kube-api-access-22c9s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.170171 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c52ccccd-b22d-48e7-a20f-a612751942e5-kube-api-access-fp44p" (OuterVolumeSpecName: "kube-api-access-fp44p") pod "c52ccccd-b22d-48e7-a20f-a612751942e5" (UID: "c52ccccd-b22d-48e7-a20f-a612751942e5"). InnerVolumeSpecName "kube-api-access-fp44p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.263224 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-w6xq8" Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.265001 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-379c-account-create-update-b9q87" Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.267508 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fp44p\" (UniqueName: \"kubernetes.io/projected/c52ccccd-b22d-48e7-a20f-a612751942e5-kube-api-access-fp44p\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.267542 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c52ccccd-b22d-48e7-a20f-a612751942e5-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.267553 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f95fe79f-e849-42ef-bb76-1fe84548c3ae-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.267568 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-22c9s\" (UniqueName: \"kubernetes.io/projected/f95fe79f-e849-42ef-bb76-1fe84548c3ae-kube-api-access-22c9s\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.373598 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6747722c-3764-4c1f-a2fb-5e604ccf27da-operator-scripts\") pod \"6747722c-3764-4c1f-a2fb-5e604ccf27da\" (UID: \"6747722c-3764-4c1f-a2fb-5e604ccf27da\") " Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.373701 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hnl5z\" (UniqueName: \"kubernetes.io/projected/28ddb94c-f564-4ac2-b665-02f5c1b7d96d-kube-api-access-hnl5z\") pod \"28ddb94c-f564-4ac2-b665-02f5c1b7d96d\" (UID: \"28ddb94c-f564-4ac2-b665-02f5c1b7d96d\") " Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.374133 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/28ddb94c-f564-4ac2-b665-02f5c1b7d96d-operator-scripts\") pod \"28ddb94c-f564-4ac2-b665-02f5c1b7d96d\" (UID: \"28ddb94c-f564-4ac2-b665-02f5c1b7d96d\") " Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.374602 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cwz4d\" (UniqueName: \"kubernetes.io/projected/6747722c-3764-4c1f-a2fb-5e604ccf27da-kube-api-access-cwz4d\") pod \"6747722c-3764-4c1f-a2fb-5e604ccf27da\" (UID: \"6747722c-3764-4c1f-a2fb-5e604ccf27da\") " Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.375326 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/28ddb94c-f564-4ac2-b665-02f5c1b7d96d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "28ddb94c-f564-4ac2-b665-02f5c1b7d96d" (UID: "28ddb94c-f564-4ac2-b665-02f5c1b7d96d"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.375559 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/28ddb94c-f564-4ac2-b665-02f5c1b7d96d-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.375899 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6747722c-3764-4c1f-a2fb-5e604ccf27da-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6747722c-3764-4c1f-a2fb-5e604ccf27da" (UID: "6747722c-3764-4c1f-a2fb-5e604ccf27da"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.380399 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28ddb94c-f564-4ac2-b665-02f5c1b7d96d-kube-api-access-hnl5z" (OuterVolumeSpecName: "kube-api-access-hnl5z") pod "28ddb94c-f564-4ac2-b665-02f5c1b7d96d" (UID: "28ddb94c-f564-4ac2-b665-02f5c1b7d96d"). InnerVolumeSpecName "kube-api-access-hnl5z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.395842 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6747722c-3764-4c1f-a2fb-5e604ccf27da-kube-api-access-cwz4d" (OuterVolumeSpecName: "kube-api-access-cwz4d") pod "6747722c-3764-4c1f-a2fb-5e604ccf27da" (UID: "6747722c-3764-4c1f-a2fb-5e604ccf27da"). InnerVolumeSpecName "kube-api-access-cwz4d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.445405 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-379c-account-create-update-b9q87" Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.445420 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-379c-account-create-update-b9q87" event={"ID":"28ddb94c-f564-4ac2-b665-02f5c1b7d96d","Type":"ContainerDied","Data":"5765bb631fec44001d7e3963516105a594fb87a8b602d5622c7b4aaa0ffc2f74"} Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.445457 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5765bb631fec44001d7e3963516105a594fb87a8b602d5622c7b4aaa0ffc2f74" Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.447439 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-837f-account-create-update-x4tph" event={"ID":"f95fe79f-e849-42ef-bb76-1fe84548c3ae","Type":"ContainerDied","Data":"25aca510d749cd27a3a83ee5d133bc25d658c1fd67e8f149f7c3875d040e3a8c"} Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.447459 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="25aca510d749cd27a3a83ee5d133bc25d658c1fd67e8f149f7c3875d040e3a8c" Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.447468 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-837f-account-create-update-x4tph" Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.449216 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-cj65m" event={"ID":"c52ccccd-b22d-48e7-a20f-a612751942e5","Type":"ContainerDied","Data":"d38fec2a5db2cf41ea84951b41619bd08c3aa4368f52987336d9eaceee55555b"} Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.449255 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d38fec2a5db2cf41ea84951b41619bd08c3aa4368f52987336d9eaceee55555b" Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.449386 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-cj65m" Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.451504 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-w6xq8" event={"ID":"6747722c-3764-4c1f-a2fb-5e604ccf27da","Type":"ContainerDied","Data":"8034039ed401a4147c86a674f88cc06d86e7a31f09e512a6694fb6cc49c260f4"} Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.451542 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8034039ed401a4147c86a674f88cc06d86e7a31f09e512a6694fb6cc49c260f4" Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.451592 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-w6xq8" Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.454203 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-xwr28" Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.454330 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-xwr28" event={"ID":"df3949de-0120-4289-8dfa-71e0ea70deaf","Type":"ContainerDied","Data":"70b456f4d6906c318a0f01320af43157838fd76ad092119655ab30860cf4a39e"} Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.454433 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="70b456f4d6906c318a0f01320af43157838fd76ad092119655ab30860cf4a39e" Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.476916 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cwz4d\" (UniqueName: \"kubernetes.io/projected/6747722c-3764-4c1f-a2fb-5e604ccf27da-kube-api-access-cwz4d\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.477118 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6747722c-3764-4c1f-a2fb-5e604ccf27da-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.477179 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hnl5z\" (UniqueName: \"kubernetes.io/projected/28ddb94c-f564-4ac2-b665-02f5c1b7d96d-kube-api-access-hnl5z\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.714676 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-96aa-account-create-update-2bzpb" Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.781780 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7bqjf\" (UniqueName: \"kubernetes.io/projected/c0cffe75-b02c-411f-8f5c-3eec6d36659d-kube-api-access-7bqjf\") pod \"c0cffe75-b02c-411f-8f5c-3eec6d36659d\" (UID: \"c0cffe75-b02c-411f-8f5c-3eec6d36659d\") " Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.781962 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c0cffe75-b02c-411f-8f5c-3eec6d36659d-operator-scripts\") pod \"c0cffe75-b02c-411f-8f5c-3eec6d36659d\" (UID: \"c0cffe75-b02c-411f-8f5c-3eec6d36659d\") " Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.782679 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c0cffe75-b02c-411f-8f5c-3eec6d36659d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c0cffe75-b02c-411f-8f5c-3eec6d36659d" (UID: "c0cffe75-b02c-411f-8f5c-3eec6d36659d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.786808 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c0cffe75-b02c-411f-8f5c-3eec6d36659d-kube-api-access-7bqjf" (OuterVolumeSpecName: "kube-api-access-7bqjf") pod "c0cffe75-b02c-411f-8f5c-3eec6d36659d" (UID: "c0cffe75-b02c-411f-8f5c-3eec6d36659d"). InnerVolumeSpecName "kube-api-access-7bqjf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.884763 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7bqjf\" (UniqueName: \"kubernetes.io/projected/c0cffe75-b02c-411f-8f5c-3eec6d36659d-kube-api-access-7bqjf\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:38 crc kubenswrapper[4857]: I1128 13:52:38.884796 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c0cffe75-b02c-411f-8f5c-3eec6d36659d-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:39 crc kubenswrapper[4857]: I1128 13:52:39.465577 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a","Type":"ContainerStarted","Data":"82dfef1adc4df63d4acee7ce4e1ed13606f5d853a64a141bfc0524e5cff90910"} Nov 28 13:52:39 crc kubenswrapper[4857]: I1128 13:52:39.465697 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a" containerName="ceilometer-central-agent" containerID="cri-o://fbb7d8f0bf5ce14f12aa752f5ba397a1ce9352ef7e7c6d9d000ec8414cd5fa73" gracePeriod=30 Nov 28 13:52:39 crc kubenswrapper[4857]: I1128 13:52:39.465766 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a" containerName="proxy-httpd" containerID="cri-o://82dfef1adc4df63d4acee7ce4e1ed13606f5d853a64a141bfc0524e5cff90910" gracePeriod=30 Nov 28 13:52:39 crc kubenswrapper[4857]: I1128 13:52:39.465791 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a" containerName="sg-core" 
containerID="cri-o://a005b3e5458e4a1153902e3eceef5303f2ff864baaeae1cbe944d1dc7cd29024" gracePeriod=30 Nov 28 13:52:39 crc kubenswrapper[4857]: I1128 13:52:39.465845 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a" containerName="ceilometer-notification-agent" containerID="cri-o://b226d001d2f99f1d3c486d0c7912339f57fb7482458cd7d270ff2987378d287a" gracePeriod=30 Nov 28 13:52:39 crc kubenswrapper[4857]: I1128 13:52:39.465967 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 13:52:39 crc kubenswrapper[4857]: I1128 13:52:39.469382 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-96aa-account-create-update-2bzpb" event={"ID":"c0cffe75-b02c-411f-8f5c-3eec6d36659d","Type":"ContainerDied","Data":"f04e4b01885af6c5f28b6bf14651c005ca93a92f1bba19b29cb2f0e273755e0a"} Nov 28 13:52:39 crc kubenswrapper[4857]: I1128 13:52:39.469424 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f04e4b01885af6c5f28b6bf14651c005ca93a92f1bba19b29cb2f0e273755e0a" Nov 28 13:52:39 crc kubenswrapper[4857]: I1128 13:52:39.469440 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-96aa-account-create-update-2bzpb" Nov 28 13:52:39 crc kubenswrapper[4857]: I1128 13:52:39.496184 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.040472763 podStartE2EDuration="6.496165868s" podCreationTimestamp="2025-11-28 13:52:33 +0000 UTC" firstStartedPulling="2025-11-28 13:52:34.205571244 +0000 UTC m=+1404.329512681" lastFinishedPulling="2025-11-28 13:52:38.661264349 +0000 UTC m=+1408.785205786" observedRunningTime="2025-11-28 13:52:39.494060713 +0000 UTC m=+1409.618002150" watchObservedRunningTime="2025-11-28 13:52:39.496165868 +0000 UTC m=+1409.620107315" Nov 28 13:52:40 crc kubenswrapper[4857]: I1128 13:52:40.482588 4857 generic.go:334] "Generic (PLEG): container finished" podID="ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a" containerID="82dfef1adc4df63d4acee7ce4e1ed13606f5d853a64a141bfc0524e5cff90910" exitCode=0 Nov 28 13:52:40 crc kubenswrapper[4857]: I1128 13:52:40.482967 4857 generic.go:334] "Generic (PLEG): container finished" podID="ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a" containerID="a005b3e5458e4a1153902e3eceef5303f2ff864baaeae1cbe944d1dc7cd29024" exitCode=2 Nov 28 13:52:40 crc kubenswrapper[4857]: I1128 13:52:40.482977 4857 generic.go:334] "Generic (PLEG): container finished" podID="ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a" containerID="b226d001d2f99f1d3c486d0c7912339f57fb7482458cd7d270ff2987378d287a" exitCode=0 Nov 28 13:52:40 crc kubenswrapper[4857]: I1128 13:52:40.482629 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a","Type":"ContainerDied","Data":"82dfef1adc4df63d4acee7ce4e1ed13606f5d853a64a141bfc0524e5cff90910"} Nov 28 13:52:40 crc kubenswrapper[4857]: I1128 13:52:40.483014 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a","Type":"ContainerDied","Data":"a005b3e5458e4a1153902e3eceef5303f2ff864baaeae1cbe944d1dc7cd29024"} Nov 28 13:52:40 crc kubenswrapper[4857]: I1128 13:52:40.483028 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a","Type":"ContainerDied","Data":"b226d001d2f99f1d3c486d0c7912339f57fb7482458cd7d270ff2987378d287a"} Nov 28 13:52:41 crc kubenswrapper[4857]: I1128 13:52:41.308603 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:52:41 crc kubenswrapper[4857]: I1128 13:52:41.308671 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:52:42 crc kubenswrapper[4857]: I1128 13:52:42.509306 4857 generic.go:334] "Generic (PLEG): container finished" podID="ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a" containerID="fbb7d8f0bf5ce14f12aa752f5ba397a1ce9352ef7e7c6d9d000ec8414cd5fa73" exitCode=0 Nov 28 13:52:42 crc kubenswrapper[4857]: I1128 13:52:42.509644 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a","Type":"ContainerDied","Data":"fbb7d8f0bf5ce14f12aa752f5ba397a1ce9352ef7e7c6d9d000ec8414cd5fa73"} Nov 28 13:52:42 crc kubenswrapper[4857]: I1128 13:52:42.750183 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:52:42 crc kubenswrapper[4857]: I1128 13:52:42.850979 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-log-httpd\") pod \"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a\" (UID: \"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a\") " Nov 28 13:52:42 crc kubenswrapper[4857]: I1128 13:52:42.851090 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-combined-ca-bundle\") pod \"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a\" (UID: \"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a\") " Nov 28 13:52:42 crc kubenswrapper[4857]: I1128 13:52:42.851113 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-run-httpd\") pod \"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a\" (UID: \"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a\") " Nov 28 13:52:42 crc kubenswrapper[4857]: I1128 13:52:42.851129 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-sg-core-conf-yaml\") pod \"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a\" (UID: \"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a\") " Nov 28 13:52:42 crc kubenswrapper[4857]: I1128 13:52:42.851189 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r2g8t\" (UniqueName: \"kubernetes.io/projected/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-kube-api-access-r2g8t\") pod \"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a\" (UID: \"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a\") " Nov 28 13:52:42 crc kubenswrapper[4857]: I1128 13:52:42.851223 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-scripts\") pod \"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a\" (UID: \"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a\") " Nov 28 13:52:42 crc kubenswrapper[4857]: I1128 13:52:42.851240 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-config-data\") pod \"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a\" (UID: \"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a\") " Nov 28 13:52:42 crc kubenswrapper[4857]: I1128 13:52:42.852611 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a" (UID: "ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:52:42 crc kubenswrapper[4857]: I1128 13:52:42.852862 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a" (UID: "ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:52:42 crc kubenswrapper[4857]: I1128 13:52:42.857023 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-kube-api-access-r2g8t" (OuterVolumeSpecName: "kube-api-access-r2g8t") pod "ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a" (UID: "ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a"). InnerVolumeSpecName "kube-api-access-r2g8t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:52:42 crc kubenswrapper[4857]: I1128 13:52:42.857641 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-scripts" (OuterVolumeSpecName: "scripts") pod "ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a" (UID: "ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:52:42 crc kubenswrapper[4857]: I1128 13:52:42.883101 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a" (UID: "ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:52:42 crc kubenswrapper[4857]: I1128 13:52:42.926331 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a" (UID: "ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:52:42 crc kubenswrapper[4857]: I1128 13:52:42.953253 4857 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:42 crc kubenswrapper[4857]: I1128 13:52:42.953289 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:42 crc kubenswrapper[4857]: I1128 13:52:42.953301 4857 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:42 crc kubenswrapper[4857]: I1128 13:52:42.953309 4857 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:42 crc kubenswrapper[4857]: I1128 13:52:42.953318 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r2g8t\" (UniqueName: \"kubernetes.io/projected/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-kube-api-access-r2g8t\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:42 crc kubenswrapper[4857]: I1128 13:52:42.953327 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:42 crc kubenswrapper[4857]: I1128 13:52:42.954760 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-config-data" (OuterVolumeSpecName: "config-data") pod "ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a" (UID: "ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.056225 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.520305 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a","Type":"ContainerDied","Data":"ebb845d601c4a74f8af6d019352a46b6ea1e1380ef39ead8c8aff0023824c262"} Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.520351 4857 util.go:48] "No ready sandbox for pod can be found. 
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.520371 4857 scope.go:117] "RemoveContainer" containerID="82dfef1adc4df63d4acee7ce4e1ed13606f5d853a64a141bfc0524e5cff90910"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.559978 4857 scope.go:117] "RemoveContainer" containerID="a005b3e5458e4a1153902e3eceef5303f2ff864baaeae1cbe944d1dc7cd29024"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.577694 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.585153 4857 scope.go:117] "RemoveContainer" containerID="b226d001d2f99f1d3c486d0c7912339f57fb7482458cd7d270ff2987378d287a"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.597994 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.611491 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 28 13:52:43 crc kubenswrapper[4857]: E1128 13:52:43.612533 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a" containerName="ceilometer-notification-agent"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.612552 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a" containerName="ceilometer-notification-agent"
Nov 28 13:52:43 crc kubenswrapper[4857]: E1128 13:52:43.612571 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f95fe79f-e849-42ef-bb76-1fe84548c3ae" containerName="mariadb-account-create-update"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.612578 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f95fe79f-e849-42ef-bb76-1fe84548c3ae" containerName="mariadb-account-create-update"
Nov 28 13:52:43 crc kubenswrapper[4857]: E1128 13:52:43.612592 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a" containerName="sg-core"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.612600 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a" containerName="sg-core"
Nov 28 13:52:43 crc kubenswrapper[4857]: E1128 13:52:43.612614 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28ddb94c-f564-4ac2-b665-02f5c1b7d96d" containerName="mariadb-account-create-update"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.612630 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="28ddb94c-f564-4ac2-b665-02f5c1b7d96d" containerName="mariadb-account-create-update"
Nov 28 13:52:43 crc kubenswrapper[4857]: E1128 13:52:43.612651 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6747722c-3764-4c1f-a2fb-5e604ccf27da" containerName="mariadb-database-create"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.612659 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="6747722c-3764-4c1f-a2fb-5e604ccf27da" containerName="mariadb-database-create"
Nov 28 13:52:43 crc kubenswrapper[4857]: E1128 13:52:43.612674 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0cffe75-b02c-411f-8f5c-3eec6d36659d" containerName="mariadb-account-create-update"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.612692 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0cffe75-b02c-411f-8f5c-3eec6d36659d" containerName="mariadb-account-create-update"
Nov 28 13:52:43 crc kubenswrapper[4857]: E1128 13:52:43.612706 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a" containerName="ceilometer-central-agent"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.612713 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a" containerName="ceilometer-central-agent"
Nov 28 13:52:43 crc kubenswrapper[4857]: E1128 13:52:43.612760 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a" containerName="proxy-httpd"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.612769 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a" containerName="proxy-httpd"
Nov 28 13:52:43 crc kubenswrapper[4857]: E1128 13:52:43.612780 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df3949de-0120-4289-8dfa-71e0ea70deaf" containerName="mariadb-database-create"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.612787 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="df3949de-0120-4289-8dfa-71e0ea70deaf" containerName="mariadb-database-create"
Nov 28 13:52:43 crc kubenswrapper[4857]: E1128 13:52:43.612800 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c52ccccd-b22d-48e7-a20f-a612751942e5" containerName="mariadb-database-create"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.612807 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="c52ccccd-b22d-48e7-a20f-a612751942e5" containerName="mariadb-database-create"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.613074 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="c52ccccd-b22d-48e7-a20f-a612751942e5" containerName="mariadb-database-create"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.613094 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a" containerName="ceilometer-notification-agent"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.613108 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0cffe75-b02c-411f-8f5c-3eec6d36659d" containerName="mariadb-account-create-update"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.613131 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="6747722c-3764-4c1f-a2fb-5e604ccf27da" containerName="mariadb-database-create"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.613142 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a" containerName="sg-core"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.613176 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="28ddb94c-f564-4ac2-b665-02f5c1b7d96d" containerName="mariadb-account-create-update"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.613209 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="df3949de-0120-4289-8dfa-71e0ea70deaf" containerName="mariadb-database-create"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.613221 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a" containerName="ceilometer-central-agent"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.613233 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a" containerName="proxy-httpd"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.613243 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f95fe79f-e849-42ef-bb76-1fe84548c3ae" containerName="mariadb-account-create-update"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.615341 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.619984 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.620118 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.620185 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.629455 4857 scope.go:117] "RemoveContainer" containerID="fbb7d8f0bf5ce14f12aa752f5ba397a1ce9352ef7e7c6d9d000ec8414cd5fa73"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.645034 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.645091 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.668317 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/022ba553-92f0-4f30-a239-8a0e69c0fd8e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"022ba553-92f0-4f30-a239-8a0e69c0fd8e\") " pod="openstack/ceilometer-0"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.668445 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/022ba553-92f0-4f30-a239-8a0e69c0fd8e-run-httpd\") pod \"ceilometer-0\" (UID: \"022ba553-92f0-4f30-a239-8a0e69c0fd8e\") " pod="openstack/ceilometer-0"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.668477 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/022ba553-92f0-4f30-a239-8a0e69c0fd8e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"022ba553-92f0-4f30-a239-8a0e69c0fd8e\") " pod="openstack/ceilometer-0"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.668565 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bn7sf\" (UniqueName: \"kubernetes.io/projected/022ba553-92f0-4f30-a239-8a0e69c0fd8e-kube-api-access-bn7sf\") pod \"ceilometer-0\" (UID: \"022ba553-92f0-4f30-a239-8a0e69c0fd8e\") " pod="openstack/ceilometer-0"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.668627 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/022ba553-92f0-4f30-a239-8a0e69c0fd8e-scripts\") pod \"ceilometer-0\" (UID: \"022ba553-92f0-4f30-a239-8a0e69c0fd8e\") " pod="openstack/ceilometer-0"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.669162 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/022ba553-92f0-4f30-a239-8a0e69c0fd8e-config-data\") pod \"ceilometer-0\" (UID: \"022ba553-92f0-4f30-a239-8a0e69c0fd8e\") " pod="openstack/ceilometer-0"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.669203 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/022ba553-92f0-4f30-a239-8a0e69c0fd8e-log-httpd\") pod \"ceilometer-0\" (UID: \"022ba553-92f0-4f30-a239-8a0e69c0fd8e\") " pod="openstack/ceilometer-0"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.674483 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.690556 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.771927 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/022ba553-92f0-4f30-a239-8a0e69c0fd8e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"022ba553-92f0-4f30-a239-8a0e69c0fd8e\") " pod="openstack/ceilometer-0"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.772031 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/022ba553-92f0-4f30-a239-8a0e69c0fd8e-run-httpd\") pod \"ceilometer-0\" (UID: \"022ba553-92f0-4f30-a239-8a0e69c0fd8e\") " pod="openstack/ceilometer-0"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.772055 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/022ba553-92f0-4f30-a239-8a0e69c0fd8e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"022ba553-92f0-4f30-a239-8a0e69c0fd8e\") " pod="openstack/ceilometer-0"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.772342 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bn7sf\" (UniqueName: \"kubernetes.io/projected/022ba553-92f0-4f30-a239-8a0e69c0fd8e-kube-api-access-bn7sf\") pod \"ceilometer-0\" (UID: \"022ba553-92f0-4f30-a239-8a0e69c0fd8e\") " pod="openstack/ceilometer-0"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.772411 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/022ba553-92f0-4f30-a239-8a0e69c0fd8e-scripts\") pod \"ceilometer-0\" (UID: \"022ba553-92f0-4f30-a239-8a0e69c0fd8e\") " pod="openstack/ceilometer-0"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.772472 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/022ba553-92f0-4f30-a239-8a0e69c0fd8e-config-data\") pod \"ceilometer-0\" (UID: \"022ba553-92f0-4f30-a239-8a0e69c0fd8e\") " pod="openstack/ceilometer-0"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.772490 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/022ba553-92f0-4f30-a239-8a0e69c0fd8e-log-httpd\") pod \"ceilometer-0\" (UID: \"022ba553-92f0-4f30-a239-8a0e69c0fd8e\") " pod="openstack/ceilometer-0"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.773466 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/022ba553-92f0-4f30-a239-8a0e69c0fd8e-run-httpd\") pod \"ceilometer-0\" (UID: \"022ba553-92f0-4f30-a239-8a0e69c0fd8e\") " pod="openstack/ceilometer-0"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.773704 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/022ba553-92f0-4f30-a239-8a0e69c0fd8e-log-httpd\") pod \"ceilometer-0\" (UID: \"022ba553-92f0-4f30-a239-8a0e69c0fd8e\") " pod="openstack/ceilometer-0"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.777042 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/022ba553-92f0-4f30-a239-8a0e69c0fd8e-scripts\") pod \"ceilometer-0\" (UID: \"022ba553-92f0-4f30-a239-8a0e69c0fd8e\") " pod="openstack/ceilometer-0"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.777854 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/022ba553-92f0-4f30-a239-8a0e69c0fd8e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"022ba553-92f0-4f30-a239-8a0e69c0fd8e\") " pod="openstack/ceilometer-0"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.778760 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/022ba553-92f0-4f30-a239-8a0e69c0fd8e-config-data\") pod \"ceilometer-0\" (UID: \"022ba553-92f0-4f30-a239-8a0e69c0fd8e\") " pod="openstack/ceilometer-0"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.780764 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/022ba553-92f0-4f30-a239-8a0e69c0fd8e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"022ba553-92f0-4f30-a239-8a0e69c0fd8e\") " pod="openstack/ceilometer-0"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.796588 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bn7sf\" (UniqueName: \"kubernetes.io/projected/022ba553-92f0-4f30-a239-8a0e69c0fd8e-kube-api-access-bn7sf\") pod \"ceilometer-0\" (UID: \"022ba553-92f0-4f30-a239-8a0e69c0fd8e\") " pod="openstack/ceilometer-0"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.936877 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 13:52:43 crc kubenswrapper[4857]: I1128 13:52:43.963655 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 13:52:44 crc kubenswrapper[4857]: I1128 13:52:44.252585 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a" path="/var/lib/kubelet/pods/ab9cc5af-eee9-4c8a-a6a2-aa5b7d5f337a/volumes"
Nov 28 13:52:44 crc kubenswrapper[4857]: I1128 13:52:44.267000 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-jvrc5"]
Nov 28 13:52:44 crc kubenswrapper[4857]: I1128 13:52:44.268322 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-jvrc5"
Nov 28 13:52:44 crc kubenswrapper[4857]: I1128 13:52:44.274204 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts"
Nov 28 13:52:44 crc kubenswrapper[4857]: I1128 13:52:44.274452 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Nov 28 13:52:44 crc kubenswrapper[4857]: I1128 13:52:44.274616 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-982cr"
Nov 28 13:52:44 crc kubenswrapper[4857]: I1128 13:52:44.282646 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b492461f-69d0-4020-84d9-68adae5ebe0e-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-jvrc5\" (UID: \"b492461f-69d0-4020-84d9-68adae5ebe0e\") " pod="openstack/nova-cell0-conductor-db-sync-jvrc5"
Nov 28 13:52:44 crc kubenswrapper[4857]: I1128 13:52:44.282719 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b492461f-69d0-4020-84d9-68adae5ebe0e-scripts\") pod \"nova-cell0-conductor-db-sync-jvrc5\" (UID: \"b492461f-69d0-4020-84d9-68adae5ebe0e\") " pod="openstack/nova-cell0-conductor-db-sync-jvrc5"
Nov 28 13:52:44 crc kubenswrapper[4857]: I1128 13:52:44.282824 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b492461f-69d0-4020-84d9-68adae5ebe0e-config-data\") pod \"nova-cell0-conductor-db-sync-jvrc5\" (UID: \"b492461f-69d0-4020-84d9-68adae5ebe0e\") " pod="openstack/nova-cell0-conductor-db-sync-jvrc5"
Nov 28 13:52:44 crc kubenswrapper[4857]: I1128 13:52:44.282853 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-js6j2\" (UniqueName: \"kubernetes.io/projected/b492461f-69d0-4020-84d9-68adae5ebe0e-kube-api-access-js6j2\") pod \"nova-cell0-conductor-db-sync-jvrc5\" (UID: \"b492461f-69d0-4020-84d9-68adae5ebe0e\") " pod="openstack/nova-cell0-conductor-db-sync-jvrc5"
Nov 28 13:52:44 crc kubenswrapper[4857]: I1128 13:52:44.287868 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-jvrc5"]
Nov 28 13:52:44 crc kubenswrapper[4857]: I1128 13:52:44.384632 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b492461f-69d0-4020-84d9-68adae5ebe0e-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-jvrc5\" (UID: \"b492461f-69d0-4020-84d9-68adae5ebe0e\") " pod="openstack/nova-cell0-conductor-db-sync-jvrc5"
Nov 28 13:52:44 crc kubenswrapper[4857]: I1128 13:52:44.384718 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b492461f-69d0-4020-84d9-68adae5ebe0e-scripts\") pod \"nova-cell0-conductor-db-sync-jvrc5\" (UID: \"b492461f-69d0-4020-84d9-68adae5ebe0e\") " pod="openstack/nova-cell0-conductor-db-sync-jvrc5"
Nov 28 13:52:44 crc kubenswrapper[4857]: I1128 13:52:44.384797 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b492461f-69d0-4020-84d9-68adae5ebe0e-config-data\") pod \"nova-cell0-conductor-db-sync-jvrc5\" (UID: \"b492461f-69d0-4020-84d9-68adae5ebe0e\") " pod="openstack/nova-cell0-conductor-db-sync-jvrc5"
Nov 28 13:52:44 crc kubenswrapper[4857]: I1128 13:52:44.384817 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-js6j2\" (UniqueName: \"kubernetes.io/projected/b492461f-69d0-4020-84d9-68adae5ebe0e-kube-api-access-js6j2\") pod \"nova-cell0-conductor-db-sync-jvrc5\" (UID: \"b492461f-69d0-4020-84d9-68adae5ebe0e\") " pod="openstack/nova-cell0-conductor-db-sync-jvrc5"
Nov 28 13:52:44 crc kubenswrapper[4857]: I1128 13:52:44.391620 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b492461f-69d0-4020-84d9-68adae5ebe0e-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-jvrc5\" (UID: \"b492461f-69d0-4020-84d9-68adae5ebe0e\") " pod="openstack/nova-cell0-conductor-db-sync-jvrc5"
Nov 28 13:52:44 crc kubenswrapper[4857]: I1128 13:52:44.391637 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b492461f-69d0-4020-84d9-68adae5ebe0e-config-data\") pod \"nova-cell0-conductor-db-sync-jvrc5\" (UID: \"b492461f-69d0-4020-84d9-68adae5ebe0e\") " pod="openstack/nova-cell0-conductor-db-sync-jvrc5"
Nov 28 13:52:44 crc kubenswrapper[4857]: I1128 13:52:44.407015 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b492461f-69d0-4020-84d9-68adae5ebe0e-scripts\") pod \"nova-cell0-conductor-db-sync-jvrc5\" (UID: \"b492461f-69d0-4020-84d9-68adae5ebe0e\") " pod="openstack/nova-cell0-conductor-db-sync-jvrc5"
Nov 28 13:52:44 crc kubenswrapper[4857]: I1128 13:52:44.414231 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-js6j2\" (UniqueName: \"kubernetes.io/projected/b492461f-69d0-4020-84d9-68adae5ebe0e-kube-api-access-js6j2\") pod \"nova-cell0-conductor-db-sync-jvrc5\" (UID: \"b492461f-69d0-4020-84d9-68adae5ebe0e\") " pod="openstack/nova-cell0-conductor-db-sync-jvrc5"
Nov 28 13:52:44 crc kubenswrapper[4857]: I1128 13:52:44.489928 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 13:52:44 crc kubenswrapper[4857]: I1128 13:52:44.534474 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"022ba553-92f0-4f30-a239-8a0e69c0fd8e","Type":"ContainerStarted","Data":"d1e1bc191a501bd692e898381cf924a27f67d02d21c627cf48a1c46a507cdbcf"}
Nov 28 13:52:44 crc kubenswrapper[4857]: I1128 13:52:44.534540 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Nov 28 13:52:44 crc kubenswrapper[4857]: I1128 13:52:44.534660 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Nov 28 13:52:44 crc kubenswrapper[4857]: I1128 13:52:44.594979 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-jvrc5"
Nov 28 13:52:44 crc kubenswrapper[4857]: I1128 13:52:44.803351 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Nov 28 13:52:44 crc kubenswrapper[4857]: I1128 13:52:44.803694 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Nov 28 13:52:44 crc kubenswrapper[4857]: I1128 13:52:44.844565 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Nov 28 13:52:44 crc kubenswrapper[4857]: I1128 13:52:44.859432 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Nov 28 13:52:45 crc kubenswrapper[4857]: W1128 13:52:45.115018 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb492461f_69d0_4020_84d9_68adae5ebe0e.slice/crio-da18ea6b40d6054b63d70c9e4c95e6b65a3e1d427b292f9531bbfcc170254549 WatchSource:0}: Error finding container da18ea6b40d6054b63d70c9e4c95e6b65a3e1d427b292f9531bbfcc170254549: Status 404 returned error can't find the container with id da18ea6b40d6054b63d70c9e4c95e6b65a3e1d427b292f9531bbfcc170254549
Nov 28 13:52:45 crc kubenswrapper[4857]: I1128 13:52:45.117150 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-jvrc5"]
Nov 28 13:52:45 crc kubenswrapper[4857]: I1128 13:52:45.547577 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"022ba553-92f0-4f30-a239-8a0e69c0fd8e","Type":"ContainerStarted","Data":"228fecb4decf95941ad8622e190a3c35ebe0c878142c3ec06cb45beec8910649"}
Nov 28 13:52:45 crc kubenswrapper[4857]: I1128 13:52:45.548858 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-jvrc5" event={"ID":"b492461f-69d0-4020-84d9-68adae5ebe0e","Type":"ContainerStarted","Data":"da18ea6b40d6054b63d70c9e4c95e6b65a3e1d427b292f9531bbfcc170254549"}
Nov 28 13:52:45 crc kubenswrapper[4857]: I1128 13:52:45.549468 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Nov 28 13:52:45 crc kubenswrapper[4857]: I1128 13:52:45.549493 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Nov 28 13:52:46 crc kubenswrapper[4857]: I1128 13:52:46.613963 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"022ba553-92f0-4f30-a239-8a0e69c0fd8e","Type":"ContainerStarted","Data":"2788c2be124513148c7c20f71932ae50c89233ab97589cdbc785e592bedda3db"}
Nov 28 13:52:46 crc kubenswrapper[4857]: I1128 13:52:46.614693 4857 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 28 13:52:46 crc kubenswrapper[4857]: I1128 13:52:46.615626 4857 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 28 13:52:47 crc kubenswrapper[4857]: I1128 13:52:47.200462 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 28 13:52:47 crc kubenswrapper[4857]: I1128 13:52:47.421837 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Nov 28 13:52:47 crc kubenswrapper[4857]: I1128 13:52:47.629065 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"022ba553-92f0-4f30-a239-8a0e69c0fd8e","Type":"ContainerStarted","Data":"7ecda9f799a5cba860042e1e37b832b55d1a2667f2692e5c0f4f5d3ea71475ec"}
Nov 28 13:52:48 crc kubenswrapper[4857]: I1128 13:52:48.106708 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Nov 28 13:52:48 crc kubenswrapper[4857]: I1128 13:52:48.106826 4857 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 28 13:52:48 crc kubenswrapper[4857]: I1128 13:52:48.132353 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Nov 28 13:52:50 crc kubenswrapper[4857]: I1128 13:52:50.660241 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"022ba553-92f0-4f30-a239-8a0e69c0fd8e","Type":"ContainerStarted","Data":"8fa241d9a05951f1beb4bacac3370a2bd937786ccb2b9348350cf7f25a51dfc1"}
Nov 28 13:52:50 crc kubenswrapper[4857]: I1128 13:52:50.660527 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="022ba553-92f0-4f30-a239-8a0e69c0fd8e" containerName="sg-core" containerID="cri-o://7ecda9f799a5cba860042e1e37b832b55d1a2667f2692e5c0f4f5d3ea71475ec" gracePeriod=30
Nov 28 13:52:50 crc kubenswrapper[4857]: I1128 13:52:50.660537 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="022ba553-92f0-4f30-a239-8a0e69c0fd8e" containerName="proxy-httpd" containerID="cri-o://8fa241d9a05951f1beb4bacac3370a2bd937786ccb2b9348350cf7f25a51dfc1" gracePeriod=30
Nov 28 13:52:50 crc kubenswrapper[4857]: I1128 13:52:50.660567 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="022ba553-92f0-4f30-a239-8a0e69c0fd8e" containerName="ceilometer-notification-agent" containerID="cri-o://2788c2be124513148c7c20f71932ae50c89233ab97589cdbc785e592bedda3db" gracePeriod=30
Nov 28 13:52:50 crc kubenswrapper[4857]: I1128 13:52:50.660583 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="022ba553-92f0-4f30-a239-8a0e69c0fd8e" containerName="ceilometer-central-agent" containerID="cri-o://228fecb4decf95941ad8622e190a3c35ebe0c878142c3ec06cb45beec8910649" gracePeriod=30
Nov 28 13:52:50 crc kubenswrapper[4857]: I1128 13:52:50.661824 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 28 13:52:50 crc kubenswrapper[4857]: I1128 13:52:50.701177 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.717359553 podStartE2EDuration="7.701156175s" podCreationTimestamp="2025-11-28 13:52:43 +0000 UTC" firstStartedPulling="2025-11-28 13:52:44.489803211 +0000 UTC m=+1414.613744648" lastFinishedPulling="2025-11-28 13:52:49.473599833 +0000 UTC m=+1419.597541270" observedRunningTime="2025-11-28 13:52:50.677724868 +0000 UTC m=+1420.801666305" watchObservedRunningTime="2025-11-28 13:52:50.701156175 +0000 UTC m=+1420.825097612"
Nov 28 13:52:51 crc kubenswrapper[4857]: I1128 13:52:51.675374 4857 generic.go:334] "Generic (PLEG): container finished" podID="022ba553-92f0-4f30-a239-8a0e69c0fd8e" containerID="8fa241d9a05951f1beb4bacac3370a2bd937786ccb2b9348350cf7f25a51dfc1" exitCode=0
Nov 28 13:52:51 crc kubenswrapper[4857]: I1128 13:52:51.675738 4857 generic.go:334] "Generic (PLEG): container finished" podID="022ba553-92f0-4f30-a239-8a0e69c0fd8e" containerID="7ecda9f799a5cba860042e1e37b832b55d1a2667f2692e5c0f4f5d3ea71475ec" exitCode=2
Nov 28 13:52:51 crc kubenswrapper[4857]: I1128 13:52:51.675751 4857 generic.go:334] "Generic (PLEG): container finished" podID="022ba553-92f0-4f30-a239-8a0e69c0fd8e" containerID="2788c2be124513148c7c20f71932ae50c89233ab97589cdbc785e592bedda3db" exitCode=0
Nov 28 13:52:51 crc kubenswrapper[4857]: I1128 13:52:51.675476 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"022ba553-92f0-4f30-a239-8a0e69c0fd8e","Type":"ContainerDied","Data":"8fa241d9a05951f1beb4bacac3370a2bd937786ccb2b9348350cf7f25a51dfc1"}
Nov 28 13:52:51 crc kubenswrapper[4857]: I1128 13:52:51.675794 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"022ba553-92f0-4f30-a239-8a0e69c0fd8e","Type":"ContainerDied","Data":"7ecda9f799a5cba860042e1e37b832b55d1a2667f2692e5c0f4f5d3ea71475ec"}
Nov 28 13:52:51 crc kubenswrapper[4857]: I1128 13:52:51.675816 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"022ba553-92f0-4f30-a239-8a0e69c0fd8e","Type":"ContainerDied","Data":"2788c2be124513148c7c20f71932ae50c89233ab97589cdbc785e592bedda3db"}
Nov 28 13:52:54 crc kubenswrapper[4857]: I1128 13:52:54.705182 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-jvrc5" event={"ID":"b492461f-69d0-4020-84d9-68adae5ebe0e","Type":"ContainerStarted","Data":"089005b3ae005975141be945a4d75ab3ebb78652421b1e38e82689bc954e5745"}
Nov 28 13:52:54 crc kubenswrapper[4857]: I1128 13:52:54.723792 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-jvrc5" podStartSLOduration=1.6386240170000002 podStartE2EDuration="10.723772292s" podCreationTimestamp="2025-11-28 13:52:44 +0000 UTC" firstStartedPulling="2025-11-28 13:52:45.117378279 +0000 UTC m=+1415.241319716" lastFinishedPulling="2025-11-28 13:52:54.202526534 +0000 UTC m=+1424.326467991" observedRunningTime="2025-11-28 13:52:54.717246011 +0000 UTC m=+1424.841187458" watchObservedRunningTime="2025-11-28 13:52:54.723772292 +0000 UTC m=+1424.847713729"
Nov 28 13:52:57 crc kubenswrapper[4857]: I1128 13:52:57.744339 4857 generic.go:334] "Generic (PLEG): container finished" podID="022ba553-92f0-4f30-a239-8a0e69c0fd8e" containerID="228fecb4decf95941ad8622e190a3c35ebe0c878142c3ec06cb45beec8910649" exitCode=0
Nov 28 13:52:57 crc kubenswrapper[4857]: I1128 13:52:57.744462 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"022ba553-92f0-4f30-a239-8a0e69c0fd8e","Type":"ContainerDied","Data":"228fecb4decf95941ad8622e190a3c35ebe0c878142c3ec06cb45beec8910649"}
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.125488 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.159804 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/022ba553-92f0-4f30-a239-8a0e69c0fd8e-combined-ca-bundle\") pod \"022ba553-92f0-4f30-a239-8a0e69c0fd8e\" (UID: \"022ba553-92f0-4f30-a239-8a0e69c0fd8e\") "
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.159893 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/022ba553-92f0-4f30-a239-8a0e69c0fd8e-config-data\") pod \"022ba553-92f0-4f30-a239-8a0e69c0fd8e\" (UID: \"022ba553-92f0-4f30-a239-8a0e69c0fd8e\") "
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.160007 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/022ba553-92f0-4f30-a239-8a0e69c0fd8e-scripts\") pod \"022ba553-92f0-4f30-a239-8a0e69c0fd8e\" (UID: \"022ba553-92f0-4f30-a239-8a0e69c0fd8e\") "
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.160027 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/022ba553-92f0-4f30-a239-8a0e69c0fd8e-sg-core-conf-yaml\") pod \"022ba553-92f0-4f30-a239-8a0e69c0fd8e\" (UID: \"022ba553-92f0-4f30-a239-8a0e69c0fd8e\") "
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.160094 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bn7sf\" (UniqueName: \"kubernetes.io/projected/022ba553-92f0-4f30-a239-8a0e69c0fd8e-kube-api-access-bn7sf\") pod \"022ba553-92f0-4f30-a239-8a0e69c0fd8e\" (UID: \"022ba553-92f0-4f30-a239-8a0e69c0fd8e\") "
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.160128 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/022ba553-92f0-4f30-a239-8a0e69c0fd8e-run-httpd\") pod \"022ba553-92f0-4f30-a239-8a0e69c0fd8e\" (UID: \"022ba553-92f0-4f30-a239-8a0e69c0fd8e\") "
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.160157 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/022ba553-92f0-4f30-a239-8a0e69c0fd8e-log-httpd\") pod \"022ba553-92f0-4f30-a239-8a0e69c0fd8e\" (UID: \"022ba553-92f0-4f30-a239-8a0e69c0fd8e\") "
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.161008 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/022ba553-92f0-4f30-a239-8a0e69c0fd8e-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "022ba553-92f0-4f30-a239-8a0e69c0fd8e" (UID: "022ba553-92f0-4f30-a239-8a0e69c0fd8e"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.165266 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/022ba553-92f0-4f30-a239-8a0e69c0fd8e-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "022ba553-92f0-4f30-a239-8a0e69c0fd8e" (UID: "022ba553-92f0-4f30-a239-8a0e69c0fd8e"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.169183 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/022ba553-92f0-4f30-a239-8a0e69c0fd8e-kube-api-access-bn7sf" (OuterVolumeSpecName: "kube-api-access-bn7sf") pod "022ba553-92f0-4f30-a239-8a0e69c0fd8e" (UID: "022ba553-92f0-4f30-a239-8a0e69c0fd8e"). InnerVolumeSpecName "kube-api-access-bn7sf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.170139 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/022ba553-92f0-4f30-a239-8a0e69c0fd8e-scripts" (OuterVolumeSpecName: "scripts") pod "022ba553-92f0-4f30-a239-8a0e69c0fd8e" (UID: "022ba553-92f0-4f30-a239-8a0e69c0fd8e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.192060 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/022ba553-92f0-4f30-a239-8a0e69c0fd8e-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "022ba553-92f0-4f30-a239-8a0e69c0fd8e" (UID: "022ba553-92f0-4f30-a239-8a0e69c0fd8e"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.228850 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/022ba553-92f0-4f30-a239-8a0e69c0fd8e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "022ba553-92f0-4f30-a239-8a0e69c0fd8e" (UID: "022ba553-92f0-4f30-a239-8a0e69c0fd8e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.257762 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/022ba553-92f0-4f30-a239-8a0e69c0fd8e-config-data" (OuterVolumeSpecName: "config-data") pod "022ba553-92f0-4f30-a239-8a0e69c0fd8e" (UID: "022ba553-92f0-4f30-a239-8a0e69c0fd8e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.262486 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/022ba553-92f0-4f30-a239-8a0e69c0fd8e-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.262559 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/022ba553-92f0-4f30-a239-8a0e69c0fd8e-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.262570 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/022ba553-92f0-4f30-a239-8a0e69c0fd8e-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.262578 4857 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/022ba553-92f0-4f30-a239-8a0e69c0fd8e-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.262587 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bn7sf\" (UniqueName: \"kubernetes.io/projected/022ba553-92f0-4f30-a239-8a0e69c0fd8e-kube-api-access-bn7sf\") on node \"crc\" DevicePath \"\""
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.262598 4857 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/022ba553-92f0-4f30-a239-8a0e69c0fd8e-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.262606 4857 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/022ba553-92f0-4f30-a239-8a0e69c0fd8e-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.758641 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"022ba553-92f0-4f30-a239-8a0e69c0fd8e","Type":"ContainerDied","Data":"d1e1bc191a501bd692e898381cf924a27f67d02d21c627cf48a1c46a507cdbcf"}
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.758695 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.758704 4857 scope.go:117] "RemoveContainer" containerID="8fa241d9a05951f1beb4bacac3370a2bd937786ccb2b9348350cf7f25a51dfc1"
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.795190 4857 scope.go:117] "RemoveContainer" containerID="7ecda9f799a5cba860042e1e37b832b55d1a2667f2692e5c0f4f5d3ea71475ec"
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.848871 4857 scope.go:117] "RemoveContainer" containerID="2788c2be124513148c7c20f71932ae50c89233ab97589cdbc785e592bedda3db"
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.865088 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.876453 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.889784 4857 scope.go:117] "RemoveContainer" containerID="228fecb4decf95941ad8622e190a3c35ebe0c878142c3ec06cb45beec8910649"
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.890892 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 28 13:52:58 crc kubenswrapper[4857]: E1128 13:52:58.891256 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="022ba553-92f0-4f30-a239-8a0e69c0fd8e" containerName="sg-core"
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.891274 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="022ba553-92f0-4f30-a239-8a0e69c0fd8e" containerName="sg-core"
Nov 28 13:52:58 crc kubenswrapper[4857]: E1128 13:52:58.891289 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="022ba553-92f0-4f30-a239-8a0e69c0fd8e" containerName="proxy-httpd"
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.891295 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="022ba553-92f0-4f30-a239-8a0e69c0fd8e" containerName="proxy-httpd"
Nov 28 13:52:58 crc kubenswrapper[4857]: E1128 13:52:58.891305 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="022ba553-92f0-4f30-a239-8a0e69c0fd8e" containerName="ceilometer-notification-agent"
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.891312 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="022ba553-92f0-4f30-a239-8a0e69c0fd8e" containerName="ceilometer-notification-agent"
Nov 28 13:52:58 crc kubenswrapper[4857]: E1128 13:52:58.891331 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="022ba553-92f0-4f30-a239-8a0e69c0fd8e" containerName="ceilometer-central-agent"
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.891337 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="022ba553-92f0-4f30-a239-8a0e69c0fd8e" containerName="ceilometer-central-agent"
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.891493 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="022ba553-92f0-4f30-a239-8a0e69c0fd8e" containerName="sg-core"
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.891502 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="022ba553-92f0-4f30-a239-8a0e69c0fd8e" containerName="ceilometer-notification-agent"
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.891509 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="022ba553-92f0-4f30-a239-8a0e69c0fd8e" containerName="ceilometer-central-agent"
Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.891526 4857 memory_manager.go:354] "RemoveStaleState
removing state" podUID="022ba553-92f0-4f30-a239-8a0e69c0fd8e" containerName="proxy-httpd" Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.893102 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.896081 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.896381 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.901431 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.978796 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a3a426a9-b190-45d1-8ded-d859d930aa46-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a3a426a9-b190-45d1-8ded-d859d930aa46\") " pod="openstack/ceilometer-0" Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.978844 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a3a426a9-b190-45d1-8ded-d859d930aa46-scripts\") pod \"ceilometer-0\" (UID: \"a3a426a9-b190-45d1-8ded-d859d930aa46\") " pod="openstack/ceilometer-0" Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.978869 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w9vkb\" (UniqueName: \"kubernetes.io/projected/a3a426a9-b190-45d1-8ded-d859d930aa46-kube-api-access-w9vkb\") pod \"ceilometer-0\" (UID: \"a3a426a9-b190-45d1-8ded-d859d930aa46\") " pod="openstack/ceilometer-0" Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.978885 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3a426a9-b190-45d1-8ded-d859d930aa46-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a3a426a9-b190-45d1-8ded-d859d930aa46\") " pod="openstack/ceilometer-0" Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.978981 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a3a426a9-b190-45d1-8ded-d859d930aa46-config-data\") pod \"ceilometer-0\" (UID: \"a3a426a9-b190-45d1-8ded-d859d930aa46\") " pod="openstack/ceilometer-0" Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.979217 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a3a426a9-b190-45d1-8ded-d859d930aa46-run-httpd\") pod \"ceilometer-0\" (UID: \"a3a426a9-b190-45d1-8ded-d859d930aa46\") " pod="openstack/ceilometer-0" Nov 28 13:52:58 crc kubenswrapper[4857]: I1128 13:52:58.979468 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a3a426a9-b190-45d1-8ded-d859d930aa46-log-httpd\") pod \"ceilometer-0\" (UID: \"a3a426a9-b190-45d1-8ded-d859d930aa46\") " pod="openstack/ceilometer-0" Nov 28 13:52:59 crc kubenswrapper[4857]: I1128 13:52:59.081556 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/a3a426a9-b190-45d1-8ded-d859d930aa46-log-httpd\") pod \"ceilometer-0\" (UID: \"a3a426a9-b190-45d1-8ded-d859d930aa46\") " pod="openstack/ceilometer-0" Nov 28 13:52:59 crc kubenswrapper[4857]: I1128 13:52:59.081644 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a3a426a9-b190-45d1-8ded-d859d930aa46-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a3a426a9-b190-45d1-8ded-d859d930aa46\") " pod="openstack/ceilometer-0" Nov 28 13:52:59 crc kubenswrapper[4857]: I1128 13:52:59.081675 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a3a426a9-b190-45d1-8ded-d859d930aa46-scripts\") pod \"ceilometer-0\" (UID: \"a3a426a9-b190-45d1-8ded-d859d930aa46\") " pod="openstack/ceilometer-0" Nov 28 13:52:59 crc kubenswrapper[4857]: I1128 13:52:59.081700 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w9vkb\" (UniqueName: \"kubernetes.io/projected/a3a426a9-b190-45d1-8ded-d859d930aa46-kube-api-access-w9vkb\") pod \"ceilometer-0\" (UID: \"a3a426a9-b190-45d1-8ded-d859d930aa46\") " pod="openstack/ceilometer-0" Nov 28 13:52:59 crc kubenswrapper[4857]: I1128 13:52:59.081721 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3a426a9-b190-45d1-8ded-d859d930aa46-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a3a426a9-b190-45d1-8ded-d859d930aa46\") " pod="openstack/ceilometer-0" Nov 28 13:52:59 crc kubenswrapper[4857]: I1128 13:52:59.081744 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a3a426a9-b190-45d1-8ded-d859d930aa46-config-data\") pod \"ceilometer-0\" (UID: \"a3a426a9-b190-45d1-8ded-d859d930aa46\") " pod="openstack/ceilometer-0" Nov 28 13:52:59 crc kubenswrapper[4857]: I1128 13:52:59.081777 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a3a426a9-b190-45d1-8ded-d859d930aa46-run-httpd\") pod \"ceilometer-0\" (UID: \"a3a426a9-b190-45d1-8ded-d859d930aa46\") " pod="openstack/ceilometer-0" Nov 28 13:52:59 crc kubenswrapper[4857]: I1128 13:52:59.082134 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a3a426a9-b190-45d1-8ded-d859d930aa46-log-httpd\") pod \"ceilometer-0\" (UID: \"a3a426a9-b190-45d1-8ded-d859d930aa46\") " pod="openstack/ceilometer-0" Nov 28 13:52:59 crc kubenswrapper[4857]: I1128 13:52:59.082271 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a3a426a9-b190-45d1-8ded-d859d930aa46-run-httpd\") pod \"ceilometer-0\" (UID: \"a3a426a9-b190-45d1-8ded-d859d930aa46\") " pod="openstack/ceilometer-0" Nov 28 13:52:59 crc kubenswrapper[4857]: I1128 13:52:59.086973 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a3a426a9-b190-45d1-8ded-d859d930aa46-config-data\") pod \"ceilometer-0\" (UID: \"a3a426a9-b190-45d1-8ded-d859d930aa46\") " pod="openstack/ceilometer-0" Nov 28 13:52:59 crc kubenswrapper[4857]: I1128 13:52:59.087206 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/a3a426a9-b190-45d1-8ded-d859d930aa46-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a3a426a9-b190-45d1-8ded-d859d930aa46\") " pod="openstack/ceilometer-0" Nov 28 13:52:59 crc kubenswrapper[4857]: I1128 13:52:59.087689 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a3a426a9-b190-45d1-8ded-d859d930aa46-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a3a426a9-b190-45d1-8ded-d859d930aa46\") " pod="openstack/ceilometer-0" Nov 28 13:52:59 crc kubenswrapper[4857]: I1128 13:52:59.090648 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a3a426a9-b190-45d1-8ded-d859d930aa46-scripts\") pod \"ceilometer-0\" (UID: \"a3a426a9-b190-45d1-8ded-d859d930aa46\") " pod="openstack/ceilometer-0" Nov 28 13:52:59 crc kubenswrapper[4857]: I1128 13:52:59.098187 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w9vkb\" (UniqueName: \"kubernetes.io/projected/a3a426a9-b190-45d1-8ded-d859d930aa46-kube-api-access-w9vkb\") pod \"ceilometer-0\" (UID: \"a3a426a9-b190-45d1-8ded-d859d930aa46\") " pod="openstack/ceilometer-0" Nov 28 13:52:59 crc kubenswrapper[4857]: I1128 13:52:59.213913 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:52:59 crc kubenswrapper[4857]: I1128 13:52:59.677937 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:52:59 crc kubenswrapper[4857]: W1128 13:52:59.681076 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda3a426a9_b190_45d1_8ded_d859d930aa46.slice/crio-e7c6a1907049909795fea39d2f876f5baf21da9d59f56988e89c435c9cfbfe0e WatchSource:0}: Error finding container e7c6a1907049909795fea39d2f876f5baf21da9d59f56988e89c435c9cfbfe0e: Status 404 returned error can't find the container with id e7c6a1907049909795fea39d2f876f5baf21da9d59f56988e89c435c9cfbfe0e Nov 28 13:52:59 crc kubenswrapper[4857]: I1128 13:52:59.771440 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a3a426a9-b190-45d1-8ded-d859d930aa46","Type":"ContainerStarted","Data":"e7c6a1907049909795fea39d2f876f5baf21da9d59f56988e89c435c9cfbfe0e"} Nov 28 13:53:00 crc kubenswrapper[4857]: I1128 13:53:00.249242 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="022ba553-92f0-4f30-a239-8a0e69c0fd8e" path="/var/lib/kubelet/pods/022ba553-92f0-4f30-a239-8a0e69c0fd8e/volumes" Nov 28 13:53:00 crc kubenswrapper[4857]: I1128 13:53:00.781296 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a3a426a9-b190-45d1-8ded-d859d930aa46","Type":"ContainerStarted","Data":"3550e137178d18c59d49e08f8ba96f4865ee8708db2f6e837fe17fedaeee3539"} Nov 28 13:53:01 crc kubenswrapper[4857]: I1128 13:53:01.823788 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a3a426a9-b190-45d1-8ded-d859d930aa46","Type":"ContainerStarted","Data":"7d6b6e7e5e9cc36ce54a8b4ffd83d724c615708f5364d1ae2e912b47662fbf4c"} Nov 28 13:53:02 crc kubenswrapper[4857]: I1128 13:53:02.838175 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a3a426a9-b190-45d1-8ded-d859d930aa46","Type":"ContainerStarted","Data":"625f21229df21e17ad567862d2f50eae24cbad68ce3c927378a2c02395fc6d18"} Nov 28 13:53:03 crc 
Nov 28 13:53:03 crc kubenswrapper[4857]: I1128 13:53:03.665098 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="37e5bb25-0df5-40da-996d-b4e23120822b" containerName="glance-log" probeResult="failure" output="Get \"https://10.217.0.149:9292/healthcheck\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Nov 28 13:53:03 crc kubenswrapper[4857]: I1128 13:53:03.851483 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a3a426a9-b190-45d1-8ded-d859d930aa46","Type":"ContainerStarted","Data":"b739607a7d3bed26bfdef1e161fe7a512799ebf330a44835b6eebf8c36ffb70a"}
Nov 28 13:53:03 crc kubenswrapper[4857]: I1128 13:53:03.852136 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 28 13:53:03 crc kubenswrapper[4857]: I1128 13:53:03.950228 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.112035537 podStartE2EDuration="5.950206317s" podCreationTimestamp="2025-11-28 13:52:58 +0000 UTC" firstStartedPulling="2025-11-28 13:52:59.683363567 +0000 UTC m=+1429.807305004" lastFinishedPulling="2025-11-28 13:53:03.521534347 +0000 UTC m=+1433.645475784" observedRunningTime="2025-11-28 13:53:03.943676545 +0000 UTC m=+1434.067617982" watchObservedRunningTime="2025-11-28 13:53:03.950206317 +0000 UTC m=+1434.074147754"
Nov 28 13:53:06 crc kubenswrapper[4857]: I1128 13:53:06.886009 4857 generic.go:334] "Generic (PLEG): container finished" podID="b492461f-69d0-4020-84d9-68adae5ebe0e" containerID="089005b3ae005975141be945a4d75ab3ebb78652421b1e38e82689bc954e5745" exitCode=0
Nov 28 13:53:06 crc kubenswrapper[4857]: I1128 13:53:06.886090 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-jvrc5" event={"ID":"b492461f-69d0-4020-84d9-68adae5ebe0e","Type":"ContainerDied","Data":"089005b3ae005975141be945a4d75ab3ebb78652421b1e38e82689bc954e5745"}
Nov 28 13:53:08 crc kubenswrapper[4857]: I1128 13:53:08.232032 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-jvrc5"
Nov 28 13:53:08 crc kubenswrapper[4857]: I1128 13:53:08.267882 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b492461f-69d0-4020-84d9-68adae5ebe0e-combined-ca-bundle\") pod \"b492461f-69d0-4020-84d9-68adae5ebe0e\" (UID: \"b492461f-69d0-4020-84d9-68adae5ebe0e\") "
Nov 28 13:53:08 crc kubenswrapper[4857]: I1128 13:53:08.267967 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-js6j2\" (UniqueName: \"kubernetes.io/projected/b492461f-69d0-4020-84d9-68adae5ebe0e-kube-api-access-js6j2\") pod \"b492461f-69d0-4020-84d9-68adae5ebe0e\" (UID: \"b492461f-69d0-4020-84d9-68adae5ebe0e\") "
Nov 28 13:53:08 crc kubenswrapper[4857]: I1128 13:53:08.268014 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b492461f-69d0-4020-84d9-68adae5ebe0e-scripts\") pod \"b492461f-69d0-4020-84d9-68adae5ebe0e\" (UID: \"b492461f-69d0-4020-84d9-68adae5ebe0e\") "
Nov 28 13:53:08 crc kubenswrapper[4857]: I1128 13:53:08.268058 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b492461f-69d0-4020-84d9-68adae5ebe0e-config-data\") pod \"b492461f-69d0-4020-84d9-68adae5ebe0e\" (UID: \"b492461f-69d0-4020-84d9-68adae5ebe0e\") "
Nov 28 13:53:08 crc kubenswrapper[4857]: I1128 13:53:08.313581 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b492461f-69d0-4020-84d9-68adae5ebe0e-scripts" (OuterVolumeSpecName: "scripts") pod "b492461f-69d0-4020-84d9-68adae5ebe0e" (UID: "b492461f-69d0-4020-84d9-68adae5ebe0e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:53:08 crc kubenswrapper[4857]: I1128 13:53:08.314150 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b492461f-69d0-4020-84d9-68adae5ebe0e-kube-api-access-js6j2" (OuterVolumeSpecName: "kube-api-access-js6j2") pod "b492461f-69d0-4020-84d9-68adae5ebe0e" (UID: "b492461f-69d0-4020-84d9-68adae5ebe0e"). InnerVolumeSpecName "kube-api-access-js6j2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:53:08 crc kubenswrapper[4857]: I1128 13:53:08.318420 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b492461f-69d0-4020-84d9-68adae5ebe0e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b492461f-69d0-4020-84d9-68adae5ebe0e" (UID: "b492461f-69d0-4020-84d9-68adae5ebe0e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:53:08 crc kubenswrapper[4857]: I1128 13:53:08.319085 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b492461f-69d0-4020-84d9-68adae5ebe0e-config-data" (OuterVolumeSpecName: "config-data") pod "b492461f-69d0-4020-84d9-68adae5ebe0e" (UID: "b492461f-69d0-4020-84d9-68adae5ebe0e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:53:08 crc kubenswrapper[4857]: I1128 13:53:08.370389 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b492461f-69d0-4020-84d9-68adae5ebe0e-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 13:53:08 crc kubenswrapper[4857]: I1128 13:53:08.370431 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-js6j2\" (UniqueName: \"kubernetes.io/projected/b492461f-69d0-4020-84d9-68adae5ebe0e-kube-api-access-js6j2\") on node \"crc\" DevicePath \"\""
Nov 28 13:53:08 crc kubenswrapper[4857]: I1128 13:53:08.370442 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b492461f-69d0-4020-84d9-68adae5ebe0e-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 13:53:08 crc kubenswrapper[4857]: I1128 13:53:08.370450 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b492461f-69d0-4020-84d9-68adae5ebe0e-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 13:53:08 crc kubenswrapper[4857]: I1128 13:53:08.907446 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-jvrc5" event={"ID":"b492461f-69d0-4020-84d9-68adae5ebe0e","Type":"ContainerDied","Data":"da18ea6b40d6054b63d70c9e4c95e6b65a3e1d427b292f9531bbfcc170254549"}
Nov 28 13:53:08 crc kubenswrapper[4857]: I1128 13:53:08.907841 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="da18ea6b40d6054b63d70c9e4c95e6b65a3e1d427b292f9531bbfcc170254549"
Nov 28 13:53:08 crc kubenswrapper[4857]: I1128 13:53:08.907755 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-jvrc5"
Nov 28 13:53:09 crc kubenswrapper[4857]: I1128 13:53:09.400731 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 28 13:53:09 crc kubenswrapper[4857]: E1128 13:53:09.401202 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b492461f-69d0-4020-84d9-68adae5ebe0e" containerName="nova-cell0-conductor-db-sync"
Nov 28 13:53:09 crc kubenswrapper[4857]: I1128 13:53:09.401220 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="b492461f-69d0-4020-84d9-68adae5ebe0e" containerName="nova-cell0-conductor-db-sync"
Nov 28 13:53:09 crc kubenswrapper[4857]: I1128 13:53:09.401486 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="b492461f-69d0-4020-84d9-68adae5ebe0e" containerName="nova-cell0-conductor-db-sync"
Nov 28 13:53:09 crc kubenswrapper[4857]: I1128 13:53:09.402222 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Nov 28 13:53:09 crc kubenswrapper[4857]: I1128 13:53:09.440822 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-982cr"
Nov 28 13:53:09 crc kubenswrapper[4857]: I1128 13:53:09.441759 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Nov 28 13:53:09 crc kubenswrapper[4857]: I1128 13:53:09.456403 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 28 13:53:09 crc kubenswrapper[4857]: I1128 13:53:09.488871 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 13:53:09 crc kubenswrapper[4857]: I1128 13:53:09.489030 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mmgs7\" (UniqueName: \"kubernetes.io/projected/73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d-kube-api-access-mmgs7\") pod \"nova-cell0-conductor-0\" (UID: \"73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 13:53:09 crc kubenswrapper[4857]: I1128 13:53:09.489162 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 13:53:09 crc kubenswrapper[4857]: I1128 13:53:09.591153 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mmgs7\" (UniqueName: \"kubernetes.io/projected/73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d-kube-api-access-mmgs7\") pod \"nova-cell0-conductor-0\" (UID: \"73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 13:53:09 crc kubenswrapper[4857]: I1128 13:53:09.591222 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 13:53:09 crc kubenswrapper[4857]: I1128 13:53:09.591293 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 13:53:09 crc kubenswrapper[4857]: I1128 13:53:09.596234 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 13:53:09 crc kubenswrapper[4857]: I1128 13:53:09.604391 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 13:53:09 crc kubenswrapper[4857]: I1128 13:53:09.607337 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mmgs7\" (UniqueName: \"kubernetes.io/projected/73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d-kube-api-access-mmgs7\") pod \"nova-cell0-conductor-0\" (UID: \"73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 13:53:09 crc kubenswrapper[4857]: I1128 13:53:09.755199 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Nov 28 13:53:10 crc kubenswrapper[4857]: I1128 13:53:10.218808 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 28 13:53:10 crc kubenswrapper[4857]: I1128 13:53:10.931335 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d","Type":"ContainerStarted","Data":"ba6185dbaf33b25f5b661fea2dcf82e7a24370bf93cc9ba5e314ebb2abc985f8"}
Nov 28 13:53:10 crc kubenswrapper[4857]: I1128 13:53:10.931702 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0"
Nov 28 13:53:10 crc kubenswrapper[4857]: I1128 13:53:10.931713 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d","Type":"ContainerStarted","Data":"ae25b19badcac27915cf81d4556d1e0d3bdadc18cc6fee98258756ac117181b9"}
Nov 28 13:53:10 crc kubenswrapper[4857]: I1128 13:53:10.967829 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=1.967807906 podStartE2EDuration="1.967807906s" podCreationTimestamp="2025-11-28 13:53:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:53:10.955461561 +0000 UTC m=+1441.079403088" watchObservedRunningTime="2025-11-28 13:53:10.967807906 +0000 UTC m=+1441.091749353"
Nov 28 13:53:11 crc kubenswrapper[4857]: I1128 13:53:11.309257 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 13:53:11 crc kubenswrapper[4857]: I1128 13:53:11.309344 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 13:53:11 crc kubenswrapper[4857]: I1128 13:53:11.309423 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dshsf"
Nov 28 13:53:11 crc kubenswrapper[4857]: I1128 13:53:11.310309 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"46b7cc46f703cadc54c5480b2eebc620053c8d6a8106cbc200c8eb138aca1d8a"} pod="openshift-machine-config-operator/machine-config-daemon-dshsf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 28 13:53:11 crc kubenswrapper[4857]: I1128 13:53:11.310391 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" containerID="cri-o://46b7cc46f703cadc54c5480b2eebc620053c8d6a8106cbc200c8eb138aca1d8a" gracePeriod=600
Nov 28 13:53:11 crc kubenswrapper[4857]: E1128 13:53:11.551002 4857 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5d5445a4_417c_448a_a8a0_4a4f81828aff.slice/crio-conmon-46b7cc46f703cadc54c5480b2eebc620053c8d6a8106cbc200c8eb138aca1d8a.scope\": RecentStats: unable to find data in memory cache]"
Nov 28 13:53:11 crc kubenswrapper[4857]: I1128 13:53:11.943747 4857 generic.go:334] "Generic (PLEG): container finished" podID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerID="46b7cc46f703cadc54c5480b2eebc620053c8d6a8106cbc200c8eb138aca1d8a" exitCode=0
Nov 28 13:53:11 crc kubenswrapper[4857]: I1128 13:53:11.943796 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerDied","Data":"46b7cc46f703cadc54c5480b2eebc620053c8d6a8106cbc200c8eb138aca1d8a"}
Nov 28 13:53:11 crc kubenswrapper[4857]: I1128 13:53:11.944229 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerStarted","Data":"6bdae42f0e762dbf0605c629cfcd74b51248b4c9c90654ba4b8431e4a0c5c123"}
Nov 28 13:53:11 crc kubenswrapper[4857]: I1128 13:53:11.944261 4857 scope.go:117] "RemoveContainer" containerID="f8014f585ed82233daf2682d55748994b4ded11ee145a4ddfa59430be03e8701"
Nov 28 13:53:19 crc kubenswrapper[4857]: I1128 13:53:19.783744 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0"
Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.252760 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-qxt8d"]
Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.253831 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-qxt8d"
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-qxt8d" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.261847 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.262591 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.272609 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-qxt8d"] Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.331303 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b1f6b069-f687-4108-b8b3-2457822d496a-scripts\") pod \"nova-cell0-cell-mapping-qxt8d\" (UID: \"b1f6b069-f687-4108-b8b3-2457822d496a\") " pod="openstack/nova-cell0-cell-mapping-qxt8d" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.331463 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1f6b069-f687-4108-b8b3-2457822d496a-config-data\") pod \"nova-cell0-cell-mapping-qxt8d\" (UID: \"b1f6b069-f687-4108-b8b3-2457822d496a\") " pod="openstack/nova-cell0-cell-mapping-qxt8d" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.331499 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xz86v\" (UniqueName: \"kubernetes.io/projected/b1f6b069-f687-4108-b8b3-2457822d496a-kube-api-access-xz86v\") pod \"nova-cell0-cell-mapping-qxt8d\" (UID: \"b1f6b069-f687-4108-b8b3-2457822d496a\") " pod="openstack/nova-cell0-cell-mapping-qxt8d" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.331520 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1f6b069-f687-4108-b8b3-2457822d496a-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-qxt8d\" (UID: \"b1f6b069-f687-4108-b8b3-2457822d496a\") " pod="openstack/nova-cell0-cell-mapping-qxt8d" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.427332 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.429126 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.432293 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.432737 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1f6b069-f687-4108-b8b3-2457822d496a-config-data\") pod \"nova-cell0-cell-mapping-qxt8d\" (UID: \"b1f6b069-f687-4108-b8b3-2457822d496a\") " pod="openstack/nova-cell0-cell-mapping-qxt8d" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.432806 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xz86v\" (UniqueName: \"kubernetes.io/projected/b1f6b069-f687-4108-b8b3-2457822d496a-kube-api-access-xz86v\") pod \"nova-cell0-cell-mapping-qxt8d\" (UID: \"b1f6b069-f687-4108-b8b3-2457822d496a\") " pod="openstack/nova-cell0-cell-mapping-qxt8d" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.432838 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1f6b069-f687-4108-b8b3-2457822d496a-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-qxt8d\" (UID: \"b1f6b069-f687-4108-b8b3-2457822d496a\") " pod="openstack/nova-cell0-cell-mapping-qxt8d" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.432876 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b1f6b069-f687-4108-b8b3-2457822d496a-scripts\") pod \"nova-cell0-cell-mapping-qxt8d\" (UID: \"b1f6b069-f687-4108-b8b3-2457822d496a\") " pod="openstack/nova-cell0-cell-mapping-qxt8d" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.432998 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d\") " pod="openstack/nova-scheduler-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.433033 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d-config-data\") pod \"nova-scheduler-0\" (UID: \"81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d\") " pod="openstack/nova-scheduler-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.433095 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gjh9j\" (UniqueName: \"kubernetes.io/projected/81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d-kube-api-access-gjh9j\") pod \"nova-scheduler-0\" (UID: \"81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d\") " pod="openstack/nova-scheduler-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.443149 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b1f6b069-f687-4108-b8b3-2457822d496a-scripts\") pod \"nova-cell0-cell-mapping-qxt8d\" (UID: \"b1f6b069-f687-4108-b8b3-2457822d496a\") " pod="openstack/nova-cell0-cell-mapping-qxt8d" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.443802 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/b1f6b069-f687-4108-b8b3-2457822d496a-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-qxt8d\" (UID: \"b1f6b069-f687-4108-b8b3-2457822d496a\") " pod="openstack/nova-cell0-cell-mapping-qxt8d" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.452787 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xz86v\" (UniqueName: \"kubernetes.io/projected/b1f6b069-f687-4108-b8b3-2457822d496a-kube-api-access-xz86v\") pod \"nova-cell0-cell-mapping-qxt8d\" (UID: \"b1f6b069-f687-4108-b8b3-2457822d496a\") " pod="openstack/nova-cell0-cell-mapping-qxt8d" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.457052 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.457803 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1f6b069-f687-4108-b8b3-2457822d496a-config-data\") pod \"nova-cell0-cell-mapping-qxt8d\" (UID: \"b1f6b069-f687-4108-b8b3-2457822d496a\") " pod="openstack/nova-cell0-cell-mapping-qxt8d" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.534990 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d\") " pod="openstack/nova-scheduler-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.535263 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d-config-data\") pod \"nova-scheduler-0\" (UID: \"81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d\") " pod="openstack/nova-scheduler-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.535286 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gjh9j\" (UniqueName: \"kubernetes.io/projected/81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d-kube-api-access-gjh9j\") pod \"nova-scheduler-0\" (UID: \"81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d\") " pod="openstack/nova-scheduler-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.541623 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d-config-data\") pod \"nova-scheduler-0\" (UID: \"81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d\") " pod="openstack/nova-scheduler-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.546887 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d\") " pod="openstack/nova-scheduler-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.570537 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.572985 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.581389 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.589844 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gjh9j\" (UniqueName: \"kubernetes.io/projected/81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d-kube-api-access-gjh9j\") pod \"nova-scheduler-0\" (UID: \"81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d\") " pod="openstack/nova-scheduler-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.601022 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.607132 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-qxt8d" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.630809 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.632290 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.636919 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.643923 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1-config-data\") pod \"nova-metadata-0\" (UID: \"2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1\") " pod="openstack/nova-metadata-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.644496 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xjs8v\" (UniqueName: \"kubernetes.io/projected/2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1-kube-api-access-xjs8v\") pod \"nova-metadata-0\" (UID: \"2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1\") " pod="openstack/nova-metadata-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.644576 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1-logs\") pod \"nova-metadata-0\" (UID: \"2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1\") " pod="openstack/nova-metadata-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.644687 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jcdqk\" (UniqueName: \"kubernetes.io/projected/a6012ba9-a7cb-422d-b120-7699dff9658b-kube-api-access-jcdqk\") pod \"nova-cell1-novncproxy-0\" (UID: \"a6012ba9-a7cb-422d-b120-7699dff9658b\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.644709 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6012ba9-a7cb-422d-b120-7699dff9658b-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"a6012ba9-a7cb-422d-b120-7699dff9658b\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.644811 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1\") " pod="openstack/nova-metadata-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.644841 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6012ba9-a7cb-422d-b120-7699dff9658b-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"a6012ba9-a7cb-422d-b120-7699dff9658b\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.650462 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.719410 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.738048 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.745337 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.748525 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jcdqk\" (UniqueName: \"kubernetes.io/projected/a6012ba9-a7cb-422d-b120-7699dff9658b-kube-api-access-jcdqk\") pod \"nova-cell1-novncproxy-0\" (UID: \"a6012ba9-a7cb-422d-b120-7699dff9658b\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.748581 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6012ba9-a7cb-422d-b120-7699dff9658b-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"a6012ba9-a7cb-422d-b120-7699dff9658b\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.748694 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1\") " pod="openstack/nova-metadata-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.748717 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6012ba9-a7cb-422d-b120-7699dff9658b-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"a6012ba9-a7cb-422d-b120-7699dff9658b\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.748816 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1-config-data\") pod \"nova-metadata-0\" (UID: \"2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1\") " pod="openstack/nova-metadata-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.748916 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xjs8v\" (UniqueName: \"kubernetes.io/projected/2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1-kube-api-access-xjs8v\") pod \"nova-metadata-0\" (UID: \"2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1\") " pod="openstack/nova-metadata-0" Nov 28 13:53:20 crc kubenswrapper[4857]: 
I1128 13:53:20.748989 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1-logs\") pod \"nova-metadata-0\" (UID: \"2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1\") " pod="openstack/nova-metadata-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.750607 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.750717 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1-logs\") pod \"nova-metadata-0\" (UID: \"2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1\") " pod="openstack/nova-metadata-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.759673 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1-config-data\") pod \"nova-metadata-0\" (UID: \"2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1\") " pod="openstack/nova-metadata-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.761813 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1\") " pod="openstack/nova-metadata-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.762996 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.772941 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6012ba9-a7cb-422d-b120-7699dff9658b-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"a6012ba9-a7cb-422d-b120-7699dff9658b\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.776438 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6012ba9-a7cb-422d-b120-7699dff9658b-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"a6012ba9-a7cb-422d-b120-7699dff9658b\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.779577 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xjs8v\" (UniqueName: \"kubernetes.io/projected/2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1-kube-api-access-xjs8v\") pod \"nova-metadata-0\" (UID: \"2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1\") " pod="openstack/nova-metadata-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.786092 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jcdqk\" (UniqueName: \"kubernetes.io/projected/a6012ba9-a7cb-422d-b120-7699dff9658b-kube-api-access-jcdqk\") pod \"nova-cell1-novncproxy-0\" (UID: \"a6012ba9-a7cb-422d-b120-7699dff9658b\") " pod="openstack/nova-cell1-novncproxy-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.814207 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-qcdch"] Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.820897 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-qcdch" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.852527 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d8a47a38-60f9-4084-8341-94da42266558-ovsdbserver-nb\") pod \"dnsmasq-dns-bccf8f775-qcdch\" (UID: \"d8a47a38-60f9-4084-8341-94da42266558\") " pod="openstack/dnsmasq-dns-bccf8f775-qcdch" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.852573 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c56933f-fbe9-479a-aa1e-dbf699eb13af-config-data\") pod \"nova-api-0\" (UID: \"6c56933f-fbe9-479a-aa1e-dbf699eb13af\") " pod="openstack/nova-api-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.852616 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8zlrp\" (UniqueName: \"kubernetes.io/projected/6c56933f-fbe9-479a-aa1e-dbf699eb13af-kube-api-access-8zlrp\") pod \"nova-api-0\" (UID: \"6c56933f-fbe9-479a-aa1e-dbf699eb13af\") " pod="openstack/nova-api-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.852641 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8a47a38-60f9-4084-8341-94da42266558-config\") pod \"dnsmasq-dns-bccf8f775-qcdch\" (UID: \"d8a47a38-60f9-4084-8341-94da42266558\") " pod="openstack/dnsmasq-dns-bccf8f775-qcdch" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.852672 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c56933f-fbe9-479a-aa1e-dbf699eb13af-logs\") pod \"nova-api-0\" (UID: \"6c56933f-fbe9-479a-aa1e-dbf699eb13af\") " pod="openstack/nova-api-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.852706 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c56933f-fbe9-479a-aa1e-dbf699eb13af-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"6c56933f-fbe9-479a-aa1e-dbf699eb13af\") " pod="openstack/nova-api-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.852772 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d8a47a38-60f9-4084-8341-94da42266558-ovsdbserver-sb\") pod \"dnsmasq-dns-bccf8f775-qcdch\" (UID: \"d8a47a38-60f9-4084-8341-94da42266558\") " pod="openstack/dnsmasq-dns-bccf8f775-qcdch" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.852791 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d8a47a38-60f9-4084-8341-94da42266558-dns-swift-storage-0\") pod \"dnsmasq-dns-bccf8f775-qcdch\" (UID: \"d8a47a38-60f9-4084-8341-94da42266558\") " pod="openstack/dnsmasq-dns-bccf8f775-qcdch" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.852856 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d8a47a38-60f9-4084-8341-94da42266558-dns-svc\") pod \"dnsmasq-dns-bccf8f775-qcdch\" (UID: \"d8a47a38-60f9-4084-8341-94da42266558\") " 
pod="openstack/dnsmasq-dns-bccf8f775-qcdch" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.852910 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4v2l6\" (UniqueName: \"kubernetes.io/projected/d8a47a38-60f9-4084-8341-94da42266558-kube-api-access-4v2l6\") pod \"dnsmasq-dns-bccf8f775-qcdch\" (UID: \"d8a47a38-60f9-4084-8341-94da42266558\") " pod="openstack/dnsmasq-dns-bccf8f775-qcdch" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.852515 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-qcdch"] Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.963497 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c56933f-fbe9-479a-aa1e-dbf699eb13af-config-data\") pod \"nova-api-0\" (UID: \"6c56933f-fbe9-479a-aa1e-dbf699eb13af\") " pod="openstack/nova-api-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.964157 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8zlrp\" (UniqueName: \"kubernetes.io/projected/6c56933f-fbe9-479a-aa1e-dbf699eb13af-kube-api-access-8zlrp\") pod \"nova-api-0\" (UID: \"6c56933f-fbe9-479a-aa1e-dbf699eb13af\") " pod="openstack/nova-api-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.964921 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8a47a38-60f9-4084-8341-94da42266558-config\") pod \"dnsmasq-dns-bccf8f775-qcdch\" (UID: \"d8a47a38-60f9-4084-8341-94da42266558\") " pod="openstack/dnsmasq-dns-bccf8f775-qcdch" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.965032 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c56933f-fbe9-479a-aa1e-dbf699eb13af-logs\") pod \"nova-api-0\" (UID: \"6c56933f-fbe9-479a-aa1e-dbf699eb13af\") " pod="openstack/nova-api-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.965110 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c56933f-fbe9-479a-aa1e-dbf699eb13af-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"6c56933f-fbe9-479a-aa1e-dbf699eb13af\") " pod="openstack/nova-api-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.965185 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d8a47a38-60f9-4084-8341-94da42266558-ovsdbserver-sb\") pod \"dnsmasq-dns-bccf8f775-qcdch\" (UID: \"d8a47a38-60f9-4084-8341-94da42266558\") " pod="openstack/dnsmasq-dns-bccf8f775-qcdch" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.965219 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d8a47a38-60f9-4084-8341-94da42266558-dns-swift-storage-0\") pod \"dnsmasq-dns-bccf8f775-qcdch\" (UID: \"d8a47a38-60f9-4084-8341-94da42266558\") " pod="openstack/dnsmasq-dns-bccf8f775-qcdch" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.965334 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d8a47a38-60f9-4084-8341-94da42266558-dns-svc\") pod \"dnsmasq-dns-bccf8f775-qcdch\" (UID: \"d8a47a38-60f9-4084-8341-94da42266558\") " 
pod="openstack/dnsmasq-dns-bccf8f775-qcdch" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.965654 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c56933f-fbe9-479a-aa1e-dbf699eb13af-logs\") pod \"nova-api-0\" (UID: \"6c56933f-fbe9-479a-aa1e-dbf699eb13af\") " pod="openstack/nova-api-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.968606 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8a47a38-60f9-4084-8341-94da42266558-config\") pod \"dnsmasq-dns-bccf8f775-qcdch\" (UID: \"d8a47a38-60f9-4084-8341-94da42266558\") " pod="openstack/dnsmasq-dns-bccf8f775-qcdch" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.970747 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d8a47a38-60f9-4084-8341-94da42266558-ovsdbserver-sb\") pod \"dnsmasq-dns-bccf8f775-qcdch\" (UID: \"d8a47a38-60f9-4084-8341-94da42266558\") " pod="openstack/dnsmasq-dns-bccf8f775-qcdch" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.971398 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d8a47a38-60f9-4084-8341-94da42266558-dns-svc\") pod \"dnsmasq-dns-bccf8f775-qcdch\" (UID: \"d8a47a38-60f9-4084-8341-94da42266558\") " pod="openstack/dnsmasq-dns-bccf8f775-qcdch" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.971521 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4v2l6\" (UniqueName: \"kubernetes.io/projected/d8a47a38-60f9-4084-8341-94da42266558-kube-api-access-4v2l6\") pod \"dnsmasq-dns-bccf8f775-qcdch\" (UID: \"d8a47a38-60f9-4084-8341-94da42266558\") " pod="openstack/dnsmasq-dns-bccf8f775-qcdch" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.971600 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d8a47a38-60f9-4084-8341-94da42266558-ovsdbserver-nb\") pod \"dnsmasq-dns-bccf8f775-qcdch\" (UID: \"d8a47a38-60f9-4084-8341-94da42266558\") " pod="openstack/dnsmasq-dns-bccf8f775-qcdch" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.971848 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d8a47a38-60f9-4084-8341-94da42266558-dns-swift-storage-0\") pod \"dnsmasq-dns-bccf8f775-qcdch\" (UID: \"d8a47a38-60f9-4084-8341-94da42266558\") " pod="openstack/dnsmasq-dns-bccf8f775-qcdch" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.973640 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d8a47a38-60f9-4084-8341-94da42266558-ovsdbserver-nb\") pod \"dnsmasq-dns-bccf8f775-qcdch\" (UID: \"d8a47a38-60f9-4084-8341-94da42266558\") " pod="openstack/dnsmasq-dns-bccf8f775-qcdch" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.974518 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c56933f-fbe9-479a-aa1e-dbf699eb13af-config-data\") pod \"nova-api-0\" (UID: \"6c56933f-fbe9-479a-aa1e-dbf699eb13af\") " pod="openstack/nova-api-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.974962 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/6c56933f-fbe9-479a-aa1e-dbf699eb13af-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"6c56933f-fbe9-479a-aa1e-dbf699eb13af\") " pod="openstack/nova-api-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.990375 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8zlrp\" (UniqueName: \"kubernetes.io/projected/6c56933f-fbe9-479a-aa1e-dbf699eb13af-kube-api-access-8zlrp\") pod \"nova-api-0\" (UID: \"6c56933f-fbe9-479a-aa1e-dbf699eb13af\") " pod="openstack/nova-api-0" Nov 28 13:53:20 crc kubenswrapper[4857]: I1128 13:53:20.995208 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4v2l6\" (UniqueName: \"kubernetes.io/projected/d8a47a38-60f9-4084-8341-94da42266558-kube-api-access-4v2l6\") pod \"dnsmasq-dns-bccf8f775-qcdch\" (UID: \"d8a47a38-60f9-4084-8341-94da42266558\") " pod="openstack/dnsmasq-dns-bccf8f775-qcdch" Nov 28 13:53:21 crc kubenswrapper[4857]: I1128 13:53:21.055492 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 13:53:21 crc kubenswrapper[4857]: I1128 13:53:21.061756 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 13:53:21 crc kubenswrapper[4857]: I1128 13:53:21.113373 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 13:53:21 crc kubenswrapper[4857]: I1128 13:53:21.181921 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-qcdch" Nov 28 13:53:21 crc kubenswrapper[4857]: I1128 13:53:21.256855 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-qxt8d"] Nov 28 13:53:21 crc kubenswrapper[4857]: W1128 13:53:21.267362 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb1f6b069_f687_4108_b8b3_2457822d496a.slice/crio-2636b98d84720274fc4f123a1e7adb571d01bf6a94e9001e4cca0095c3a10757 WatchSource:0}: Error finding container 2636b98d84720274fc4f123a1e7adb571d01bf6a94e9001e4cca0095c3a10757: Status 404 returned error can't find the container with id 2636b98d84720274fc4f123a1e7adb571d01bf6a94e9001e4cca0095c3a10757 Nov 28 13:53:21 crc kubenswrapper[4857]: I1128 13:53:21.318871 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 13:53:21 crc kubenswrapper[4857]: I1128 13:53:21.414715 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-khxxx"] Nov 28 13:53:21 crc kubenswrapper[4857]: I1128 13:53:21.416594 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-khxxx" Nov 28 13:53:21 crc kubenswrapper[4857]: I1128 13:53:21.419718 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 28 13:53:21 crc kubenswrapper[4857]: I1128 13:53:21.419897 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 28 13:53:21 crc kubenswrapper[4857]: I1128 13:53:21.426200 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-khxxx"] Nov 28 13:53:21 crc kubenswrapper[4857]: I1128 13:53:21.590465 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d26ebe73-c4df-458d-a2f3-1da92d587632-config-data\") pod \"nova-cell1-conductor-db-sync-khxxx\" (UID: \"d26ebe73-c4df-458d-a2f3-1da92d587632\") " pod="openstack/nova-cell1-conductor-db-sync-khxxx" Nov 28 13:53:21 crc kubenswrapper[4857]: I1128 13:53:21.591024 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d26ebe73-c4df-458d-a2f3-1da92d587632-scripts\") pod \"nova-cell1-conductor-db-sync-khxxx\" (UID: \"d26ebe73-c4df-458d-a2f3-1da92d587632\") " pod="openstack/nova-cell1-conductor-db-sync-khxxx" Nov 28 13:53:21 crc kubenswrapper[4857]: I1128 13:53:21.591095 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d26ebe73-c4df-458d-a2f3-1da92d587632-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-khxxx\" (UID: \"d26ebe73-c4df-458d-a2f3-1da92d587632\") " pod="openstack/nova-cell1-conductor-db-sync-khxxx" Nov 28 13:53:21 crc kubenswrapper[4857]: I1128 13:53:21.591128 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8xg4t\" (UniqueName: \"kubernetes.io/projected/d26ebe73-c4df-458d-a2f3-1da92d587632-kube-api-access-8xg4t\") pod \"nova-cell1-conductor-db-sync-khxxx\" (UID: \"d26ebe73-c4df-458d-a2f3-1da92d587632\") " pod="openstack/nova-cell1-conductor-db-sync-khxxx" Nov 28 13:53:21 crc kubenswrapper[4857]: I1128 13:53:21.604381 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 13:53:21 crc kubenswrapper[4857]: I1128 13:53:21.692413 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d26ebe73-c4df-458d-a2f3-1da92d587632-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-khxxx\" (UID: \"d26ebe73-c4df-458d-a2f3-1da92d587632\") " pod="openstack/nova-cell1-conductor-db-sync-khxxx" Nov 28 13:53:21 crc kubenswrapper[4857]: I1128 13:53:21.692791 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8xg4t\" (UniqueName: \"kubernetes.io/projected/d26ebe73-c4df-458d-a2f3-1da92d587632-kube-api-access-8xg4t\") pod \"nova-cell1-conductor-db-sync-khxxx\" (UID: \"d26ebe73-c4df-458d-a2f3-1da92d587632\") " pod="openstack/nova-cell1-conductor-db-sync-khxxx" Nov 28 13:53:21 crc kubenswrapper[4857]: I1128 13:53:21.692844 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d26ebe73-c4df-458d-a2f3-1da92d587632-config-data\") pod \"nova-cell1-conductor-db-sync-khxxx\" (UID: 
\"d26ebe73-c4df-458d-a2f3-1da92d587632\") " pod="openstack/nova-cell1-conductor-db-sync-khxxx" Nov 28 13:53:21 crc kubenswrapper[4857]: I1128 13:53:21.692930 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d26ebe73-c4df-458d-a2f3-1da92d587632-scripts\") pod \"nova-cell1-conductor-db-sync-khxxx\" (UID: \"d26ebe73-c4df-458d-a2f3-1da92d587632\") " pod="openstack/nova-cell1-conductor-db-sync-khxxx" Nov 28 13:53:21 crc kubenswrapper[4857]: I1128 13:53:21.699011 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d26ebe73-c4df-458d-a2f3-1da92d587632-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-khxxx\" (UID: \"d26ebe73-c4df-458d-a2f3-1da92d587632\") " pod="openstack/nova-cell1-conductor-db-sync-khxxx" Nov 28 13:53:21 crc kubenswrapper[4857]: I1128 13:53:21.701616 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d26ebe73-c4df-458d-a2f3-1da92d587632-config-data\") pod \"nova-cell1-conductor-db-sync-khxxx\" (UID: \"d26ebe73-c4df-458d-a2f3-1da92d587632\") " pod="openstack/nova-cell1-conductor-db-sync-khxxx" Nov 28 13:53:21 crc kubenswrapper[4857]: I1128 13:53:21.707527 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d26ebe73-c4df-458d-a2f3-1da92d587632-scripts\") pod \"nova-cell1-conductor-db-sync-khxxx\" (UID: \"d26ebe73-c4df-458d-a2f3-1da92d587632\") " pod="openstack/nova-cell1-conductor-db-sync-khxxx" Nov 28 13:53:21 crc kubenswrapper[4857]: I1128 13:53:21.736187 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8xg4t\" (UniqueName: \"kubernetes.io/projected/d26ebe73-c4df-458d-a2f3-1da92d587632-kube-api-access-8xg4t\") pod \"nova-cell1-conductor-db-sync-khxxx\" (UID: \"d26ebe73-c4df-458d-a2f3-1da92d587632\") " pod="openstack/nova-cell1-conductor-db-sync-khxxx" Nov 28 13:53:21 crc kubenswrapper[4857]: I1128 13:53:21.829899 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 13:53:21 crc kubenswrapper[4857]: I1128 13:53:21.854703 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-khxxx" Nov 28 13:53:21 crc kubenswrapper[4857]: W1128 13:53:21.854180 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2c4d1fd8_27b5_4544_9fe0_7afdbaf191b1.slice/crio-30a6e6d983ddf1ab6cbed4d9e3efd0e6d86efdfd43c42edc991594754308557c WatchSource:0}: Error finding container 30a6e6d983ddf1ab6cbed4d9e3efd0e6d86efdfd43c42edc991594754308557c: Status 404 returned error can't find the container with id 30a6e6d983ddf1ab6cbed4d9e3efd0e6d86efdfd43c42edc991594754308557c Nov 28 13:53:21 crc kubenswrapper[4857]: I1128 13:53:21.881538 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 13:53:21 crc kubenswrapper[4857]: W1128 13:53:21.890548 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6c56933f_fbe9_479a_aa1e_dbf699eb13af.slice/crio-68f820573b53262decbca0a2ebdd1949c1b49581f4ab1678e80b231599fc8e1d WatchSource:0}: Error finding container 68f820573b53262decbca0a2ebdd1949c1b49581f4ab1678e80b231599fc8e1d: Status 404 returned error can't find the container with id 68f820573b53262decbca0a2ebdd1949c1b49581f4ab1678e80b231599fc8e1d Nov 28 13:53:21 crc kubenswrapper[4857]: I1128 13:53:21.939717 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-qcdch"] Nov 28 13:53:22 crc kubenswrapper[4857]: I1128 13:53:22.065876 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"a6012ba9-a7cb-422d-b120-7699dff9658b","Type":"ContainerStarted","Data":"a45507747e50cfdd8070da9bb7d03e670aa203942aa5eea599fdf7046b84b369"} Nov 28 13:53:22 crc kubenswrapper[4857]: I1128 13:53:22.097885 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-qxt8d" event={"ID":"b1f6b069-f687-4108-b8b3-2457822d496a","Type":"ContainerStarted","Data":"1456dd1bd3e0121d518cc6a6cd94cf805fa6cf250c06d4aca8868a28844b9064"} Nov 28 13:53:22 crc kubenswrapper[4857]: I1128 13:53:22.098176 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-qxt8d" event={"ID":"b1f6b069-f687-4108-b8b3-2457822d496a","Type":"ContainerStarted","Data":"2636b98d84720274fc4f123a1e7adb571d01bf6a94e9001e4cca0095c3a10757"} Nov 28 13:53:22 crc kubenswrapper[4857]: I1128 13:53:22.103683 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d","Type":"ContainerStarted","Data":"d226cf25dcf55d36564d86b871ceb37a6a1aac44dd237460f079b935574a85ee"} Nov 28 13:53:22 crc kubenswrapper[4857]: I1128 13:53:22.108991 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6c56933f-fbe9-479a-aa1e-dbf699eb13af","Type":"ContainerStarted","Data":"68f820573b53262decbca0a2ebdd1949c1b49581f4ab1678e80b231599fc8e1d"} Nov 28 13:53:22 crc kubenswrapper[4857]: I1128 13:53:22.127303 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1","Type":"ContainerStarted","Data":"30a6e6d983ddf1ab6cbed4d9e3efd0e6d86efdfd43c42edc991594754308557c"} Nov 28 13:53:22 crc kubenswrapper[4857]: I1128 13:53:22.130144 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-qcdch" 
event={"ID":"d8a47a38-60f9-4084-8341-94da42266558","Type":"ContainerStarted","Data":"5d8d2b1d0e37f27652e547e4539a5c1bdd6f2740ae2c10eeca6eee8e0c8fe9e1"} Nov 28 13:53:22 crc kubenswrapper[4857]: I1128 13:53:22.131287 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-qxt8d" podStartSLOduration=2.131272959 podStartE2EDuration="2.131272959s" podCreationTimestamp="2025-11-28 13:53:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:53:22.118448112 +0000 UTC m=+1452.242389549" watchObservedRunningTime="2025-11-28 13:53:22.131272959 +0000 UTC m=+1452.255214396" Nov 28 13:53:22 crc kubenswrapper[4857]: I1128 13:53:22.338205 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-khxxx"] Nov 28 13:53:23 crc kubenswrapper[4857]: I1128 13:53:23.142642 4857 generic.go:334] "Generic (PLEG): container finished" podID="d8a47a38-60f9-4084-8341-94da42266558" containerID="a53c79818510e05af3b0226cdc53a09f21f7f4d8a28bc0ff6b6c0f2f8c909714" exitCode=0 Nov 28 13:53:23 crc kubenswrapper[4857]: I1128 13:53:23.142687 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-qcdch" event={"ID":"d8a47a38-60f9-4084-8341-94da42266558","Type":"ContainerDied","Data":"a53c79818510e05af3b0226cdc53a09f21f7f4d8a28bc0ff6b6c0f2f8c909714"} Nov 28 13:53:23 crc kubenswrapper[4857]: I1128 13:53:23.150540 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-khxxx" event={"ID":"d26ebe73-c4df-458d-a2f3-1da92d587632","Type":"ContainerStarted","Data":"751f12a19ac746a9f59958709a679ab6e0f8b0c295bc0836d21ad235a87f6a5b"} Nov 28 13:53:23 crc kubenswrapper[4857]: I1128 13:53:23.150590 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-khxxx" event={"ID":"d26ebe73-c4df-458d-a2f3-1da92d587632","Type":"ContainerStarted","Data":"204689276a1d6e5ed4ff6324d6b09811c5b39d6e6b874b34b77cacd9ac3f5442"} Nov 28 13:53:23 crc kubenswrapper[4857]: I1128 13:53:23.184178 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-khxxx" podStartSLOduration=2.184163511 podStartE2EDuration="2.184163511s" podCreationTimestamp="2025-11-28 13:53:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:53:23.182375804 +0000 UTC m=+1453.306317241" watchObservedRunningTime="2025-11-28 13:53:23.184163511 +0000 UTC m=+1453.308104948" Nov 28 13:53:24 crc kubenswrapper[4857]: I1128 13:53:24.243504 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 13:53:24 crc kubenswrapper[4857]: I1128 13:53:24.245791 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 13:53:26 crc kubenswrapper[4857]: I1128 13:53:26.220580 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d","Type":"ContainerStarted","Data":"6eca3260c1b0bdbf12aa4493a7c10635581ce38d699e243a4c0ada74848cd5bc"} Nov 28 13:53:26 crc kubenswrapper[4857]: I1128 13:53:26.222126 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"6c56933f-fbe9-479a-aa1e-dbf699eb13af","Type":"ContainerStarted","Data":"888fe2c7fb9a72d11577c72ea2a6f6697c1a76294064e4ee884fb8613fd8badd"} Nov 28 13:53:26 crc kubenswrapper[4857]: I1128 13:53:26.222184 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6c56933f-fbe9-479a-aa1e-dbf699eb13af","Type":"ContainerStarted","Data":"f33b7982301c419d34731196833b066b89d549cda95aad48f7d2760468d1c871"} Nov 28 13:53:26 crc kubenswrapper[4857]: I1128 13:53:26.223651 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1","Type":"ContainerStarted","Data":"738eaaeae67dc9cfceae0399b6f99cb08ceac08d6c6b3a9efbed005f614268f1"} Nov 28 13:53:26 crc kubenswrapper[4857]: I1128 13:53:26.223794 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1","Type":"ContainerStarted","Data":"8c15e23deb428ba6eac2f2d241086073723e2137696b1b9f716e76eb0e46b88e"} Nov 28 13:53:26 crc kubenswrapper[4857]: I1128 13:53:26.223802 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1" containerName="nova-metadata-metadata" containerID="cri-o://738eaaeae67dc9cfceae0399b6f99cb08ceac08d6c6b3a9efbed005f614268f1" gracePeriod=30 Nov 28 13:53:26 crc kubenswrapper[4857]: I1128 13:53:26.223751 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1" containerName="nova-metadata-log" containerID="cri-o://8c15e23deb428ba6eac2f2d241086073723e2137696b1b9f716e76eb0e46b88e" gracePeriod=30 Nov 28 13:53:26 crc kubenswrapper[4857]: I1128 13:53:26.226709 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-qcdch" event={"ID":"d8a47a38-60f9-4084-8341-94da42266558","Type":"ContainerStarted","Data":"a38af6fde6496d26130d969ef4105a2c9b98637c9a5a25701815126e08a86976"} Nov 28 13:53:26 crc kubenswrapper[4857]: I1128 13:53:26.226964 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-bccf8f775-qcdch" Nov 28 13:53:26 crc kubenswrapper[4857]: I1128 13:53:26.237534 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="a6012ba9-a7cb-422d-b120-7699dff9658b" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://c4a145c6ca168dab4a4a683d69fa5ef3bd70e427cb8b2504227dc729ef8eb648" gracePeriod=30 Nov 28 13:53:26 crc kubenswrapper[4857]: I1128 13:53:26.250397 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"a6012ba9-a7cb-422d-b120-7699dff9658b","Type":"ContainerStarted","Data":"c4a145c6ca168dab4a4a683d69fa5ef3bd70e427cb8b2504227dc729ef8eb648"} Nov 28 13:53:26 crc kubenswrapper[4857]: I1128 13:53:26.250628 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.18142496 podStartE2EDuration="6.250613454s" podCreationTimestamp="2025-11-28 13:53:20 +0000 UTC" firstStartedPulling="2025-11-28 13:53:21.324919212 +0000 UTC m=+1451.448860649" lastFinishedPulling="2025-11-28 13:53:25.394107706 +0000 UTC m=+1455.518049143" observedRunningTime="2025-11-28 13:53:26.246728842 +0000 UTC m=+1456.370670279" watchObservedRunningTime="2025-11-28 13:53:26.250613454 +0000 UTC m=+1456.374554891" Nov 
28 13:53:26 crc kubenswrapper[4857]: I1128 13:53:26.275453 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.758099919 podStartE2EDuration="6.275432558s" podCreationTimestamp="2025-11-28 13:53:20 +0000 UTC" firstStartedPulling="2025-11-28 13:53:21.881698886 +0000 UTC m=+1452.005640323" lastFinishedPulling="2025-11-28 13:53:25.399031525 +0000 UTC m=+1455.522972962" observedRunningTime="2025-11-28 13:53:26.267519919 +0000 UTC m=+1456.391461356" watchObservedRunningTime="2025-11-28 13:53:26.275432558 +0000 UTC m=+1456.399374005" Nov 28 13:53:26 crc kubenswrapper[4857]: I1128 13:53:26.288464 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.541374849 podStartE2EDuration="6.2884452s" podCreationTimestamp="2025-11-28 13:53:20 +0000 UTC" firstStartedPulling="2025-11-28 13:53:21.612404523 +0000 UTC m=+1451.736345970" lastFinishedPulling="2025-11-28 13:53:25.359474894 +0000 UTC m=+1455.483416321" observedRunningTime="2025-11-28 13:53:26.287343841 +0000 UTC m=+1456.411285288" watchObservedRunningTime="2025-11-28 13:53:26.2884452 +0000 UTC m=+1456.412386637" Nov 28 13:53:26 crc kubenswrapper[4857]: I1128 13:53:26.321172 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-bccf8f775-qcdch" podStartSLOduration=6.321155112 podStartE2EDuration="6.321155112s" podCreationTimestamp="2025-11-28 13:53:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:53:26.313040098 +0000 UTC m=+1456.436981535" watchObservedRunningTime="2025-11-28 13:53:26.321155112 +0000 UTC m=+1456.445096539" Nov 28 13:53:26 crc kubenswrapper[4857]: I1128 13:53:26.343577 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.836385971 podStartE2EDuration="6.343553192s" podCreationTimestamp="2025-11-28 13:53:20 +0000 UTC" firstStartedPulling="2025-11-28 13:53:21.892432549 +0000 UTC m=+1452.016373986" lastFinishedPulling="2025-11-28 13:53:25.39959977 +0000 UTC m=+1455.523541207" observedRunningTime="2025-11-28 13:53:26.329462521 +0000 UTC m=+1456.453403978" watchObservedRunningTime="2025-11-28 13:53:26.343553192 +0000 UTC m=+1456.467494629" Nov 28 13:53:26 crc kubenswrapper[4857]: I1128 13:53:26.809780 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 13:53:26 crc kubenswrapper[4857]: I1128 13:53:26.947483 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1-logs\") pod \"2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1\" (UID: \"2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1\") " Nov 28 13:53:26 crc kubenswrapper[4857]: I1128 13:53:26.948024 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1-logs" (OuterVolumeSpecName: "logs") pod "2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1" (UID: "2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:53:26 crc kubenswrapper[4857]: I1128 13:53:26.948352 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1-config-data\") pod \"2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1\" (UID: \"2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1\") " Nov 28 13:53:26 crc kubenswrapper[4857]: I1128 13:53:26.948443 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1-combined-ca-bundle\") pod \"2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1\" (UID: \"2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1\") " Nov 28 13:53:26 crc kubenswrapper[4857]: I1128 13:53:26.948601 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xjs8v\" (UniqueName: \"kubernetes.io/projected/2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1-kube-api-access-xjs8v\") pod \"2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1\" (UID: \"2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1\") " Nov 28 13:53:26 crc kubenswrapper[4857]: I1128 13:53:26.949401 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1-logs\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:26 crc kubenswrapper[4857]: I1128 13:53:26.957907 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1-kube-api-access-xjs8v" (OuterVolumeSpecName: "kube-api-access-xjs8v") pod "2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1" (UID: "2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1"). InnerVolumeSpecName "kube-api-access-xjs8v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:53:26 crc kubenswrapper[4857]: I1128 13:53:26.975313 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1-config-data" (OuterVolumeSpecName: "config-data") pod "2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1" (UID: "2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:53:26 crc kubenswrapper[4857]: I1128 13:53:26.983805 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1" (UID: "2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.050722 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xjs8v\" (UniqueName: \"kubernetes.io/projected/2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1-kube-api-access-xjs8v\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.050755 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.050766 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.249700 4857 generic.go:334] "Generic (PLEG): container finished" podID="2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1" containerID="738eaaeae67dc9cfceae0399b6f99cb08ceac08d6c6b3a9efbed005f614268f1" exitCode=0 Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.249731 4857 generic.go:334] "Generic (PLEG): container finished" podID="2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1" containerID="8c15e23deb428ba6eac2f2d241086073723e2137696b1b9f716e76eb0e46b88e" exitCode=143 Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.249769 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.249809 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1","Type":"ContainerDied","Data":"738eaaeae67dc9cfceae0399b6f99cb08ceac08d6c6b3a9efbed005f614268f1"} Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.249843 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1","Type":"ContainerDied","Data":"8c15e23deb428ba6eac2f2d241086073723e2137696b1b9f716e76eb0e46b88e"} Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.249860 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1","Type":"ContainerDied","Data":"30a6e6d983ddf1ab6cbed4d9e3efd0e6d86efdfd43c42edc991594754308557c"} Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.249881 4857 scope.go:117] "RemoveContainer" containerID="738eaaeae67dc9cfceae0399b6f99cb08ceac08d6c6b3a9efbed005f614268f1" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.274680 4857 scope.go:117] "RemoveContainer" containerID="8c15e23deb428ba6eac2f2d241086073723e2137696b1b9f716e76eb0e46b88e" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.290099 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.309171 4857 scope.go:117] "RemoveContainer" containerID="738eaaeae67dc9cfceae0399b6f99cb08ceac08d6c6b3a9efbed005f614268f1" Nov 28 13:53:27 crc kubenswrapper[4857]: E1128 13:53:27.309825 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"738eaaeae67dc9cfceae0399b6f99cb08ceac08d6c6b3a9efbed005f614268f1\": container with ID starting with 738eaaeae67dc9cfceae0399b6f99cb08ceac08d6c6b3a9efbed005f614268f1 not found: ID does not exist" 
containerID="738eaaeae67dc9cfceae0399b6f99cb08ceac08d6c6b3a9efbed005f614268f1" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.309870 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"738eaaeae67dc9cfceae0399b6f99cb08ceac08d6c6b3a9efbed005f614268f1"} err="failed to get container status \"738eaaeae67dc9cfceae0399b6f99cb08ceac08d6c6b3a9efbed005f614268f1\": rpc error: code = NotFound desc = could not find container \"738eaaeae67dc9cfceae0399b6f99cb08ceac08d6c6b3a9efbed005f614268f1\": container with ID starting with 738eaaeae67dc9cfceae0399b6f99cb08ceac08d6c6b3a9efbed005f614268f1 not found: ID does not exist" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.309895 4857 scope.go:117] "RemoveContainer" containerID="8c15e23deb428ba6eac2f2d241086073723e2137696b1b9f716e76eb0e46b88e" Nov 28 13:53:27 crc kubenswrapper[4857]: E1128 13:53:27.310295 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8c15e23deb428ba6eac2f2d241086073723e2137696b1b9f716e76eb0e46b88e\": container with ID starting with 8c15e23deb428ba6eac2f2d241086073723e2137696b1b9f716e76eb0e46b88e not found: ID does not exist" containerID="8c15e23deb428ba6eac2f2d241086073723e2137696b1b9f716e76eb0e46b88e" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.310327 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c15e23deb428ba6eac2f2d241086073723e2137696b1b9f716e76eb0e46b88e"} err="failed to get container status \"8c15e23deb428ba6eac2f2d241086073723e2137696b1b9f716e76eb0e46b88e\": rpc error: code = NotFound desc = could not find container \"8c15e23deb428ba6eac2f2d241086073723e2137696b1b9f716e76eb0e46b88e\": container with ID starting with 8c15e23deb428ba6eac2f2d241086073723e2137696b1b9f716e76eb0e46b88e not found: ID does not exist" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.310349 4857 scope.go:117] "RemoveContainer" containerID="738eaaeae67dc9cfceae0399b6f99cb08ceac08d6c6b3a9efbed005f614268f1" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.310602 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"738eaaeae67dc9cfceae0399b6f99cb08ceac08d6c6b3a9efbed005f614268f1"} err="failed to get container status \"738eaaeae67dc9cfceae0399b6f99cb08ceac08d6c6b3a9efbed005f614268f1\": rpc error: code = NotFound desc = could not find container \"738eaaeae67dc9cfceae0399b6f99cb08ceac08d6c6b3a9efbed005f614268f1\": container with ID starting with 738eaaeae67dc9cfceae0399b6f99cb08ceac08d6c6b3a9efbed005f614268f1 not found: ID does not exist" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.310684 4857 scope.go:117] "RemoveContainer" containerID="8c15e23deb428ba6eac2f2d241086073723e2137696b1b9f716e76eb0e46b88e" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.311172 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c15e23deb428ba6eac2f2d241086073723e2137696b1b9f716e76eb0e46b88e"} err="failed to get container status \"8c15e23deb428ba6eac2f2d241086073723e2137696b1b9f716e76eb0e46b88e\": rpc error: code = NotFound desc = could not find container \"8c15e23deb428ba6eac2f2d241086073723e2137696b1b9f716e76eb0e46b88e\": container with ID starting with 8c15e23deb428ba6eac2f2d241086073723e2137696b1b9f716e76eb0e46b88e not found: ID does not exist" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.316075 4857 kubelet.go:2431] "SyncLoop 
REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.329849 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 13:53:27 crc kubenswrapper[4857]: E1128 13:53:27.330505 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1" containerName="nova-metadata-log" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.330530 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1" containerName="nova-metadata-log" Nov 28 13:53:27 crc kubenswrapper[4857]: E1128 13:53:27.330557 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1" containerName="nova-metadata-metadata" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.330564 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1" containerName="nova-metadata-metadata" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.330804 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1" containerName="nova-metadata-log" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.330821 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1" containerName="nova-metadata-metadata" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.331989 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.337411 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.337879 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.340515 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.456656 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/276ec9eb-c7f6-4496-b301-49832c364c70-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"276ec9eb-c7f6-4496-b301-49832c364c70\") " pod="openstack/nova-metadata-0" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.456795 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9fsh4\" (UniqueName: \"kubernetes.io/projected/276ec9eb-c7f6-4496-b301-49832c364c70-kube-api-access-9fsh4\") pod \"nova-metadata-0\" (UID: \"276ec9eb-c7f6-4496-b301-49832c364c70\") " pod="openstack/nova-metadata-0" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.456877 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/276ec9eb-c7f6-4496-b301-49832c364c70-config-data\") pod \"nova-metadata-0\" (UID: \"276ec9eb-c7f6-4496-b301-49832c364c70\") " pod="openstack/nova-metadata-0" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.457133 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/276ec9eb-c7f6-4496-b301-49832c364c70-nova-metadata-tls-certs\") pod 
\"nova-metadata-0\" (UID: \"276ec9eb-c7f6-4496-b301-49832c364c70\") " pod="openstack/nova-metadata-0" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.457329 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/276ec9eb-c7f6-4496-b301-49832c364c70-logs\") pod \"nova-metadata-0\" (UID: \"276ec9eb-c7f6-4496-b301-49832c364c70\") " pod="openstack/nova-metadata-0" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.559186 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/276ec9eb-c7f6-4496-b301-49832c364c70-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"276ec9eb-c7f6-4496-b301-49832c364c70\") " pod="openstack/nova-metadata-0" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.559330 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/276ec9eb-c7f6-4496-b301-49832c364c70-logs\") pod \"nova-metadata-0\" (UID: \"276ec9eb-c7f6-4496-b301-49832c364c70\") " pod="openstack/nova-metadata-0" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.559374 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/276ec9eb-c7f6-4496-b301-49832c364c70-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"276ec9eb-c7f6-4496-b301-49832c364c70\") " pod="openstack/nova-metadata-0" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.559451 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9fsh4\" (UniqueName: \"kubernetes.io/projected/276ec9eb-c7f6-4496-b301-49832c364c70-kube-api-access-9fsh4\") pod \"nova-metadata-0\" (UID: \"276ec9eb-c7f6-4496-b301-49832c364c70\") " pod="openstack/nova-metadata-0" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.559509 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/276ec9eb-c7f6-4496-b301-49832c364c70-config-data\") pod \"nova-metadata-0\" (UID: \"276ec9eb-c7f6-4496-b301-49832c364c70\") " pod="openstack/nova-metadata-0" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.559817 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/276ec9eb-c7f6-4496-b301-49832c364c70-logs\") pod \"nova-metadata-0\" (UID: \"276ec9eb-c7f6-4496-b301-49832c364c70\") " pod="openstack/nova-metadata-0" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.563597 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/276ec9eb-c7f6-4496-b301-49832c364c70-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"276ec9eb-c7f6-4496-b301-49832c364c70\") " pod="openstack/nova-metadata-0" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.564815 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/276ec9eb-c7f6-4496-b301-49832c364c70-config-data\") pod \"nova-metadata-0\" (UID: \"276ec9eb-c7f6-4496-b301-49832c364c70\") " pod="openstack/nova-metadata-0" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.574797 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/276ec9eb-c7f6-4496-b301-49832c364c70-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"276ec9eb-c7f6-4496-b301-49832c364c70\") " pod="openstack/nova-metadata-0" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.580110 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9fsh4\" (UniqueName: \"kubernetes.io/projected/276ec9eb-c7f6-4496-b301-49832c364c70-kube-api-access-9fsh4\") pod \"nova-metadata-0\" (UID: \"276ec9eb-c7f6-4496-b301-49832c364c70\") " pod="openstack/nova-metadata-0" Nov 28 13:53:27 crc kubenswrapper[4857]: I1128 13:53:27.654390 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 13:53:28 crc kubenswrapper[4857]: I1128 13:53:28.087310 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 13:53:28 crc kubenswrapper[4857]: W1128 13:53:28.088535 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod276ec9eb_c7f6_4496_b301_49832c364c70.slice/crio-ae1dd1b509ffd8ea7b6b81bf26399923880732b9ff9c20260132c1563ebe03ba WatchSource:0}: Error finding container ae1dd1b509ffd8ea7b6b81bf26399923880732b9ff9c20260132c1563ebe03ba: Status 404 returned error can't find the container with id ae1dd1b509ffd8ea7b6b81bf26399923880732b9ff9c20260132c1563ebe03ba Nov 28 13:53:28 crc kubenswrapper[4857]: I1128 13:53:28.248851 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1" path="/var/lib/kubelet/pods/2c4d1fd8-27b5-4544-9fe0-7afdbaf191b1/volumes" Nov 28 13:53:28 crc kubenswrapper[4857]: I1128 13:53:28.263801 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"276ec9eb-c7f6-4496-b301-49832c364c70","Type":"ContainerStarted","Data":"ae1dd1b509ffd8ea7b6b81bf26399923880732b9ff9c20260132c1563ebe03ba"} Nov 28 13:53:29 crc kubenswrapper[4857]: I1128 13:53:29.219803 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 28 13:53:29 crc kubenswrapper[4857]: I1128 13:53:29.279672 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"276ec9eb-c7f6-4496-b301-49832c364c70","Type":"ContainerStarted","Data":"edf695a4b2aefadfbffa2b1701d12f49267ccef26ccfc8d44f64c14e7baecaa3"} Nov 28 13:53:29 crc kubenswrapper[4857]: I1128 13:53:29.279718 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"276ec9eb-c7f6-4496-b301-49832c364c70","Type":"ContainerStarted","Data":"c26fe6539974f9a65b12d395eafe823ae5cba0a0fee5d34240dd4d65b9999a0e"} Nov 28 13:53:29 crc kubenswrapper[4857]: I1128 13:53:29.316366 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.316344559 podStartE2EDuration="2.316344559s" podCreationTimestamp="2025-11-28 13:53:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:53:29.30575483 +0000 UTC m=+1459.429696267" watchObservedRunningTime="2025-11-28 13:53:29.316344559 +0000 UTC m=+1459.440285996" Nov 28 13:53:30 crc kubenswrapper[4857]: I1128 13:53:30.290506 4857 generic.go:334] "Generic (PLEG): container finished" podID="b1f6b069-f687-4108-b8b3-2457822d496a" containerID="1456dd1bd3e0121d518cc6a6cd94cf805fa6cf250c06d4aca8868a28844b9064" 
exitCode=0 Nov 28 13:53:30 crc kubenswrapper[4857]: I1128 13:53:30.290741 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-qxt8d" event={"ID":"b1f6b069-f687-4108-b8b3-2457822d496a","Type":"ContainerDied","Data":"1456dd1bd3e0121d518cc6a6cd94cf805fa6cf250c06d4aca8868a28844b9064"} Nov 28 13:53:30 crc kubenswrapper[4857]: I1128 13:53:30.720515 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 28 13:53:30 crc kubenswrapper[4857]: I1128 13:53:30.720561 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 28 13:53:30 crc kubenswrapper[4857]: I1128 13:53:30.751607 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 28 13:53:31 crc kubenswrapper[4857]: I1128 13:53:31.062353 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 28 13:53:31 crc kubenswrapper[4857]: I1128 13:53:31.114245 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 13:53:31 crc kubenswrapper[4857]: I1128 13:53:31.114490 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 13:53:31 crc kubenswrapper[4857]: I1128 13:53:31.183376 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-bccf8f775-qcdch" Nov 28 13:53:31 crc kubenswrapper[4857]: I1128 13:53:31.252969 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-rjfmd"] Nov 28 13:53:31 crc kubenswrapper[4857]: I1128 13:53:31.253192 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6578955fd5-rjfmd" podUID="f524461b-de2f-48eb-a378-c255d35327c4" containerName="dnsmasq-dns" containerID="cri-o://db2a79cd4b372ee99772e981a9c872cdee6f43d61ebd14f1700e6b47f5f8b6da" gracePeriod=10 Nov 28 13:53:31 crc kubenswrapper[4857]: I1128 13:53:31.352432 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 28 13:53:31 crc kubenswrapper[4857]: I1128 13:53:31.805312 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-qxt8d" Nov 28 13:53:31 crc kubenswrapper[4857]: I1128 13:53:31.929205 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-rjfmd" Nov 28 13:53:31 crc kubenswrapper[4857]: I1128 13:53:31.948085 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xz86v\" (UniqueName: \"kubernetes.io/projected/b1f6b069-f687-4108-b8b3-2457822d496a-kube-api-access-xz86v\") pod \"b1f6b069-f687-4108-b8b3-2457822d496a\" (UID: \"b1f6b069-f687-4108-b8b3-2457822d496a\") " Nov 28 13:53:31 crc kubenswrapper[4857]: I1128 13:53:31.948206 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b1f6b069-f687-4108-b8b3-2457822d496a-scripts\") pod \"b1f6b069-f687-4108-b8b3-2457822d496a\" (UID: \"b1f6b069-f687-4108-b8b3-2457822d496a\") " Nov 28 13:53:31 crc kubenswrapper[4857]: I1128 13:53:31.948262 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1f6b069-f687-4108-b8b3-2457822d496a-combined-ca-bundle\") pod \"b1f6b069-f687-4108-b8b3-2457822d496a\" (UID: \"b1f6b069-f687-4108-b8b3-2457822d496a\") " Nov 28 13:53:31 crc kubenswrapper[4857]: I1128 13:53:31.948281 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1f6b069-f687-4108-b8b3-2457822d496a-config-data\") pod \"b1f6b069-f687-4108-b8b3-2457822d496a\" (UID: \"b1f6b069-f687-4108-b8b3-2457822d496a\") " Nov 28 13:53:31 crc kubenswrapper[4857]: I1128 13:53:31.965752 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1f6b069-f687-4108-b8b3-2457822d496a-scripts" (OuterVolumeSpecName: "scripts") pod "b1f6b069-f687-4108-b8b3-2457822d496a" (UID: "b1f6b069-f687-4108-b8b3-2457822d496a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:53:31 crc kubenswrapper[4857]: I1128 13:53:31.966234 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b1f6b069-f687-4108-b8b3-2457822d496a-kube-api-access-xz86v" (OuterVolumeSpecName: "kube-api-access-xz86v") pod "b1f6b069-f687-4108-b8b3-2457822d496a" (UID: "b1f6b069-f687-4108-b8b3-2457822d496a"). InnerVolumeSpecName "kube-api-access-xz86v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:53:31 crc kubenswrapper[4857]: I1128 13:53:31.987235 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1f6b069-f687-4108-b8b3-2457822d496a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b1f6b069-f687-4108-b8b3-2457822d496a" (UID: "b1f6b069-f687-4108-b8b3-2457822d496a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:53:31 crc kubenswrapper[4857]: I1128 13:53:31.993148 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1f6b069-f687-4108-b8b3-2457822d496a-config-data" (OuterVolumeSpecName: "config-data") pod "b1f6b069-f687-4108-b8b3-2457822d496a" (UID: "b1f6b069-f687-4108-b8b3-2457822d496a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.049477 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f524461b-de2f-48eb-a378-c255d35327c4-config\") pod \"f524461b-de2f-48eb-a378-c255d35327c4\" (UID: \"f524461b-de2f-48eb-a378-c255d35327c4\") " Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.049574 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g4m9q\" (UniqueName: \"kubernetes.io/projected/f524461b-de2f-48eb-a378-c255d35327c4-kube-api-access-g4m9q\") pod \"f524461b-de2f-48eb-a378-c255d35327c4\" (UID: \"f524461b-de2f-48eb-a378-c255d35327c4\") " Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.049615 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f524461b-de2f-48eb-a378-c255d35327c4-dns-swift-storage-0\") pod \"f524461b-de2f-48eb-a378-c255d35327c4\" (UID: \"f524461b-de2f-48eb-a378-c255d35327c4\") " Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.049698 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f524461b-de2f-48eb-a378-c255d35327c4-ovsdbserver-nb\") pod \"f524461b-de2f-48eb-a378-c255d35327c4\" (UID: \"f524461b-de2f-48eb-a378-c255d35327c4\") " Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.049741 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f524461b-de2f-48eb-a378-c255d35327c4-ovsdbserver-sb\") pod \"f524461b-de2f-48eb-a378-c255d35327c4\" (UID: \"f524461b-de2f-48eb-a378-c255d35327c4\") " Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.049846 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f524461b-de2f-48eb-a378-c255d35327c4-dns-svc\") pod \"f524461b-de2f-48eb-a378-c255d35327c4\" (UID: \"f524461b-de2f-48eb-a378-c255d35327c4\") " Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.050392 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xz86v\" (UniqueName: \"kubernetes.io/projected/b1f6b069-f687-4108-b8b3-2457822d496a-kube-api-access-xz86v\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.050409 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b1f6b069-f687-4108-b8b3-2457822d496a-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.050422 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1f6b069-f687-4108-b8b3-2457822d496a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.050434 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1f6b069-f687-4108-b8b3-2457822d496a-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.065196 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f524461b-de2f-48eb-a378-c255d35327c4-kube-api-access-g4m9q" (OuterVolumeSpecName: "kube-api-access-g4m9q") pod "f524461b-de2f-48eb-a378-c255d35327c4" 
(UID: "f524461b-de2f-48eb-a378-c255d35327c4"). InnerVolumeSpecName "kube-api-access-g4m9q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.133241 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f524461b-de2f-48eb-a378-c255d35327c4-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f524461b-de2f-48eb-a378-c255d35327c4" (UID: "f524461b-de2f-48eb-a378-c255d35327c4"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.148410 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f524461b-de2f-48eb-a378-c255d35327c4-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "f524461b-de2f-48eb-a378-c255d35327c4" (UID: "f524461b-de2f-48eb-a378-c255d35327c4"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.152372 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g4m9q\" (UniqueName: \"kubernetes.io/projected/f524461b-de2f-48eb-a378-c255d35327c4-kube-api-access-g4m9q\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.152401 4857 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f524461b-de2f-48eb-a378-c255d35327c4-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.152410 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f524461b-de2f-48eb-a378-c255d35327c4-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.164129 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f524461b-de2f-48eb-a378-c255d35327c4-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f524461b-de2f-48eb-a378-c255d35327c4" (UID: "f524461b-de2f-48eb-a378-c255d35327c4"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.165420 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f524461b-de2f-48eb-a378-c255d35327c4-config" (OuterVolumeSpecName: "config") pod "f524461b-de2f-48eb-a378-c255d35327c4" (UID: "f524461b-de2f-48eb-a378-c255d35327c4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.166446 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f524461b-de2f-48eb-a378-c255d35327c4-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f524461b-de2f-48eb-a378-c255d35327c4" (UID: "f524461b-de2f-48eb-a378-c255d35327c4"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.198573 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="6c56933f-fbe9-479a-aa1e-dbf699eb13af" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.186:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.199410 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="6c56933f-fbe9-479a-aa1e-dbf699eb13af" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.186:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.253979 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f524461b-de2f-48eb-a378-c255d35327c4-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.254120 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f524461b-de2f-48eb-a378-c255d35327c4-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.254130 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f524461b-de2f-48eb-a378-c255d35327c4-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.348049 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-qxt8d" Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.349776 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-qxt8d" event={"ID":"b1f6b069-f687-4108-b8b3-2457822d496a","Type":"ContainerDied","Data":"2636b98d84720274fc4f123a1e7adb571d01bf6a94e9001e4cca0095c3a10757"} Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.349841 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2636b98d84720274fc4f123a1e7adb571d01bf6a94e9001e4cca0095c3a10757" Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.352195 4857 generic.go:334] "Generic (PLEG): container finished" podID="f524461b-de2f-48eb-a378-c255d35327c4" containerID="db2a79cd4b372ee99772e981a9c872cdee6f43d61ebd14f1700e6b47f5f8b6da" exitCode=0 Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.352491 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-rjfmd" event={"ID":"f524461b-de2f-48eb-a378-c255d35327c4","Type":"ContainerDied","Data":"db2a79cd4b372ee99772e981a9c872cdee6f43d61ebd14f1700e6b47f5f8b6da"} Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.352550 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6578955fd5-rjfmd" event={"ID":"f524461b-de2f-48eb-a378-c255d35327c4","Type":"ContainerDied","Data":"7acf3b9e2dbfae45fd1e80710cb25da550f8697cddd1c54f86373c5f8512d657"} Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.352576 4857 scope.go:117] "RemoveContainer" containerID="db2a79cd4b372ee99772e981a9c872cdee6f43d61ebd14f1700e6b47f5f8b6da" Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.353574 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6578955fd5-rjfmd" Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.451282 4857 scope.go:117] "RemoveContainer" containerID="79c9d3e44f232f36efdc92705a12480722e27afd98eb482386e33e9ef9ed060b" Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.451460 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-rjfmd"] Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.463992 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6578955fd5-rjfmd"] Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.504115 4857 scope.go:117] "RemoveContainer" containerID="db2a79cd4b372ee99772e981a9c872cdee6f43d61ebd14f1700e6b47f5f8b6da" Nov 28 13:53:32 crc kubenswrapper[4857]: E1128 13:53:32.514310 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"db2a79cd4b372ee99772e981a9c872cdee6f43d61ebd14f1700e6b47f5f8b6da\": container with ID starting with db2a79cd4b372ee99772e981a9c872cdee6f43d61ebd14f1700e6b47f5f8b6da not found: ID does not exist" containerID="db2a79cd4b372ee99772e981a9c872cdee6f43d61ebd14f1700e6b47f5f8b6da" Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.514537 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db2a79cd4b372ee99772e981a9c872cdee6f43d61ebd14f1700e6b47f5f8b6da"} err="failed to get container status \"db2a79cd4b372ee99772e981a9c872cdee6f43d61ebd14f1700e6b47f5f8b6da\": rpc error: code = NotFound desc = could not find container \"db2a79cd4b372ee99772e981a9c872cdee6f43d61ebd14f1700e6b47f5f8b6da\": container with ID starting with db2a79cd4b372ee99772e981a9c872cdee6f43d61ebd14f1700e6b47f5f8b6da not found: ID does not exist" Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.514656 4857 scope.go:117] "RemoveContainer" containerID="79c9d3e44f232f36efdc92705a12480722e27afd98eb482386e33e9ef9ed060b" Nov 28 13:53:32 crc kubenswrapper[4857]: E1128 13:53:32.517686 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"79c9d3e44f232f36efdc92705a12480722e27afd98eb482386e33e9ef9ed060b\": container with ID starting with 79c9d3e44f232f36efdc92705a12480722e27afd98eb482386e33e9ef9ed060b not found: ID does not exist" containerID="79c9d3e44f232f36efdc92705a12480722e27afd98eb482386e33e9ef9ed060b" Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.517745 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"79c9d3e44f232f36efdc92705a12480722e27afd98eb482386e33e9ef9ed060b"} err="failed to get container status \"79c9d3e44f232f36efdc92705a12480722e27afd98eb482386e33e9ef9ed060b\": rpc error: code = NotFound desc = could not find container \"79c9d3e44f232f36efdc92705a12480722e27afd98eb482386e33e9ef9ed060b\": container with ID starting with 79c9d3e44f232f36efdc92705a12480722e27afd98eb482386e33e9ef9ed060b not found: ID does not exist" Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.518133 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.518396 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="6c56933f-fbe9-479a-aa1e-dbf699eb13af" containerName="nova-api-log" containerID="cri-o://f33b7982301c419d34731196833b066b89d549cda95aad48f7d2760468d1c871" gracePeriod=30 Nov 28 13:53:32 
crc kubenswrapper[4857]: I1128 13:53:32.518533 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="6c56933f-fbe9-479a-aa1e-dbf699eb13af" containerName="nova-api-api" containerID="cri-o://888fe2c7fb9a72d11577c72ea2a6f6697c1a76294064e4ee884fb8613fd8badd" gracePeriod=30 Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.538458 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.561592 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.561839 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="276ec9eb-c7f6-4496-b301-49832c364c70" containerName="nova-metadata-log" containerID="cri-o://c26fe6539974f9a65b12d395eafe823ae5cba0a0fee5d34240dd4d65b9999a0e" gracePeriod=30 Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.562348 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="276ec9eb-c7f6-4496-b301-49832c364c70" containerName="nova-metadata-metadata" containerID="cri-o://edf695a4b2aefadfbffa2b1701d12f49267ccef26ccfc8d44f64c14e7baecaa3" gracePeriod=30 Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.655146 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 13:53:32 crc kubenswrapper[4857]: I1128 13:53:32.655473 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.170351 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.270349 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/276ec9eb-c7f6-4496-b301-49832c364c70-logs\") pod \"276ec9eb-c7f6-4496-b301-49832c364c70\" (UID: \"276ec9eb-c7f6-4496-b301-49832c364c70\") " Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.270429 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/276ec9eb-c7f6-4496-b301-49832c364c70-combined-ca-bundle\") pod \"276ec9eb-c7f6-4496-b301-49832c364c70\" (UID: \"276ec9eb-c7f6-4496-b301-49832c364c70\") " Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.270523 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9fsh4\" (UniqueName: \"kubernetes.io/projected/276ec9eb-c7f6-4496-b301-49832c364c70-kube-api-access-9fsh4\") pod \"276ec9eb-c7f6-4496-b301-49832c364c70\" (UID: \"276ec9eb-c7f6-4496-b301-49832c364c70\") " Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.270582 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/276ec9eb-c7f6-4496-b301-49832c364c70-nova-metadata-tls-certs\") pod \"276ec9eb-c7f6-4496-b301-49832c364c70\" (UID: \"276ec9eb-c7f6-4496-b301-49832c364c70\") " Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.270650 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/276ec9eb-c7f6-4496-b301-49832c364c70-logs" (OuterVolumeSpecName: "logs") pod "276ec9eb-c7f6-4496-b301-49832c364c70" (UID: "276ec9eb-c7f6-4496-b301-49832c364c70"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.270808 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/276ec9eb-c7f6-4496-b301-49832c364c70-config-data\") pod \"276ec9eb-c7f6-4496-b301-49832c364c70\" (UID: \"276ec9eb-c7f6-4496-b301-49832c364c70\") " Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.271418 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/276ec9eb-c7f6-4496-b301-49832c364c70-logs\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.283252 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/276ec9eb-c7f6-4496-b301-49832c364c70-kube-api-access-9fsh4" (OuterVolumeSpecName: "kube-api-access-9fsh4") pod "276ec9eb-c7f6-4496-b301-49832c364c70" (UID: "276ec9eb-c7f6-4496-b301-49832c364c70"). InnerVolumeSpecName "kube-api-access-9fsh4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.299366 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/276ec9eb-c7f6-4496-b301-49832c364c70-config-data" (OuterVolumeSpecName: "config-data") pod "276ec9eb-c7f6-4496-b301-49832c364c70" (UID: "276ec9eb-c7f6-4496-b301-49832c364c70"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.306795 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/276ec9eb-c7f6-4496-b301-49832c364c70-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "276ec9eb-c7f6-4496-b301-49832c364c70" (UID: "276ec9eb-c7f6-4496-b301-49832c364c70"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.344485 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/276ec9eb-c7f6-4496-b301-49832c364c70-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "276ec9eb-c7f6-4496-b301-49832c364c70" (UID: "276ec9eb-c7f6-4496-b301-49832c364c70"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.364294 4857 generic.go:334] "Generic (PLEG): container finished" podID="6c56933f-fbe9-479a-aa1e-dbf699eb13af" containerID="f33b7982301c419d34731196833b066b89d549cda95aad48f7d2760468d1c871" exitCode=143 Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.364382 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6c56933f-fbe9-479a-aa1e-dbf699eb13af","Type":"ContainerDied","Data":"f33b7982301c419d34731196833b066b89d549cda95aad48f7d2760468d1c871"} Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.367401 4857 generic.go:334] "Generic (PLEG): container finished" podID="276ec9eb-c7f6-4496-b301-49832c364c70" containerID="edf695a4b2aefadfbffa2b1701d12f49267ccef26ccfc8d44f64c14e7baecaa3" exitCode=0 Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.367427 4857 generic.go:334] "Generic (PLEG): container finished" podID="276ec9eb-c7f6-4496-b301-49832c364c70" containerID="c26fe6539974f9a65b12d395eafe823ae5cba0a0fee5d34240dd4d65b9999a0e" exitCode=143 Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.367483 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"276ec9eb-c7f6-4496-b301-49832c364c70","Type":"ContainerDied","Data":"edf695a4b2aefadfbffa2b1701d12f49267ccef26ccfc8d44f64c14e7baecaa3"} Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.367504 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"276ec9eb-c7f6-4496-b301-49832c364c70","Type":"ContainerDied","Data":"c26fe6539974f9a65b12d395eafe823ae5cba0a0fee5d34240dd4d65b9999a0e"} Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.367518 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"276ec9eb-c7f6-4496-b301-49832c364c70","Type":"ContainerDied","Data":"ae1dd1b509ffd8ea7b6b81bf26399923880732b9ff9c20260132c1563ebe03ba"} Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.367537 4857 scope.go:117] "RemoveContainer" containerID="edf695a4b2aefadfbffa2b1701d12f49267ccef26ccfc8d44f64c14e7baecaa3" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.367802 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.371233 4857 generic.go:334] "Generic (PLEG): container finished" podID="d26ebe73-c4df-458d-a2f3-1da92d587632" containerID="751f12a19ac746a9f59958709a679ab6e0f8b0c295bc0836d21ad235a87f6a5b" exitCode=0 Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.371691 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d" containerName="nova-scheduler-scheduler" containerID="cri-o://6eca3260c1b0bdbf12aa4493a7c10635581ce38d699e243a4c0ada74848cd5bc" gracePeriod=30 Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.371325 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-khxxx" event={"ID":"d26ebe73-c4df-458d-a2f3-1da92d587632","Type":"ContainerDied","Data":"751f12a19ac746a9f59958709a679ab6e0f8b0c295bc0836d21ad235a87f6a5b"} Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.372169 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.377179 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/276ec9eb-c7f6-4496-b301-49832c364c70-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.377243 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9fsh4\" (UniqueName: \"kubernetes.io/projected/276ec9eb-c7f6-4496-b301-49832c364c70-kube-api-access-9fsh4\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.377256 4857 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/276ec9eb-c7f6-4496-b301-49832c364c70-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.377267 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/276ec9eb-c7f6-4496-b301-49832c364c70-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.377542 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="818836f7-7e48-4477-9e79-41c71000929d" containerName="kube-state-metrics" containerID="cri-o://774bdff296f17d9390d03ac7580f3885a931f00f101b0f6a27e6c541f98075d8" gracePeriod=30 Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.589924 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.608690 4857 scope.go:117] "RemoveContainer" containerID="c26fe6539974f9a65b12d395eafe823ae5cba0a0fee5d34240dd4d65b9999a0e" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.608891 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.623104 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 13:53:33 crc kubenswrapper[4857]: E1128 13:53:33.623491 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1f6b069-f687-4108-b8b3-2457822d496a" containerName="nova-manage" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.623504 4857 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="b1f6b069-f687-4108-b8b3-2457822d496a" containerName="nova-manage" Nov 28 13:53:33 crc kubenswrapper[4857]: E1128 13:53:33.623536 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f524461b-de2f-48eb-a378-c255d35327c4" containerName="dnsmasq-dns" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.623548 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f524461b-de2f-48eb-a378-c255d35327c4" containerName="dnsmasq-dns" Nov 28 13:53:33 crc kubenswrapper[4857]: E1128 13:53:33.623560 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="276ec9eb-c7f6-4496-b301-49832c364c70" containerName="nova-metadata-log" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.623568 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="276ec9eb-c7f6-4496-b301-49832c364c70" containerName="nova-metadata-log" Nov 28 13:53:33 crc kubenswrapper[4857]: E1128 13:53:33.623578 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="276ec9eb-c7f6-4496-b301-49832c364c70" containerName="nova-metadata-metadata" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.623584 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="276ec9eb-c7f6-4496-b301-49832c364c70" containerName="nova-metadata-metadata" Nov 28 13:53:33 crc kubenswrapper[4857]: E1128 13:53:33.623597 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f524461b-de2f-48eb-a378-c255d35327c4" containerName="init" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.623604 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f524461b-de2f-48eb-a378-c255d35327c4" containerName="init" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.623884 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="276ec9eb-c7f6-4496-b301-49832c364c70" containerName="nova-metadata-log" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.623905 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f524461b-de2f-48eb-a378-c255d35327c4" containerName="dnsmasq-dns" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.623918 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1f6b069-f687-4108-b8b3-2457822d496a" containerName="nova-manage" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.623934 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="276ec9eb-c7f6-4496-b301-49832c364c70" containerName="nova-metadata-metadata" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.625188 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.629830 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.630447 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.661934 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.676823 4857 scope.go:117] "RemoveContainer" containerID="edf695a4b2aefadfbffa2b1701d12f49267ccef26ccfc8d44f64c14e7baecaa3" Nov 28 13:53:33 crc kubenswrapper[4857]: E1128 13:53:33.677851 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"edf695a4b2aefadfbffa2b1701d12f49267ccef26ccfc8d44f64c14e7baecaa3\": container with ID starting with edf695a4b2aefadfbffa2b1701d12f49267ccef26ccfc8d44f64c14e7baecaa3 not found: ID does not exist" containerID="edf695a4b2aefadfbffa2b1701d12f49267ccef26ccfc8d44f64c14e7baecaa3" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.677886 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"edf695a4b2aefadfbffa2b1701d12f49267ccef26ccfc8d44f64c14e7baecaa3"} err="failed to get container status \"edf695a4b2aefadfbffa2b1701d12f49267ccef26ccfc8d44f64c14e7baecaa3\": rpc error: code = NotFound desc = could not find container \"edf695a4b2aefadfbffa2b1701d12f49267ccef26ccfc8d44f64c14e7baecaa3\": container with ID starting with edf695a4b2aefadfbffa2b1701d12f49267ccef26ccfc8d44f64c14e7baecaa3 not found: ID does not exist" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.677908 4857 scope.go:117] "RemoveContainer" containerID="c26fe6539974f9a65b12d395eafe823ae5cba0a0fee5d34240dd4d65b9999a0e" Nov 28 13:53:33 crc kubenswrapper[4857]: E1128 13:53:33.678887 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c26fe6539974f9a65b12d395eafe823ae5cba0a0fee5d34240dd4d65b9999a0e\": container with ID starting with c26fe6539974f9a65b12d395eafe823ae5cba0a0fee5d34240dd4d65b9999a0e not found: ID does not exist" containerID="c26fe6539974f9a65b12d395eafe823ae5cba0a0fee5d34240dd4d65b9999a0e" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.678916 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c26fe6539974f9a65b12d395eafe823ae5cba0a0fee5d34240dd4d65b9999a0e"} err="failed to get container status \"c26fe6539974f9a65b12d395eafe823ae5cba0a0fee5d34240dd4d65b9999a0e\": rpc error: code = NotFound desc = could not find container \"c26fe6539974f9a65b12d395eafe823ae5cba0a0fee5d34240dd4d65b9999a0e\": container with ID starting with c26fe6539974f9a65b12d395eafe823ae5cba0a0fee5d34240dd4d65b9999a0e not found: ID does not exist" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.678929 4857 scope.go:117] "RemoveContainer" containerID="edf695a4b2aefadfbffa2b1701d12f49267ccef26ccfc8d44f64c14e7baecaa3" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.681841 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"edf695a4b2aefadfbffa2b1701d12f49267ccef26ccfc8d44f64c14e7baecaa3"} err="failed to get container status \"edf695a4b2aefadfbffa2b1701d12f49267ccef26ccfc8d44f64c14e7baecaa3\": rpc error: 
code = NotFound desc = could not find container \"edf695a4b2aefadfbffa2b1701d12f49267ccef26ccfc8d44f64c14e7baecaa3\": container with ID starting with edf695a4b2aefadfbffa2b1701d12f49267ccef26ccfc8d44f64c14e7baecaa3 not found: ID does not exist" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.681886 4857 scope.go:117] "RemoveContainer" containerID="c26fe6539974f9a65b12d395eafe823ae5cba0a0fee5d34240dd4d65b9999a0e" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.685174 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c26fe6539974f9a65b12d395eafe823ae5cba0a0fee5d34240dd4d65b9999a0e"} err="failed to get container status \"c26fe6539974f9a65b12d395eafe823ae5cba0a0fee5d34240dd4d65b9999a0e\": rpc error: code = NotFound desc = could not find container \"c26fe6539974f9a65b12d395eafe823ae5cba0a0fee5d34240dd4d65b9999a0e\": container with ID starting with c26fe6539974f9a65b12d395eafe823ae5cba0a0fee5d34240dd4d65b9999a0e not found: ID does not exist" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.785227 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a7818ef-92f1-45d7-8e82-a12bd9e52025-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0a7818ef-92f1-45d7-8e82-a12bd9e52025\") " pod="openstack/nova-metadata-0" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.785272 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6q58c\" (UniqueName: \"kubernetes.io/projected/0a7818ef-92f1-45d7-8e82-a12bd9e52025-kube-api-access-6q58c\") pod \"nova-metadata-0\" (UID: \"0a7818ef-92f1-45d7-8e82-a12bd9e52025\") " pod="openstack/nova-metadata-0" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.785511 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a7818ef-92f1-45d7-8e82-a12bd9e52025-logs\") pod \"nova-metadata-0\" (UID: \"0a7818ef-92f1-45d7-8e82-a12bd9e52025\") " pod="openstack/nova-metadata-0" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.785642 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a7818ef-92f1-45d7-8e82-a12bd9e52025-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"0a7818ef-92f1-45d7-8e82-a12bd9e52025\") " pod="openstack/nova-metadata-0" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.785679 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a7818ef-92f1-45d7-8e82-a12bd9e52025-config-data\") pod \"nova-metadata-0\" (UID: \"0a7818ef-92f1-45d7-8e82-a12bd9e52025\") " pod="openstack/nova-metadata-0" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.884713 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.888195 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a7818ef-92f1-45d7-8e82-a12bd9e52025-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0a7818ef-92f1-45d7-8e82-a12bd9e52025\") " pod="openstack/nova-metadata-0" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.888253 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6q58c\" (UniqueName: \"kubernetes.io/projected/0a7818ef-92f1-45d7-8e82-a12bd9e52025-kube-api-access-6q58c\") pod \"nova-metadata-0\" (UID: \"0a7818ef-92f1-45d7-8e82-a12bd9e52025\") " pod="openstack/nova-metadata-0" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.888321 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a7818ef-92f1-45d7-8e82-a12bd9e52025-logs\") pod \"nova-metadata-0\" (UID: \"0a7818ef-92f1-45d7-8e82-a12bd9e52025\") " pod="openstack/nova-metadata-0" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.888368 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a7818ef-92f1-45d7-8e82-a12bd9e52025-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"0a7818ef-92f1-45d7-8e82-a12bd9e52025\") " pod="openstack/nova-metadata-0" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.888392 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a7818ef-92f1-45d7-8e82-a12bd9e52025-config-data\") pod \"nova-metadata-0\" (UID: \"0a7818ef-92f1-45d7-8e82-a12bd9e52025\") " pod="openstack/nova-metadata-0" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.889165 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a7818ef-92f1-45d7-8e82-a12bd9e52025-logs\") pod \"nova-metadata-0\" (UID: \"0a7818ef-92f1-45d7-8e82-a12bd9e52025\") " pod="openstack/nova-metadata-0" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.895157 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a7818ef-92f1-45d7-8e82-a12bd9e52025-config-data\") pod \"nova-metadata-0\" (UID: \"0a7818ef-92f1-45d7-8e82-a12bd9e52025\") " pod="openstack/nova-metadata-0" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.895869 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a7818ef-92f1-45d7-8e82-a12bd9e52025-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"0a7818ef-92f1-45d7-8e82-a12bd9e52025\") " pod="openstack/nova-metadata-0" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.900747 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a7818ef-92f1-45d7-8e82-a12bd9e52025-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0a7818ef-92f1-45d7-8e82-a12bd9e52025\") " pod="openstack/nova-metadata-0" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.907304 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6q58c\" (UniqueName: 
\"kubernetes.io/projected/0a7818ef-92f1-45d7-8e82-a12bd9e52025-kube-api-access-6q58c\") pod \"nova-metadata-0\" (UID: \"0a7818ef-92f1-45d7-8e82-a12bd9e52025\") " pod="openstack/nova-metadata-0" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.952262 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 13:53:33 crc kubenswrapper[4857]: I1128 13:53:33.995438 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p5pjv\" (UniqueName: \"kubernetes.io/projected/818836f7-7e48-4477-9e79-41c71000929d-kube-api-access-p5pjv\") pod \"818836f7-7e48-4477-9e79-41c71000929d\" (UID: \"818836f7-7e48-4477-9e79-41c71000929d\") " Nov 28 13:53:34 crc kubenswrapper[4857]: I1128 13:53:34.001887 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/818836f7-7e48-4477-9e79-41c71000929d-kube-api-access-p5pjv" (OuterVolumeSpecName: "kube-api-access-p5pjv") pod "818836f7-7e48-4477-9e79-41c71000929d" (UID: "818836f7-7e48-4477-9e79-41c71000929d"). InnerVolumeSpecName "kube-api-access-p5pjv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:53:34 crc kubenswrapper[4857]: I1128 13:53:34.098972 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p5pjv\" (UniqueName: \"kubernetes.io/projected/818836f7-7e48-4477-9e79-41c71000929d-kube-api-access-p5pjv\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:34 crc kubenswrapper[4857]: I1128 13:53:34.240260 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="276ec9eb-c7f6-4496-b301-49832c364c70" path="/var/lib/kubelet/pods/276ec9eb-c7f6-4496-b301-49832c364c70/volumes" Nov 28 13:53:34 crc kubenswrapper[4857]: I1128 13:53:34.241117 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f524461b-de2f-48eb-a378-c255d35327c4" path="/var/lib/kubelet/pods/f524461b-de2f-48eb-a378-c255d35327c4/volumes" Nov 28 13:53:34 crc kubenswrapper[4857]: I1128 13:53:34.381542 4857 generic.go:334] "Generic (PLEG): container finished" podID="818836f7-7e48-4477-9e79-41c71000929d" containerID="774bdff296f17d9390d03ac7580f3885a931f00f101b0f6a27e6c541f98075d8" exitCode=2 Nov 28 13:53:34 crc kubenswrapper[4857]: I1128 13:53:34.382002 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"818836f7-7e48-4477-9e79-41c71000929d","Type":"ContainerDied","Data":"774bdff296f17d9390d03ac7580f3885a931f00f101b0f6a27e6c541f98075d8"} Nov 28 13:53:34 crc kubenswrapper[4857]: I1128 13:53:34.382704 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"818836f7-7e48-4477-9e79-41c71000929d","Type":"ContainerDied","Data":"0232edd093f05241b1f364f98e0c5082c0eb821cdaec53d2f097c0457534c9ff"} Nov 28 13:53:34 crc kubenswrapper[4857]: I1128 13:53:34.382772 4857 scope.go:117] "RemoveContainer" containerID="774bdff296f17d9390d03ac7580f3885a931f00f101b0f6a27e6c541f98075d8" Nov 28 13:53:34 crc kubenswrapper[4857]: I1128 13:53:34.382060 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 13:53:34 crc kubenswrapper[4857]: I1128 13:53:34.421207 4857 scope.go:117] "RemoveContainer" containerID="774bdff296f17d9390d03ac7580f3885a931f00f101b0f6a27e6c541f98075d8" Nov 28 13:53:34 crc kubenswrapper[4857]: I1128 13:53:34.424833 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 13:53:34 crc kubenswrapper[4857]: E1128 13:53:34.425124 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"774bdff296f17d9390d03ac7580f3885a931f00f101b0f6a27e6c541f98075d8\": container with ID starting with 774bdff296f17d9390d03ac7580f3885a931f00f101b0f6a27e6c541f98075d8 not found: ID does not exist" containerID="774bdff296f17d9390d03ac7580f3885a931f00f101b0f6a27e6c541f98075d8" Nov 28 13:53:34 crc kubenswrapper[4857]: I1128 13:53:34.425167 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"774bdff296f17d9390d03ac7580f3885a931f00f101b0f6a27e6c541f98075d8"} err="failed to get container status \"774bdff296f17d9390d03ac7580f3885a931f00f101b0f6a27e6c541f98075d8\": rpc error: code = NotFound desc = could not find container \"774bdff296f17d9390d03ac7580f3885a931f00f101b0f6a27e6c541f98075d8\": container with ID starting with 774bdff296f17d9390d03ac7580f3885a931f00f101b0f6a27e6c541f98075d8 not found: ID does not exist" Nov 28 13:53:34 crc kubenswrapper[4857]: W1128 13:53:34.426253 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0a7818ef_92f1_45d7_8e82_a12bd9e52025.slice/crio-8d0a0c615680dd2bec0228932c38cf564382037c23ac8797cba8cbb2d585ff7a WatchSource:0}: Error finding container 8d0a0c615680dd2bec0228932c38cf564382037c23ac8797cba8cbb2d585ff7a: Status 404 returned error can't find the container with id 8d0a0c615680dd2bec0228932c38cf564382037c23ac8797cba8cbb2d585ff7a Nov 28 13:53:34 crc kubenswrapper[4857]: I1128 13:53:34.444679 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 13:53:34 crc kubenswrapper[4857]: I1128 13:53:34.465506 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 13:53:34 crc kubenswrapper[4857]: I1128 13:53:34.474038 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 13:53:34 crc kubenswrapper[4857]: E1128 13:53:34.474494 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="818836f7-7e48-4477-9e79-41c71000929d" containerName="kube-state-metrics" Nov 28 13:53:34 crc kubenswrapper[4857]: I1128 13:53:34.474515 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="818836f7-7e48-4477-9e79-41c71000929d" containerName="kube-state-metrics" Nov 28 13:53:34 crc kubenswrapper[4857]: I1128 13:53:34.474691 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="818836f7-7e48-4477-9e79-41c71000929d" containerName="kube-state-metrics" Nov 28 13:53:34 crc kubenswrapper[4857]: I1128 13:53:34.475344 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 13:53:34 crc kubenswrapper[4857]: I1128 13:53:34.477414 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 28 13:53:34 crc kubenswrapper[4857]: I1128 13:53:34.477639 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 28 13:53:34 crc kubenswrapper[4857]: I1128 13:53:34.485506 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 13:53:34 crc kubenswrapper[4857]: I1128 13:53:34.610709 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/d64a95a8-7e15-40a3-b2f4-54c65c2cbf45-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"d64a95a8-7e15-40a3-b2f4-54c65c2cbf45\") " pod="openstack/kube-state-metrics-0" Nov 28 13:53:34 crc kubenswrapper[4857]: I1128 13:53:34.610865 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d64a95a8-7e15-40a3-b2f4-54c65c2cbf45-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"d64a95a8-7e15-40a3-b2f4-54c65c2cbf45\") " pod="openstack/kube-state-metrics-0" Nov 28 13:53:34 crc kubenswrapper[4857]: I1128 13:53:34.610914 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/d64a95a8-7e15-40a3-b2f4-54c65c2cbf45-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"d64a95a8-7e15-40a3-b2f4-54c65c2cbf45\") " pod="openstack/kube-state-metrics-0" Nov 28 13:53:34 crc kubenswrapper[4857]: I1128 13:53:34.610949 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-79cqm\" (UniqueName: \"kubernetes.io/projected/d64a95a8-7e15-40a3-b2f4-54c65c2cbf45-kube-api-access-79cqm\") pod \"kube-state-metrics-0\" (UID: \"d64a95a8-7e15-40a3-b2f4-54c65c2cbf45\") " pod="openstack/kube-state-metrics-0" Nov 28 13:53:34 crc kubenswrapper[4857]: I1128 13:53:34.712237 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-79cqm\" (UniqueName: \"kubernetes.io/projected/d64a95a8-7e15-40a3-b2f4-54c65c2cbf45-kube-api-access-79cqm\") pod \"kube-state-metrics-0\" (UID: \"d64a95a8-7e15-40a3-b2f4-54c65c2cbf45\") " pod="openstack/kube-state-metrics-0" Nov 28 13:53:34 crc kubenswrapper[4857]: I1128 13:53:34.712287 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/d64a95a8-7e15-40a3-b2f4-54c65c2cbf45-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"d64a95a8-7e15-40a3-b2f4-54c65c2cbf45\") " pod="openstack/kube-state-metrics-0" Nov 28 13:53:34 crc kubenswrapper[4857]: I1128 13:53:34.712399 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d64a95a8-7e15-40a3-b2f4-54c65c2cbf45-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"d64a95a8-7e15-40a3-b2f4-54c65c2cbf45\") " pod="openstack/kube-state-metrics-0" Nov 28 13:53:34 crc kubenswrapper[4857]: I1128 13:53:34.712444 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/d64a95a8-7e15-40a3-b2f4-54c65c2cbf45-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"d64a95a8-7e15-40a3-b2f4-54c65c2cbf45\") " pod="openstack/kube-state-metrics-0" Nov 28 13:53:34 crc kubenswrapper[4857]: I1128 13:53:34.718405 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/d64a95a8-7e15-40a3-b2f4-54c65c2cbf45-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"d64a95a8-7e15-40a3-b2f4-54c65c2cbf45\") " pod="openstack/kube-state-metrics-0" Nov 28 13:53:34 crc kubenswrapper[4857]: I1128 13:53:34.722800 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d64a95a8-7e15-40a3-b2f4-54c65c2cbf45-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"d64a95a8-7e15-40a3-b2f4-54c65c2cbf45\") " pod="openstack/kube-state-metrics-0" Nov 28 13:53:34 crc kubenswrapper[4857]: I1128 13:53:34.725685 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/d64a95a8-7e15-40a3-b2f4-54c65c2cbf45-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"d64a95a8-7e15-40a3-b2f4-54c65c2cbf45\") " pod="openstack/kube-state-metrics-0" Nov 28 13:53:34 crc kubenswrapper[4857]: I1128 13:53:34.745115 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-79cqm\" (UniqueName: \"kubernetes.io/projected/d64a95a8-7e15-40a3-b2f4-54c65c2cbf45-kube-api-access-79cqm\") pod \"kube-state-metrics-0\" (UID: \"d64a95a8-7e15-40a3-b2f4-54c65c2cbf45\") " pod="openstack/kube-state-metrics-0" Nov 28 13:53:34 crc kubenswrapper[4857]: I1128 13:53:34.870619 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-khxxx" Nov 28 13:53:34 crc kubenswrapper[4857]: I1128 13:53:34.946804 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.004535 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.019105 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gjh9j\" (UniqueName: \"kubernetes.io/projected/81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d-kube-api-access-gjh9j\") pod \"81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d\" (UID: \"81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d\") " Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.019197 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d-config-data\") pod \"81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d\" (UID: \"81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d\") " Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.019219 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8xg4t\" (UniqueName: \"kubernetes.io/projected/d26ebe73-c4df-458d-a2f3-1da92d587632-kube-api-access-8xg4t\") pod \"d26ebe73-c4df-458d-a2f3-1da92d587632\" (UID: \"d26ebe73-c4df-458d-a2f3-1da92d587632\") " Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.019287 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d26ebe73-c4df-458d-a2f3-1da92d587632-scripts\") pod \"d26ebe73-c4df-458d-a2f3-1da92d587632\" (UID: \"d26ebe73-c4df-458d-a2f3-1da92d587632\") " Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.019337 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d26ebe73-c4df-458d-a2f3-1da92d587632-combined-ca-bundle\") pod \"d26ebe73-c4df-458d-a2f3-1da92d587632\" (UID: \"d26ebe73-c4df-458d-a2f3-1da92d587632\") " Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.019426 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d-combined-ca-bundle\") pod \"81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d\" (UID: \"81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d\") " Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.019541 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d26ebe73-c4df-458d-a2f3-1da92d587632-config-data\") pod \"d26ebe73-c4df-458d-a2f3-1da92d587632\" (UID: \"d26ebe73-c4df-458d-a2f3-1da92d587632\") " Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.023142 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d-kube-api-access-gjh9j" (OuterVolumeSpecName: "kube-api-access-gjh9j") pod "81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d" (UID: "81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d"). InnerVolumeSpecName "kube-api-access-gjh9j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.023197 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d26ebe73-c4df-458d-a2f3-1da92d587632-kube-api-access-8xg4t" (OuterVolumeSpecName: "kube-api-access-8xg4t") pod "d26ebe73-c4df-458d-a2f3-1da92d587632" (UID: "d26ebe73-c4df-458d-a2f3-1da92d587632"). InnerVolumeSpecName "kube-api-access-8xg4t". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.026537 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d26ebe73-c4df-458d-a2f3-1da92d587632-scripts" (OuterVolumeSpecName: "scripts") pod "d26ebe73-c4df-458d-a2f3-1da92d587632" (UID: "d26ebe73-c4df-458d-a2f3-1da92d587632"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:53:35 crc kubenswrapper[4857]: E1128 13:53:35.042827 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d-config-data podName:81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d nodeName:}" failed. No retries permitted until 2025-11-28 13:53:35.542796811 +0000 UTC m=+1465.666738248 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "config-data" (UniqueName: "kubernetes.io/secret/81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d-config-data") pod "81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d" (UID: "81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d") : error deleting /var/lib/kubelet/pods/81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d/volume-subpaths: remove /var/lib/kubelet/pods/81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d/volume-subpaths: no such file or directory Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.046204 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d" (UID: "81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.046986 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d26ebe73-c4df-458d-a2f3-1da92d587632-config-data" (OuterVolumeSpecName: "config-data") pod "d26ebe73-c4df-458d-a2f3-1da92d587632" (UID: "d26ebe73-c4df-458d-a2f3-1da92d587632"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.051006 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d26ebe73-c4df-458d-a2f3-1da92d587632-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d26ebe73-c4df-458d-a2f3-1da92d587632" (UID: "d26ebe73-c4df-458d-a2f3-1da92d587632"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.122435 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gjh9j\" (UniqueName: \"kubernetes.io/projected/81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d-kube-api-access-gjh9j\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.123179 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8xg4t\" (UniqueName: \"kubernetes.io/projected/d26ebe73-c4df-458d-a2f3-1da92d587632-kube-api-access-8xg4t\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.123194 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d26ebe73-c4df-458d-a2f3-1da92d587632-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.123207 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d26ebe73-c4df-458d-a2f3-1da92d587632-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.123242 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.123253 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d26ebe73-c4df-458d-a2f3-1da92d587632-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.401338 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0a7818ef-92f1-45d7-8e82-a12bd9e52025","Type":"ContainerStarted","Data":"d659fc88c7c531aad673ca9bd44a4ff85223bd937030442e1a2ed21300480215"} Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.401386 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0a7818ef-92f1-45d7-8e82-a12bd9e52025","Type":"ContainerStarted","Data":"9c4532426545e665d29d95b5eb288b5f7fb85f3d59af5c678ec8a6ee12b47158"} Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.401398 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0a7818ef-92f1-45d7-8e82-a12bd9e52025","Type":"ContainerStarted","Data":"8d0a0c615680dd2bec0228932c38cf564382037c23ac8797cba8cbb2d585ff7a"} Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.405061 4857 generic.go:334] "Generic (PLEG): container finished" podID="81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d" containerID="6eca3260c1b0bdbf12aa4493a7c10635581ce38d699e243a4c0ada74848cd5bc" exitCode=0 Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.405138 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d","Type":"ContainerDied","Data":"6eca3260c1b0bdbf12aa4493a7c10635581ce38d699e243a4c0ada74848cd5bc"} Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.405168 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d","Type":"ContainerDied","Data":"d226cf25dcf55d36564d86b871ceb37a6a1aac44dd237460f079b935574a85ee"} Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.405187 4857 scope.go:117] "RemoveContainer" 
containerID="6eca3260c1b0bdbf12aa4493a7c10635581ce38d699e243a4c0ada74848cd5bc" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.405310 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.407808 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-khxxx" event={"ID":"d26ebe73-c4df-458d-a2f3-1da92d587632","Type":"ContainerDied","Data":"204689276a1d6e5ed4ff6324d6b09811c5b39d6e6b874b34b77cacd9ac3f5442"} Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.407849 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="204689276a1d6e5ed4ff6324d6b09811c5b39d6e6b874b34b77cacd9ac3f5442" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.407937 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-khxxx" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.443569 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.443544356 podStartE2EDuration="2.443544356s" podCreationTimestamp="2025-11-28 13:53:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:53:35.426853427 +0000 UTC m=+1465.550794864" watchObservedRunningTime="2025-11-28 13:53:35.443544356 +0000 UTC m=+1465.567485793" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.456679 4857 scope.go:117] "RemoveContainer" containerID="6eca3260c1b0bdbf12aa4493a7c10635581ce38d699e243a4c0ada74848cd5bc" Nov 28 13:53:35 crc kubenswrapper[4857]: E1128 13:53:35.457459 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6eca3260c1b0bdbf12aa4493a7c10635581ce38d699e243a4c0ada74848cd5bc\": container with ID starting with 6eca3260c1b0bdbf12aa4493a7c10635581ce38d699e243a4c0ada74848cd5bc not found: ID does not exist" containerID="6eca3260c1b0bdbf12aa4493a7c10635581ce38d699e243a4c0ada74848cd5bc" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.457490 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6eca3260c1b0bdbf12aa4493a7c10635581ce38d699e243a4c0ada74848cd5bc"} err="failed to get container status \"6eca3260c1b0bdbf12aa4493a7c10635581ce38d699e243a4c0ada74848cd5bc\": rpc error: code = NotFound desc = could not find container \"6eca3260c1b0bdbf12aa4493a7c10635581ce38d699e243a4c0ada74848cd5bc\": container with ID starting with 6eca3260c1b0bdbf12aa4493a7c10635581ce38d699e243a4c0ada74848cd5bc not found: ID does not exist" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.485252 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.504745 4857 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.512917 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 13:53:35 crc kubenswrapper[4857]: E1128 13:53:35.526495 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d26ebe73-c4df-458d-a2f3-1da92d587632" containerName="nova-cell1-conductor-db-sync" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.526549 4857 
state_mem.go:107] "Deleted CPUSet assignment" podUID="d26ebe73-c4df-458d-a2f3-1da92d587632" containerName="nova-cell1-conductor-db-sync" Nov 28 13:53:35 crc kubenswrapper[4857]: E1128 13:53:35.526636 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d" containerName="nova-scheduler-scheduler" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.526646 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d" containerName="nova-scheduler-scheduler" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.527008 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d26ebe73-c4df-458d-a2f3-1da92d587632" containerName="nova-cell1-conductor-db-sync" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.527046 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d" containerName="nova-scheduler-scheduler" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.527801 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.527905 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.530900 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.634720 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d-config-data\") pod \"81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d\" (UID: \"81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d\") " Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.635228 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61549c8e-2955-4350-9055-731ceb896fdc-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"61549c8e-2955-4350-9055-731ceb896fdc\") " pod="openstack/nova-cell1-conductor-0" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.635348 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vr55k\" (UniqueName: \"kubernetes.io/projected/61549c8e-2955-4350-9055-731ceb896fdc-kube-api-access-vr55k\") pod \"nova-cell1-conductor-0\" (UID: \"61549c8e-2955-4350-9055-731ceb896fdc\") " pod="openstack/nova-cell1-conductor-0" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.635418 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61549c8e-2955-4350-9055-731ceb896fdc-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"61549c8e-2955-4350-9055-731ceb896fdc\") " pod="openstack/nova-cell1-conductor-0" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.640821 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d-config-data" (OuterVolumeSpecName: "config-data") pod "81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d" (UID: "81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.737419 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vr55k\" (UniqueName: \"kubernetes.io/projected/61549c8e-2955-4350-9055-731ceb896fdc-kube-api-access-vr55k\") pod \"nova-cell1-conductor-0\" (UID: \"61549c8e-2955-4350-9055-731ceb896fdc\") " pod="openstack/nova-cell1-conductor-0" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.737494 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61549c8e-2955-4350-9055-731ceb896fdc-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"61549c8e-2955-4350-9055-731ceb896fdc\") " pod="openstack/nova-cell1-conductor-0" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.737563 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61549c8e-2955-4350-9055-731ceb896fdc-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"61549c8e-2955-4350-9055-731ceb896fdc\") " pod="openstack/nova-cell1-conductor-0" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.737613 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.741576 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61549c8e-2955-4350-9055-731ceb896fdc-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"61549c8e-2955-4350-9055-731ceb896fdc\") " pod="openstack/nova-cell1-conductor-0" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.747021 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.759053 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61549c8e-2955-4350-9055-731ceb896fdc-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"61549c8e-2955-4350-9055-731ceb896fdc\") " pod="openstack/nova-cell1-conductor-0" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.763196 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.773133 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.774103 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vr55k\" (UniqueName: \"kubernetes.io/projected/61549c8e-2955-4350-9055-731ceb896fdc-kube-api-access-vr55k\") pod \"nova-cell1-conductor-0\" (UID: \"61549c8e-2955-4350-9055-731ceb896fdc\") " pod="openstack/nova-cell1-conductor-0" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.774401 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.776538 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.813723 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.825693 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.826020 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a3a426a9-b190-45d1-8ded-d859d930aa46" containerName="ceilometer-central-agent" containerID="cri-o://3550e137178d18c59d49e08f8ba96f4865ee8708db2f6e837fe17fedaeee3539" gracePeriod=30 Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.826558 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a3a426a9-b190-45d1-8ded-d859d930aa46" containerName="proxy-httpd" containerID="cri-o://b739607a7d3bed26bfdef1e161fe7a512799ebf330a44835b6eebf8c36ffb70a" gracePeriod=30 Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.826621 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a3a426a9-b190-45d1-8ded-d859d930aa46" containerName="sg-core" containerID="cri-o://625f21229df21e17ad567862d2f50eae24cbad68ce3c927378a2c02395fc6d18" gracePeriod=30 Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.826663 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a3a426a9-b190-45d1-8ded-d859d930aa46" containerName="ceilometer-notification-agent" containerID="cri-o://7d6b6e7e5e9cc36ce54a8b4ffd83d724c615708f5364d1ae2e912b47662fbf4c" gracePeriod=30 Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.847996 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.941198 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f52472a6-ce63-4192-9ef4-3d77b633ef70-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"f52472a6-ce63-4192-9ef4-3d77b633ef70\") " pod="openstack/nova-scheduler-0" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.941757 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f52472a6-ce63-4192-9ef4-3d77b633ef70-config-data\") pod \"nova-scheduler-0\" (UID: \"f52472a6-ce63-4192-9ef4-3d77b633ef70\") " pod="openstack/nova-scheduler-0" Nov 28 13:53:35 crc kubenswrapper[4857]: I1128 13:53:35.941816 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9svlk\" (UniqueName: \"kubernetes.io/projected/f52472a6-ce63-4192-9ef4-3d77b633ef70-kube-api-access-9svlk\") pod \"nova-scheduler-0\" (UID: \"f52472a6-ce63-4192-9ef4-3d77b633ef70\") " pod="openstack/nova-scheduler-0" Nov 28 13:53:36 crc kubenswrapper[4857]: I1128 13:53:36.044007 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f52472a6-ce63-4192-9ef4-3d77b633ef70-config-data\") pod \"nova-scheduler-0\" (UID: \"f52472a6-ce63-4192-9ef4-3d77b633ef70\") " pod="openstack/nova-scheduler-0" Nov 28 13:53:36 crc kubenswrapper[4857]: I1128 13:53:36.044058 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9svlk\" (UniqueName: \"kubernetes.io/projected/f52472a6-ce63-4192-9ef4-3d77b633ef70-kube-api-access-9svlk\") pod \"nova-scheduler-0\" (UID: \"f52472a6-ce63-4192-9ef4-3d77b633ef70\") " pod="openstack/nova-scheduler-0" Nov 28 13:53:36 crc kubenswrapper[4857]: I1128 13:53:36.044081 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f52472a6-ce63-4192-9ef4-3d77b633ef70-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"f52472a6-ce63-4192-9ef4-3d77b633ef70\") " pod="openstack/nova-scheduler-0" Nov 28 13:53:36 crc kubenswrapper[4857]: I1128 13:53:36.049955 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f52472a6-ce63-4192-9ef4-3d77b633ef70-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"f52472a6-ce63-4192-9ef4-3d77b633ef70\") " pod="openstack/nova-scheduler-0" Nov 28 13:53:36 crc kubenswrapper[4857]: I1128 13:53:36.050447 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f52472a6-ce63-4192-9ef4-3d77b633ef70-config-data\") pod \"nova-scheduler-0\" (UID: \"f52472a6-ce63-4192-9ef4-3d77b633ef70\") " pod="openstack/nova-scheduler-0" Nov 28 13:53:36 crc kubenswrapper[4857]: I1128 13:53:36.060035 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9svlk\" (UniqueName: \"kubernetes.io/projected/f52472a6-ce63-4192-9ef4-3d77b633ef70-kube-api-access-9svlk\") pod \"nova-scheduler-0\" (UID: \"f52472a6-ce63-4192-9ef4-3d77b633ef70\") " pod="openstack/nova-scheduler-0" Nov 28 13:53:36 crc kubenswrapper[4857]: I1128 13:53:36.141510 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 13:53:36 crc kubenswrapper[4857]: I1128 13:53:36.243613 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="818836f7-7e48-4477-9e79-41c71000929d" path="/var/lib/kubelet/pods/818836f7-7e48-4477-9e79-41c71000929d/volumes" Nov 28 13:53:36 crc kubenswrapper[4857]: I1128 13:53:36.245054 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d" path="/var/lib/kubelet/pods/81d130e8-7e9d-4c3f-9ce3-d1b7b90e657d/volumes" Nov 28 13:53:36 crc kubenswrapper[4857]: I1128 13:53:36.289362 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 13:53:36 crc kubenswrapper[4857]: I1128 13:53:36.436293 4857 generic.go:334] "Generic (PLEG): container finished" podID="a3a426a9-b190-45d1-8ded-d859d930aa46" containerID="b739607a7d3bed26bfdef1e161fe7a512799ebf330a44835b6eebf8c36ffb70a" exitCode=0 Nov 28 13:53:36 crc kubenswrapper[4857]: I1128 13:53:36.436630 4857 generic.go:334] "Generic (PLEG): container finished" podID="a3a426a9-b190-45d1-8ded-d859d930aa46" containerID="625f21229df21e17ad567862d2f50eae24cbad68ce3c927378a2c02395fc6d18" exitCode=2 Nov 28 13:53:36 crc kubenswrapper[4857]: I1128 13:53:36.436641 4857 generic.go:334] "Generic (PLEG): container finished" podID="a3a426a9-b190-45d1-8ded-d859d930aa46" containerID="3550e137178d18c59d49e08f8ba96f4865ee8708db2f6e837fe17fedaeee3539" exitCode=0 Nov 28 13:53:36 crc kubenswrapper[4857]: I1128 13:53:36.436682 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a3a426a9-b190-45d1-8ded-d859d930aa46","Type":"ContainerDied","Data":"b739607a7d3bed26bfdef1e161fe7a512799ebf330a44835b6eebf8c36ffb70a"} Nov 28 13:53:36 crc kubenswrapper[4857]: I1128 13:53:36.436712 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a3a426a9-b190-45d1-8ded-d859d930aa46","Type":"ContainerDied","Data":"625f21229df21e17ad567862d2f50eae24cbad68ce3c927378a2c02395fc6d18"} Nov 28 13:53:36 crc kubenswrapper[4857]: I1128 13:53:36.436723 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a3a426a9-b190-45d1-8ded-d859d930aa46","Type":"ContainerDied","Data":"3550e137178d18c59d49e08f8ba96f4865ee8708db2f6e837fe17fedaeee3539"} Nov 28 13:53:36 crc kubenswrapper[4857]: I1128 13:53:36.440197 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"d64a95a8-7e15-40a3-b2f4-54c65c2cbf45","Type":"ContainerStarted","Data":"89b75be03709a9433e326422eced730b8165104d804c031618aebe74ce32bcdd"} Nov 28 13:53:36 crc kubenswrapper[4857]: I1128 13:53:36.440225 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"d64a95a8-7e15-40a3-b2f4-54c65c2cbf45","Type":"ContainerStarted","Data":"1fdca73510f3a2dc5f166a8adda9ecf23dd2406cb1bad0e449a32ebe10b5db5e"} Nov 28 13:53:36 crc kubenswrapper[4857]: I1128 13:53:36.441371 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 28 13:53:36 crc kubenswrapper[4857]: I1128 13:53:36.443919 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"61549c8e-2955-4350-9055-731ceb896fdc","Type":"ContainerStarted","Data":"dc1f1c9f56c54cb442c097516f59bd8065ffb079f5fa0810142019e28c63167b"} Nov 28 13:53:36 crc kubenswrapper[4857]: I1128 13:53:36.475400 4857 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=1.991221421 podStartE2EDuration="2.475377193s" podCreationTimestamp="2025-11-28 13:53:34 +0000 UTC" firstStartedPulling="2025-11-28 13:53:35.504478531 +0000 UTC m=+1465.628419968" lastFinishedPulling="2025-11-28 13:53:35.988634303 +0000 UTC m=+1466.112575740" observedRunningTime="2025-11-28 13:53:36.463059058 +0000 UTC m=+1466.587000495" watchObservedRunningTime="2025-11-28 13:53:36.475377193 +0000 UTC m=+1466.599318630" Nov 28 13:53:36 crc kubenswrapper[4857]: W1128 13:53:36.636793 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf52472a6_ce63_4192_9ef4_3d77b633ef70.slice/crio-b94f09916edb92e39a9d434c1f407e3e18165f544275f68fc18124c0cc2b852f WatchSource:0}: Error finding container b94f09916edb92e39a9d434c1f407e3e18165f544275f68fc18124c0cc2b852f: Status 404 returned error can't find the container with id b94f09916edb92e39a9d434c1f407e3e18165f544275f68fc18124c0cc2b852f Nov 28 13:53:36 crc kubenswrapper[4857]: I1128 13:53:36.642048 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 13:53:37 crc kubenswrapper[4857]: I1128 13:53:37.455486 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f52472a6-ce63-4192-9ef4-3d77b633ef70","Type":"ContainerStarted","Data":"9f84f9d0085e2e21c564f7ffe093e88ebd9a92661819a1d7ef85e3d19ef948f0"} Nov 28 13:53:37 crc kubenswrapper[4857]: I1128 13:53:37.455855 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f52472a6-ce63-4192-9ef4-3d77b633ef70","Type":"ContainerStarted","Data":"b94f09916edb92e39a9d434c1f407e3e18165f544275f68fc18124c0cc2b852f"} Nov 28 13:53:37 crc kubenswrapper[4857]: I1128 13:53:37.459278 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"61549c8e-2955-4350-9055-731ceb896fdc","Type":"ContainerStarted","Data":"ada4622992cb990d822672c1f29ee9bd8e30cbd244227c7ff8d566127a8be7c6"} Nov 28 13:53:37 crc kubenswrapper[4857]: I1128 13:53:37.459327 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 28 13:53:37 crc kubenswrapper[4857]: I1128 13:53:37.527465 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.5274461219999997 podStartE2EDuration="2.527446122s" podCreationTimestamp="2025-11-28 13:53:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:53:37.489410631 +0000 UTC m=+1467.613352068" watchObservedRunningTime="2025-11-28 13:53:37.527446122 +0000 UTC m=+1467.651387559" Nov 28 13:53:37 crc kubenswrapper[4857]: I1128 13:53:37.527653 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.527649358 podStartE2EDuration="2.527649358s" podCreationTimestamp="2025-11-28 13:53:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:53:37.507587029 +0000 UTC m=+1467.631528466" watchObservedRunningTime="2025-11-28 13:53:37.527649358 +0000 UTC m=+1467.651590795" Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.469300 4857 generic.go:334] "Generic 
(PLEG): container finished" podID="6c56933f-fbe9-479a-aa1e-dbf699eb13af" containerID="888fe2c7fb9a72d11577c72ea2a6f6697c1a76294064e4ee884fb8613fd8badd" exitCode=0 Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.469645 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6c56933f-fbe9-479a-aa1e-dbf699eb13af","Type":"ContainerDied","Data":"888fe2c7fb9a72d11577c72ea2a6f6697c1a76294064e4ee884fb8613fd8badd"} Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.469671 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"6c56933f-fbe9-479a-aa1e-dbf699eb13af","Type":"ContainerDied","Data":"68f820573b53262decbca0a2ebdd1949c1b49581f4ab1678e80b231599fc8e1d"} Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.469681 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="68f820573b53262decbca0a2ebdd1949c1b49581f4ab1678e80b231599fc8e1d" Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.472754 4857 generic.go:334] "Generic (PLEG): container finished" podID="a3a426a9-b190-45d1-8ded-d859d930aa46" containerID="7d6b6e7e5e9cc36ce54a8b4ffd83d724c615708f5364d1ae2e912b47662fbf4c" exitCode=0 Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.473534 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a3a426a9-b190-45d1-8ded-d859d930aa46","Type":"ContainerDied","Data":"7d6b6e7e5e9cc36ce54a8b4ffd83d724c615708f5364d1ae2e912b47662fbf4c"} Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.473560 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a3a426a9-b190-45d1-8ded-d859d930aa46","Type":"ContainerDied","Data":"e7c6a1907049909795fea39d2f876f5baf21da9d59f56988e89c435c9cfbfe0e"} Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.473569 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e7c6a1907049909795fea39d2f876f5baf21da9d59f56988e89c435c9cfbfe0e" Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.474920 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.481684 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.640996 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c56933f-fbe9-479a-aa1e-dbf699eb13af-combined-ca-bundle\") pod \"6c56933f-fbe9-479a-aa1e-dbf699eb13af\" (UID: \"6c56933f-fbe9-479a-aa1e-dbf699eb13af\") " Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.641063 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3a426a9-b190-45d1-8ded-d859d930aa46-combined-ca-bundle\") pod \"a3a426a9-b190-45d1-8ded-d859d930aa46\" (UID: \"a3a426a9-b190-45d1-8ded-d859d930aa46\") " Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.641159 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9vkb\" (UniqueName: \"kubernetes.io/projected/a3a426a9-b190-45d1-8ded-d859d930aa46-kube-api-access-w9vkb\") pod \"a3a426a9-b190-45d1-8ded-d859d930aa46\" (UID: \"a3a426a9-b190-45d1-8ded-d859d930aa46\") " Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.641189 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a3a426a9-b190-45d1-8ded-d859d930aa46-scripts\") pod \"a3a426a9-b190-45d1-8ded-d859d930aa46\" (UID: \"a3a426a9-b190-45d1-8ded-d859d930aa46\") " Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.641224 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a3a426a9-b190-45d1-8ded-d859d930aa46-log-httpd\") pod \"a3a426a9-b190-45d1-8ded-d859d930aa46\" (UID: \"a3a426a9-b190-45d1-8ded-d859d930aa46\") " Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.641301 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c56933f-fbe9-479a-aa1e-dbf699eb13af-config-data\") pod \"6c56933f-fbe9-479a-aa1e-dbf699eb13af\" (UID: \"6c56933f-fbe9-479a-aa1e-dbf699eb13af\") " Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.641342 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a3a426a9-b190-45d1-8ded-d859d930aa46-run-httpd\") pod \"a3a426a9-b190-45d1-8ded-d859d930aa46\" (UID: \"a3a426a9-b190-45d1-8ded-d859d930aa46\") " Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.641380 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8zlrp\" (UniqueName: \"kubernetes.io/projected/6c56933f-fbe9-479a-aa1e-dbf699eb13af-kube-api-access-8zlrp\") pod \"6c56933f-fbe9-479a-aa1e-dbf699eb13af\" (UID: \"6c56933f-fbe9-479a-aa1e-dbf699eb13af\") " Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.641408 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a3a426a9-b190-45d1-8ded-d859d930aa46-config-data\") pod \"a3a426a9-b190-45d1-8ded-d859d930aa46\" (UID: \"a3a426a9-b190-45d1-8ded-d859d930aa46\") " Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.641431 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c56933f-fbe9-479a-aa1e-dbf699eb13af-logs\") pod \"6c56933f-fbe9-479a-aa1e-dbf699eb13af\" (UID: \"6c56933f-fbe9-479a-aa1e-dbf699eb13af\") 
" Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.641464 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a3a426a9-b190-45d1-8ded-d859d930aa46-sg-core-conf-yaml\") pod \"a3a426a9-b190-45d1-8ded-d859d930aa46\" (UID: \"a3a426a9-b190-45d1-8ded-d859d930aa46\") " Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.642079 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a3a426a9-b190-45d1-8ded-d859d930aa46-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "a3a426a9-b190-45d1-8ded-d859d930aa46" (UID: "a3a426a9-b190-45d1-8ded-d859d930aa46"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.642207 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a3a426a9-b190-45d1-8ded-d859d930aa46-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "a3a426a9-b190-45d1-8ded-d859d930aa46" (UID: "a3a426a9-b190-45d1-8ded-d859d930aa46"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.642936 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6c56933f-fbe9-479a-aa1e-dbf699eb13af-logs" (OuterVolumeSpecName: "logs") pod "6c56933f-fbe9-479a-aa1e-dbf699eb13af" (UID: "6c56933f-fbe9-479a-aa1e-dbf699eb13af"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.644143 4857 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a3a426a9-b190-45d1-8ded-d859d930aa46-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.644297 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c56933f-fbe9-479a-aa1e-dbf699eb13af-logs\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.644479 4857 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a3a426a9-b190-45d1-8ded-d859d930aa46-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.652186 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c56933f-fbe9-479a-aa1e-dbf699eb13af-kube-api-access-8zlrp" (OuterVolumeSpecName: "kube-api-access-8zlrp") pod "6c56933f-fbe9-479a-aa1e-dbf699eb13af" (UID: "6c56933f-fbe9-479a-aa1e-dbf699eb13af"). InnerVolumeSpecName "kube-api-access-8zlrp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.653631 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a3a426a9-b190-45d1-8ded-d859d930aa46-scripts" (OuterVolumeSpecName: "scripts") pod "a3a426a9-b190-45d1-8ded-d859d930aa46" (UID: "a3a426a9-b190-45d1-8ded-d859d930aa46"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.681242 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a3a426a9-b190-45d1-8ded-d859d930aa46-kube-api-access-w9vkb" (OuterVolumeSpecName: "kube-api-access-w9vkb") pod "a3a426a9-b190-45d1-8ded-d859d930aa46" (UID: "a3a426a9-b190-45d1-8ded-d859d930aa46"). InnerVolumeSpecName "kube-api-access-w9vkb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.683344 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c56933f-fbe9-479a-aa1e-dbf699eb13af-config-data" (OuterVolumeSpecName: "config-data") pod "6c56933f-fbe9-479a-aa1e-dbf699eb13af" (UID: "6c56933f-fbe9-479a-aa1e-dbf699eb13af"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.684296 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a3a426a9-b190-45d1-8ded-d859d930aa46-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "a3a426a9-b190-45d1-8ded-d859d930aa46" (UID: "a3a426a9-b190-45d1-8ded-d859d930aa46"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.685443 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c56933f-fbe9-479a-aa1e-dbf699eb13af-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6c56933f-fbe9-479a-aa1e-dbf699eb13af" (UID: "6c56933f-fbe9-479a-aa1e-dbf699eb13af"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.735848 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a3a426a9-b190-45d1-8ded-d859d930aa46-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a3a426a9-b190-45d1-8ded-d859d930aa46" (UID: "a3a426a9-b190-45d1-8ded-d859d930aa46"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.745792 4857 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a3a426a9-b190-45d1-8ded-d859d930aa46-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.745822 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c56933f-fbe9-479a-aa1e-dbf699eb13af-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.745831 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3a426a9-b190-45d1-8ded-d859d930aa46-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.745840 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9vkb\" (UniqueName: \"kubernetes.io/projected/a3a426a9-b190-45d1-8ded-d859d930aa46-kube-api-access-w9vkb\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.745850 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a3a426a9-b190-45d1-8ded-d859d930aa46-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.745858 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c56933f-fbe9-479a-aa1e-dbf699eb13af-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.745867 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8zlrp\" (UniqueName: \"kubernetes.io/projected/6c56933f-fbe9-479a-aa1e-dbf699eb13af-kube-api-access-8zlrp\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.756461 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a3a426a9-b190-45d1-8ded-d859d930aa46-config-data" (OuterVolumeSpecName: "config-data") pod "a3a426a9-b190-45d1-8ded-d859d930aa46" (UID: "a3a426a9-b190-45d1-8ded-d859d930aa46"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.847806 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a3a426a9-b190-45d1-8ded-d859d930aa46-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.954148 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 13:53:38 crc kubenswrapper[4857]: I1128 13:53:38.954612 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.144070 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5vzz2"] Nov 28 13:53:39 crc kubenswrapper[4857]: E1128 13:53:39.144733 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3a426a9-b190-45d1-8ded-d859d930aa46" containerName="ceilometer-central-agent" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.144805 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3a426a9-b190-45d1-8ded-d859d930aa46" containerName="ceilometer-central-agent" Nov 28 13:53:39 crc kubenswrapper[4857]: E1128 13:53:39.145029 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3a426a9-b190-45d1-8ded-d859d930aa46" containerName="proxy-httpd" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.145131 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3a426a9-b190-45d1-8ded-d859d930aa46" containerName="proxy-httpd" Nov 28 13:53:39 crc kubenswrapper[4857]: E1128 13:53:39.145247 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c56933f-fbe9-479a-aa1e-dbf699eb13af" containerName="nova-api-log" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.145315 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c56933f-fbe9-479a-aa1e-dbf699eb13af" containerName="nova-api-log" Nov 28 13:53:39 crc kubenswrapper[4857]: E1128 13:53:39.145382 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3a426a9-b190-45d1-8ded-d859d930aa46" containerName="sg-core" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.145476 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3a426a9-b190-45d1-8ded-d859d930aa46" containerName="sg-core" Nov 28 13:53:39 crc kubenswrapper[4857]: E1128 13:53:39.145556 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3a426a9-b190-45d1-8ded-d859d930aa46" containerName="ceilometer-notification-agent" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.145621 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3a426a9-b190-45d1-8ded-d859d930aa46" containerName="ceilometer-notification-agent" Nov 28 13:53:39 crc kubenswrapper[4857]: E1128 13:53:39.145678 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c56933f-fbe9-479a-aa1e-dbf699eb13af" containerName="nova-api-api" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.145764 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c56933f-fbe9-479a-aa1e-dbf699eb13af" containerName="nova-api-api" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.146053 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="a3a426a9-b190-45d1-8ded-d859d930aa46" containerName="ceilometer-notification-agent" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.146132 4857 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="6c56933f-fbe9-479a-aa1e-dbf699eb13af" containerName="nova-api-api" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.146212 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="a3a426a9-b190-45d1-8ded-d859d930aa46" containerName="ceilometer-central-agent" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.146335 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="a3a426a9-b190-45d1-8ded-d859d930aa46" containerName="proxy-httpd" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.146443 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="a3a426a9-b190-45d1-8ded-d859d930aa46" containerName="sg-core" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.146584 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c56933f-fbe9-479a-aa1e-dbf699eb13af" containerName="nova-api-log" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.148658 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5vzz2" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.176167 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5vzz2"] Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.255917 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bxp9t\" (UniqueName: \"kubernetes.io/projected/33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c-kube-api-access-bxp9t\") pod \"redhat-operators-5vzz2\" (UID: \"33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c\") " pod="openshift-marketplace/redhat-operators-5vzz2" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.256387 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c-utilities\") pod \"redhat-operators-5vzz2\" (UID: \"33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c\") " pod="openshift-marketplace/redhat-operators-5vzz2" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.256531 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c-catalog-content\") pod \"redhat-operators-5vzz2\" (UID: \"33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c\") " pod="openshift-marketplace/redhat-operators-5vzz2" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.359080 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c-utilities\") pod \"redhat-operators-5vzz2\" (UID: \"33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c\") " pod="openshift-marketplace/redhat-operators-5vzz2" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.359163 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c-catalog-content\") pod \"redhat-operators-5vzz2\" (UID: \"33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c\") " pod="openshift-marketplace/redhat-operators-5vzz2" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.359291 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bxp9t\" (UniqueName: \"kubernetes.io/projected/33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c-kube-api-access-bxp9t\") pod \"redhat-operators-5vzz2\" (UID: 
\"33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c\") " pod="openshift-marketplace/redhat-operators-5vzz2" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.360551 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c-utilities\") pod \"redhat-operators-5vzz2\" (UID: \"33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c\") " pod="openshift-marketplace/redhat-operators-5vzz2" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.361480 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c-catalog-content\") pod \"redhat-operators-5vzz2\" (UID: \"33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c\") " pod="openshift-marketplace/redhat-operators-5vzz2" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.384806 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bxp9t\" (UniqueName: \"kubernetes.io/projected/33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c-kube-api-access-bxp9t\") pod \"redhat-operators-5vzz2\" (UID: \"33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c\") " pod="openshift-marketplace/redhat-operators-5vzz2" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.473233 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5vzz2" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.490586 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.491057 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.618329 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.627605 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.646007 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.662878 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.667890 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.685666 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.691447 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.692939 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.698859 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.736622 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.741533 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.746569 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.778412 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.790482 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.876746 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c3e623f-697a-4a99-b364-dc4ade09c3d0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\") " pod="openstack/ceilometer-0" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.876798 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8c3e623f-697a-4a99-b364-dc4ade09c3d0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\") " pod="openstack/ceilometer-0" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.877032 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8c3e623f-697a-4a99-b364-dc4ade09c3d0-log-httpd\") pod \"ceilometer-0\" (UID: \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\") " pod="openstack/ceilometer-0" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.877106 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/64ab5843-1111-459f-81c5-12010ea28156-logs\") pod \"nova-api-0\" (UID: \"64ab5843-1111-459f-81c5-12010ea28156\") " pod="openstack/nova-api-0" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.877284 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ld57p\" (UniqueName: \"kubernetes.io/projected/8c3e623f-697a-4a99-b364-dc4ade09c3d0-kube-api-access-ld57p\") pod \"ceilometer-0\" (UID: \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\") " pod="openstack/ceilometer-0" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.877485 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8c3e623f-697a-4a99-b364-dc4ade09c3d0-scripts\") pod \"ceilometer-0\" (UID: \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\") " pod="openstack/ceilometer-0" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.877518 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64ab5843-1111-459f-81c5-12010ea28156-config-data\") pod \"nova-api-0\" (UID: \"64ab5843-1111-459f-81c5-12010ea28156\") " pod="openstack/nova-api-0" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.877557 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c3e623f-697a-4a99-b364-dc4ade09c3d0-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\") " pod="openstack/ceilometer-0" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.877610 4857 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8c3e623f-697a-4a99-b364-dc4ade09c3d0-run-httpd\") pod \"ceilometer-0\" (UID: \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\") " pod="openstack/ceilometer-0" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.877633 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c3e623f-697a-4a99-b364-dc4ade09c3d0-config-data\") pod \"ceilometer-0\" (UID: \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\") " pod="openstack/ceilometer-0" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.877654 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-49448\" (UniqueName: \"kubernetes.io/projected/64ab5843-1111-459f-81c5-12010ea28156-kube-api-access-49448\") pod \"nova-api-0\" (UID: \"64ab5843-1111-459f-81c5-12010ea28156\") " pod="openstack/nova-api-0" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.877677 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64ab5843-1111-459f-81c5-12010ea28156-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"64ab5843-1111-459f-81c5-12010ea28156\") " pod="openstack/nova-api-0" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.979885 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8c3e623f-697a-4a99-b364-dc4ade09c3d0-log-httpd\") pod \"ceilometer-0\" (UID: \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\") " pod="openstack/ceilometer-0" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.980182 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/64ab5843-1111-459f-81c5-12010ea28156-logs\") pod \"nova-api-0\" (UID: \"64ab5843-1111-459f-81c5-12010ea28156\") " pod="openstack/nova-api-0" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.980304 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ld57p\" (UniqueName: \"kubernetes.io/projected/8c3e623f-697a-4a99-b364-dc4ade09c3d0-kube-api-access-ld57p\") pod \"ceilometer-0\" (UID: \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\") " pod="openstack/ceilometer-0" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.980407 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8c3e623f-697a-4a99-b364-dc4ade09c3d0-scripts\") pod \"ceilometer-0\" (UID: \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\") " pod="openstack/ceilometer-0" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.980507 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64ab5843-1111-459f-81c5-12010ea28156-config-data\") pod \"nova-api-0\" (UID: \"64ab5843-1111-459f-81c5-12010ea28156\") " pod="openstack/nova-api-0" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.980583 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c3e623f-697a-4a99-b364-dc4ade09c3d0-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\") " pod="openstack/ceilometer-0" Nov 28 13:53:39 crc 
kubenswrapper[4857]: I1128 13:53:39.980640 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8c3e623f-697a-4a99-b364-dc4ade09c3d0-log-httpd\") pod \"ceilometer-0\" (UID: \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\") " pod="openstack/ceilometer-0" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.980697 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/64ab5843-1111-459f-81c5-12010ea28156-logs\") pod \"nova-api-0\" (UID: \"64ab5843-1111-459f-81c5-12010ea28156\") " pod="openstack/nova-api-0" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.980790 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8c3e623f-697a-4a99-b364-dc4ade09c3d0-run-httpd\") pod \"ceilometer-0\" (UID: \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\") " pod="openstack/ceilometer-0" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.980891 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-49448\" (UniqueName: \"kubernetes.io/projected/64ab5843-1111-459f-81c5-12010ea28156-kube-api-access-49448\") pod \"nova-api-0\" (UID: \"64ab5843-1111-459f-81c5-12010ea28156\") " pod="openstack/nova-api-0" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.981015 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c3e623f-697a-4a99-b364-dc4ade09c3d0-config-data\") pod \"ceilometer-0\" (UID: \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\") " pod="openstack/ceilometer-0" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.981118 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64ab5843-1111-459f-81c5-12010ea28156-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"64ab5843-1111-459f-81c5-12010ea28156\") " pod="openstack/nova-api-0" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.981293 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c3e623f-697a-4a99-b364-dc4ade09c3d0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\") " pod="openstack/ceilometer-0" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.981412 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8c3e623f-697a-4a99-b364-dc4ade09c3d0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\") " pod="openstack/ceilometer-0" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.981866 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8c3e623f-697a-4a99-b364-dc4ade09c3d0-run-httpd\") pod \"ceilometer-0\" (UID: \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\") " pod="openstack/ceilometer-0" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.987392 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c3e623f-697a-4a99-b364-dc4ade09c3d0-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\") " pod="openstack/ceilometer-0" Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.988443 4857 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64ab5843-1111-459f-81c5-12010ea28156-config-data\") pod \"nova-api-0\" (UID: \"64ab5843-1111-459f-81c5-12010ea28156\") " pod="openstack/nova-api-0"
Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.989532 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c3e623f-697a-4a99-b364-dc4ade09c3d0-config-data\") pod \"ceilometer-0\" (UID: \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\") " pod="openstack/ceilometer-0"
Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.989826 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64ab5843-1111-459f-81c5-12010ea28156-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"64ab5843-1111-459f-81c5-12010ea28156\") " pod="openstack/nova-api-0"
Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.991579 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8c3e623f-697a-4a99-b364-dc4ade09c3d0-scripts\") pod \"ceilometer-0\" (UID: \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\") " pod="openstack/ceilometer-0"
Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.994497 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8c3e623f-697a-4a99-b364-dc4ade09c3d0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\") " pod="openstack/ceilometer-0"
Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.998198 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c3e623f-697a-4a99-b364-dc4ade09c3d0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\") " pod="openstack/ceilometer-0"
Nov 28 13:53:39 crc kubenswrapper[4857]: I1128 13:53:39.999075 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5vzz2"]
Nov 28 13:53:40 crc kubenswrapper[4857]: I1128 13:53:40.002902 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ld57p\" (UniqueName: \"kubernetes.io/projected/8c3e623f-697a-4a99-b364-dc4ade09c3d0-kube-api-access-ld57p\") pod \"ceilometer-0\" (UID: \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\") " pod="openstack/ceilometer-0"
Nov 28 13:53:40 crc kubenswrapper[4857]: I1128 13:53:40.003384 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-49448\" (UniqueName: \"kubernetes.io/projected/64ab5843-1111-459f-81c5-12010ea28156-kube-api-access-49448\") pod \"nova-api-0\" (UID: \"64ab5843-1111-459f-81c5-12010ea28156\") " pod="openstack/nova-api-0"
Nov 28 13:53:40 crc kubenswrapper[4857]: I1128 13:53:40.031642 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 13:53:40 crc kubenswrapper[4857]: I1128 13:53:40.090245 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 28 13:53:40 crc kubenswrapper[4857]: I1128 13:53:40.248182 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c56933f-fbe9-479a-aa1e-dbf699eb13af" path="/var/lib/kubelet/pods/6c56933f-fbe9-479a-aa1e-dbf699eb13af/volumes"
Nov 28 13:53:40 crc kubenswrapper[4857]: I1128 13:53:40.249170 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a3a426a9-b190-45d1-8ded-d859d930aa46" path="/var/lib/kubelet/pods/a3a426a9-b190-45d1-8ded-d859d930aa46/volumes"
Nov 28 13:53:40 crc kubenswrapper[4857]: W1128 13:53:40.496021 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8c3e623f_697a_4a99_b364_dc4ade09c3d0.slice/crio-6658c6de5ff28ebd90640177beec5dbbdc5c7d2236eacc2246f3e84d7bbda1ab WatchSource:0}: Error finding container 6658c6de5ff28ebd90640177beec5dbbdc5c7d2236eacc2246f3e84d7bbda1ab: Status 404 returned error can't find the container with id 6658c6de5ff28ebd90640177beec5dbbdc5c7d2236eacc2246f3e84d7bbda1ab
Nov 28 13:53:40 crc kubenswrapper[4857]: I1128 13:53:40.502906 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 13:53:40 crc kubenswrapper[4857]: I1128 13:53:40.508543 4857 generic.go:334] "Generic (PLEG): container finished" podID="33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c" containerID="dbb3b535abd236bacca3a2228c172a08bec7293936fd8e91071564ff9aa4df53" exitCode=0
Nov 28 13:53:40 crc kubenswrapper[4857]: I1128 13:53:40.508584 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5vzz2" event={"ID":"33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c","Type":"ContainerDied","Data":"dbb3b535abd236bacca3a2228c172a08bec7293936fd8e91071564ff9aa4df53"}
Nov 28 13:53:40 crc kubenswrapper[4857]: I1128 13:53:40.508610 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5vzz2" event={"ID":"33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c","Type":"ContainerStarted","Data":"60a3ea1bf53c5e329864a8a84d14cbf6b3523b3c00b92e93f08adf51b7ea9071"}
Nov 28 13:53:40 crc kubenswrapper[4857]: W1128 13:53:40.604851 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod64ab5843_1111_459f_81c5_12010ea28156.slice/crio-b538ef043517fd58d78ab8089e04d63bfacb10b481e64602b124c7d543f9670f WatchSource:0}: Error finding container b538ef043517fd58d78ab8089e04d63bfacb10b481e64602b124c7d543f9670f: Status 404 returned error can't find the container with id b538ef043517fd58d78ab8089e04d63bfacb10b481e64602b124c7d543f9670f
Nov 28 13:53:40 crc kubenswrapper[4857]: I1128 13:53:40.605631 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 28 13:53:41 crc kubenswrapper[4857]: I1128 13:53:41.141967 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Nov 28 13:53:41 crc kubenswrapper[4857]: I1128 13:53:41.518163 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8c3e623f-697a-4a99-b364-dc4ade09c3d0","Type":"ContainerStarted","Data":"8a760445847bc1acc806a88969d2e416dafa158955c11eb113300fb847bb776f"}
Nov 28 13:53:41 crc kubenswrapper[4857]: I1128 13:53:41.518507 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8c3e623f-697a-4a99-b364-dc4ade09c3d0","Type":"ContainerStarted","Data":"6658c6de5ff28ebd90640177beec5dbbdc5c7d2236eacc2246f3e84d7bbda1ab"}
Nov 28 13:53:41 crc kubenswrapper[4857]: I1128 13:53:41.521280 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"64ab5843-1111-459f-81c5-12010ea28156","Type":"ContainerStarted","Data":"1bbc418789dffbbc77373f48c99e06529ff402e40c50288a23ec2f0e2065580c"}
Nov 28 13:53:41 crc kubenswrapper[4857]: I1128 13:53:41.521314 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"64ab5843-1111-459f-81c5-12010ea28156","Type":"ContainerStarted","Data":"091a1ea7a55a8394715c1145e6b70768a015c0c8e638ac8cb3aa73892cbe3860"}
Nov 28 13:53:41 crc kubenswrapper[4857]: I1128 13:53:41.521326 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"64ab5843-1111-459f-81c5-12010ea28156","Type":"ContainerStarted","Data":"b538ef043517fd58d78ab8089e04d63bfacb10b481e64602b124c7d543f9670f"}
Nov 28 13:53:41 crc kubenswrapper[4857]: I1128 13:53:41.523713 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5vzz2" event={"ID":"33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c","Type":"ContainerStarted","Data":"4f8c3b942b169bbfac24fd42fac9c649c3ea53527c69f811c3e005c1771aa56c"}
Nov 28 13:53:41 crc kubenswrapper[4857]: I1128 13:53:41.558197 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.5581790140000003 podStartE2EDuration="2.558179014s" podCreationTimestamp="2025-11-28 13:53:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:53:41.537962441 +0000 UTC m=+1471.661903878" watchObservedRunningTime="2025-11-28 13:53:41.558179014 +0000 UTC m=+1471.682120451"
Nov 28 13:53:42 crc kubenswrapper[4857]: I1128 13:53:42.535652 4857 generic.go:334] "Generic (PLEG): container finished" podID="33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c" containerID="4f8c3b942b169bbfac24fd42fac9c649c3ea53527c69f811c3e005c1771aa56c" exitCode=0
Nov 28 13:53:42 crc kubenswrapper[4857]: I1128 13:53:42.536145 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5vzz2" event={"ID":"33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c","Type":"ContainerDied","Data":"4f8c3b942b169bbfac24fd42fac9c649c3ea53527c69f811c3e005c1771aa56c"}
Nov 28 13:53:43 crc kubenswrapper[4857]: I1128 13:53:43.545857 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8c3e623f-697a-4a99-b364-dc4ade09c3d0","Type":"ContainerStarted","Data":"9d5cf7d0ea097dc6c2fa0cb799bb6dd66a4c34069e456f7068e8473181139014"}
Nov 28 13:53:43 crc kubenswrapper[4857]: I1128 13:53:43.953437 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Nov 28 13:53:43 crc kubenswrapper[4857]: I1128 13:53:43.953876 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Nov 28 13:53:44 crc kubenswrapper[4857]: I1128 13:53:44.556679 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5vzz2" event={"ID":"33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c","Type":"ContainerStarted","Data":"80ec56a191e53615848c61fbd0ec4b961f8f1cb9cb566bfa3cbe8acc845d5868"}
Nov 28 13:53:44 crc kubenswrapper[4857]: I1128 13:53:44.558675 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8c3e623f-697a-4a99-b364-dc4ade09c3d0","Type":"ContainerStarted","Data":"817a2b90f431370e1d9271ce616a9a77b4b12aa5cdb83a686aa197fc0dc0b533"}
Nov 28 13:53:44 crc kubenswrapper[4857]: I1128 13:53:44.585106 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5vzz2" podStartSLOduration=2.80368225 podStartE2EDuration="5.585085306s" podCreationTimestamp="2025-11-28 13:53:39 +0000 UTC" firstStartedPulling="2025-11-28 13:53:40.510232053 +0000 UTC m=+1470.634173490" lastFinishedPulling="2025-11-28 13:53:43.291635119 +0000 UTC m=+1473.415576546" observedRunningTime="2025-11-28 13:53:44.574434596 +0000 UTC m=+1474.698376073" watchObservedRunningTime="2025-11-28 13:53:44.585085306 +0000 UTC m=+1474.709026733"
Nov 28 13:53:44 crc kubenswrapper[4857]: I1128 13:53:44.967312 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="0a7818ef-92f1-45d7-8e82-a12bd9e52025" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.190:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 28 13:53:44 crc kubenswrapper[4857]: I1128 13:53:44.967784 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="0a7818ef-92f1-45d7-8e82-a12bd9e52025" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.190:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Nov 28 13:53:45 crc kubenswrapper[4857]: I1128 13:53:45.016292 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0"
Nov 28 13:53:45 crc kubenswrapper[4857]: I1128 13:53:45.884528 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0"
Nov 28 13:53:46 crc kubenswrapper[4857]: I1128 13:53:46.142382 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Nov 28 13:53:46 crc kubenswrapper[4857]: I1128 13:53:46.183632 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Nov 28 13:53:46 crc kubenswrapper[4857]: I1128 13:53:46.527219 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-jgx9f"]
Nov 28 13:53:46 crc kubenswrapper[4857]: I1128 13:53:46.529751 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jgx9f"
Nov 28 13:53:46 crc kubenswrapper[4857]: I1128 13:53:46.542920 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jgx9f"]
Nov 28 13:53:46 crc kubenswrapper[4857]: I1128 13:53:46.594325 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8c3e623f-697a-4a99-b364-dc4ade09c3d0","Type":"ContainerStarted","Data":"c313ea57797c6b2111c64aa9c6673d37933e1fd8510d6832d2e38d964d052b20"}
Nov 28 13:53:46 crc kubenswrapper[4857]: I1128 13:53:46.594372 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 28 13:53:46 crc kubenswrapper[4857]: I1128 13:53:46.616206 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-92kfn\" (UniqueName: \"kubernetes.io/projected/5c21ba48-6d3a-4d00-bc9d-01eff9227731-kube-api-access-92kfn\") pod \"redhat-marketplace-jgx9f\" (UID: \"5c21ba48-6d3a-4d00-bc9d-01eff9227731\") " pod="openshift-marketplace/redhat-marketplace-jgx9f"
Nov 28 13:53:46 crc kubenswrapper[4857]: I1128 13:53:46.616422 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c21ba48-6d3a-4d00-bc9d-01eff9227731-utilities\") pod \"redhat-marketplace-jgx9f\" (UID: \"5c21ba48-6d3a-4d00-bc9d-01eff9227731\") " pod="openshift-marketplace/redhat-marketplace-jgx9f"
Nov 28 13:53:46 crc kubenswrapper[4857]: I1128 13:53:46.616578 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c21ba48-6d3a-4d00-bc9d-01eff9227731-catalog-content\") pod \"redhat-marketplace-jgx9f\" (UID: \"5c21ba48-6d3a-4d00-bc9d-01eff9227731\") " pod="openshift-marketplace/redhat-marketplace-jgx9f"
Nov 28 13:53:46 crc kubenswrapper[4857]: I1128 13:53:46.636312 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Nov 28 13:53:46 crc kubenswrapper[4857]: I1128 13:53:46.636576 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.5362794859999997 podStartE2EDuration="7.636559137s" podCreationTimestamp="2025-11-28 13:53:39 +0000 UTC" firstStartedPulling="2025-11-28 13:53:40.501445691 +0000 UTC m=+1470.625387128" lastFinishedPulling="2025-11-28 13:53:45.601725352 +0000 UTC m=+1475.725666779" observedRunningTime="2025-11-28 13:53:46.636518716 +0000 UTC m=+1476.760460163" watchObservedRunningTime="2025-11-28 13:53:46.636559137 +0000 UTC m=+1476.760500574"
Nov 28 13:53:46 crc kubenswrapper[4857]: I1128 13:53:46.718283 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c21ba48-6d3a-4d00-bc9d-01eff9227731-utilities\") pod \"redhat-marketplace-jgx9f\" (UID: \"5c21ba48-6d3a-4d00-bc9d-01eff9227731\") " pod="openshift-marketplace/redhat-marketplace-jgx9f"
Nov 28 13:53:46 crc kubenswrapper[4857]: I1128 13:53:46.718859 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c21ba48-6d3a-4d00-bc9d-01eff9227731-utilities\") pod \"redhat-marketplace-jgx9f\" (UID: \"5c21ba48-6d3a-4d00-bc9d-01eff9227731\") " pod="openshift-marketplace/redhat-marketplace-jgx9f"
Nov 28 13:53:46 crc kubenswrapper[4857]: I1128 13:53:46.719042 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c21ba48-6d3a-4d00-bc9d-01eff9227731-catalog-content\") pod \"redhat-marketplace-jgx9f\" (UID: \"5c21ba48-6d3a-4d00-bc9d-01eff9227731\") " pod="openshift-marketplace/redhat-marketplace-jgx9f"
Nov 28 13:53:46 crc kubenswrapper[4857]: I1128 13:53:46.719767 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c21ba48-6d3a-4d00-bc9d-01eff9227731-catalog-content\") pod \"redhat-marketplace-jgx9f\" (UID: \"5c21ba48-6d3a-4d00-bc9d-01eff9227731\") " pod="openshift-marketplace/redhat-marketplace-jgx9f"
Nov 28 13:53:46 crc kubenswrapper[4857]: I1128 13:53:46.721036 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-92kfn\" (UniqueName: \"kubernetes.io/projected/5c21ba48-6d3a-4d00-bc9d-01eff9227731-kube-api-access-92kfn\") pod \"redhat-marketplace-jgx9f\" (UID: \"5c21ba48-6d3a-4d00-bc9d-01eff9227731\") " pod="openshift-marketplace/redhat-marketplace-jgx9f"
Nov 28 13:53:46 crc kubenswrapper[4857]: I1128 13:53:46.749538 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-92kfn\" (UniqueName: \"kubernetes.io/projected/5c21ba48-6d3a-4d00-bc9d-01eff9227731-kube-api-access-92kfn\") pod \"redhat-marketplace-jgx9f\" (UID: \"5c21ba48-6d3a-4d00-bc9d-01eff9227731\") " pod="openshift-marketplace/redhat-marketplace-jgx9f"
Nov 28 13:53:46 crc kubenswrapper[4857]: I1128 13:53:46.901487 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jgx9f"
Nov 28 13:53:47 crc kubenswrapper[4857]: I1128 13:53:47.390452 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jgx9f"]
Nov 28 13:53:47 crc kubenswrapper[4857]: I1128 13:53:47.605040 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jgx9f" event={"ID":"5c21ba48-6d3a-4d00-bc9d-01eff9227731","Type":"ContainerStarted","Data":"223e0cb6977871d78f91553240d26da2284f2abfdddfaa5ff68cfcf49d1e837b"}
Nov 28 13:53:48 crc kubenswrapper[4857]: I1128 13:53:48.614252 4857 generic.go:334] "Generic (PLEG): container finished" podID="5c21ba48-6d3a-4d00-bc9d-01eff9227731" containerID="bca9de347fab9653e2f0e07059ff94a76f807e8c36e38a9b1bc5eb44a643a5fa" exitCode=0
Nov 28 13:53:48 crc kubenswrapper[4857]: I1128 13:53:48.614463 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jgx9f" event={"ID":"5c21ba48-6d3a-4d00-bc9d-01eff9227731","Type":"ContainerDied","Data":"bca9de347fab9653e2f0e07059ff94a76f807e8c36e38a9b1bc5eb44a643a5fa"}
Nov 28 13:53:49 crc kubenswrapper[4857]: I1128 13:53:49.474371 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5vzz2"
Nov 28 13:53:49 crc kubenswrapper[4857]: I1128 13:53:49.474680 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5vzz2"
Nov 28 13:53:49 crc kubenswrapper[4857]: I1128 13:53:49.628447 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jgx9f" event={"ID":"5c21ba48-6d3a-4d00-bc9d-01eff9227731","Type":"ContainerStarted","Data":"d08fac611cf09e2a469aad884bcf53b54647e1c386e55bb3360f387ab127eed4"}
Nov 28 13:53:50 crc kubenswrapper[4857]: I1128 13:53:50.091343 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 28 13:53:50 crc kubenswrapper[4857]: I1128 13:53:50.091584 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 28 13:53:50 crc kubenswrapper[4857]: I1128 13:53:50.555576 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-5vzz2" podUID="33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c" containerName="registry-server" probeResult="failure" output=<
Nov 28 13:53:50 crc kubenswrapper[4857]: timeout: failed to connect service ":50051" within 1s
Nov 28 13:53:50 crc kubenswrapper[4857]: >
Nov 28 13:53:50 crc kubenswrapper[4857]: I1128 13:53:50.640676 4857 generic.go:334] "Generic (PLEG): container finished" podID="5c21ba48-6d3a-4d00-bc9d-01eff9227731" containerID="d08fac611cf09e2a469aad884bcf53b54647e1c386e55bb3360f387ab127eed4" exitCode=0
Nov 28 13:53:50 crc kubenswrapper[4857]: I1128 13:53:50.640840 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jgx9f" event={"ID":"5c21ba48-6d3a-4d00-bc9d-01eff9227731","Type":"ContainerDied","Data":"d08fac611cf09e2a469aad884bcf53b54647e1c386e55bb3360f387ab127eed4"}
Nov 28 13:53:51 crc kubenswrapper[4857]: I1128 13:53:51.173365 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="64ab5843-1111-459f-81c5-12010ea28156" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.196:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 28 13:53:51 crc kubenswrapper[4857]: I1128 13:53:51.173359 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="64ab5843-1111-459f-81c5-12010ea28156" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.196:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 28 13:53:51 crc kubenswrapper[4857]: I1128 13:53:51.656718 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jgx9f" event={"ID":"5c21ba48-6d3a-4d00-bc9d-01eff9227731","Type":"ContainerStarted","Data":"c4b708adf718f92fb88fc938c05e9cbafbd4153918b9af8cdf8e5b1844d52e32"}
Nov 28 13:53:51 crc kubenswrapper[4857]: I1128 13:53:51.698475 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-jgx9f" podStartSLOduration=3.076923362 podStartE2EDuration="5.698438357s" podCreationTimestamp="2025-11-28 13:53:46 +0000 UTC" firstStartedPulling="2025-11-28 13:53:48.616164397 +0000 UTC m=+1478.740105834" lastFinishedPulling="2025-11-28 13:53:51.237679392 +0000 UTC m=+1481.361620829" observedRunningTime="2025-11-28 13:53:51.685589789 +0000 UTC m=+1481.809531226" watchObservedRunningTime="2025-11-28 13:53:51.698438357 +0000 UTC m=+1481.822379794"
Nov 28 13:53:53 crc kubenswrapper[4857]: I1128 13:53:53.961161 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 28 13:53:53 crc kubenswrapper[4857]: I1128 13:53:53.961714 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 28 13:53:53 crc kubenswrapper[4857]: I1128 13:53:53.967478 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 28 13:53:53 crc kubenswrapper[4857]: I1128 13:53:53.969336 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 28 13:53:56 crc kubenswrapper[4857]: I1128 13:53:56.710058 4857 generic.go:334] "Generic (PLEG): container finished" podID="a6012ba9-a7cb-422d-b120-7699dff9658b" containerID="c4a145c6ca168dab4a4a683d69fa5ef3bd70e427cb8b2504227dc729ef8eb648" exitCode=137
Nov 28 13:53:56 crc kubenswrapper[4857]: I1128 13:53:56.710138 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"a6012ba9-a7cb-422d-b120-7699dff9658b","Type":"ContainerDied","Data":"c4a145c6ca168dab4a4a683d69fa5ef3bd70e427cb8b2504227dc729ef8eb648"}
Nov 28 13:53:56 crc kubenswrapper[4857]: I1128 13:53:56.903126 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-jgx9f"
Nov 28 13:53:56 crc kubenswrapper[4857]: I1128 13:53:56.903282 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-jgx9f"
Nov 28 13:53:56 crc kubenswrapper[4857]: I1128 13:53:56.966630 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-jgx9f"
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.214709 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.249863 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jcdqk\" (UniqueName: \"kubernetes.io/projected/a6012ba9-a7cb-422d-b120-7699dff9658b-kube-api-access-jcdqk\") pod \"a6012ba9-a7cb-422d-b120-7699dff9658b\" (UID: \"a6012ba9-a7cb-422d-b120-7699dff9658b\") "
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.249916 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6012ba9-a7cb-422d-b120-7699dff9658b-combined-ca-bundle\") pod \"a6012ba9-a7cb-422d-b120-7699dff9658b\" (UID: \"a6012ba9-a7cb-422d-b120-7699dff9658b\") "
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.250091 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6012ba9-a7cb-422d-b120-7699dff9658b-config-data\") pod \"a6012ba9-a7cb-422d-b120-7699dff9658b\" (UID: \"a6012ba9-a7cb-422d-b120-7699dff9658b\") "
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.255204 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6012ba9-a7cb-422d-b120-7699dff9658b-kube-api-access-jcdqk" (OuterVolumeSpecName: "kube-api-access-jcdqk") pod "a6012ba9-a7cb-422d-b120-7699dff9658b" (UID: "a6012ba9-a7cb-422d-b120-7699dff9658b"). InnerVolumeSpecName "kube-api-access-jcdqk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.281092 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6012ba9-a7cb-422d-b120-7699dff9658b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a6012ba9-a7cb-422d-b120-7699dff9658b" (UID: "a6012ba9-a7cb-422d-b120-7699dff9658b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.286830 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6012ba9-a7cb-422d-b120-7699dff9658b-config-data" (OuterVolumeSpecName: "config-data") pod "a6012ba9-a7cb-422d-b120-7699dff9658b" (UID: "a6012ba9-a7cb-422d-b120-7699dff9658b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.351835 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a6012ba9-a7cb-422d-b120-7699dff9658b-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.351876 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jcdqk\" (UniqueName: \"kubernetes.io/projected/a6012ba9-a7cb-422d-b120-7699dff9658b-kube-api-access-jcdqk\") on node \"crc\" DevicePath \"\""
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.351891 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a6012ba9-a7cb-422d-b120-7699dff9658b-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.723266 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.723265 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"a6012ba9-a7cb-422d-b120-7699dff9658b","Type":"ContainerDied","Data":"a45507747e50cfdd8070da9bb7d03e670aa203942aa5eea599fdf7046b84b369"}
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.723358 4857 scope.go:117] "RemoveContainer" containerID="c4a145c6ca168dab4a4a683d69fa5ef3bd70e427cb8b2504227dc729ef8eb648"
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.767261 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.788313 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.806421 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 28 13:53:57 crc kubenswrapper[4857]: E1128 13:53:57.806913 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6012ba9-a7cb-422d-b120-7699dff9658b" containerName="nova-cell1-novncproxy-novncproxy"
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.806936 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6012ba9-a7cb-422d-b120-7699dff9658b" containerName="nova-cell1-novncproxy-novncproxy"
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.807196 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6012ba9-a7cb-422d-b120-7699dff9658b" containerName="nova-cell1-novncproxy-novncproxy"
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.808051 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.815730 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc"
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.815829 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt"
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.815917 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data"
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.818483 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.836726 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-jgx9f"
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.862304 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/2bf149d2-9beb-4394-921a-a703473391aa-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"2bf149d2-9beb-4394-921a-a703473391aa\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.862360 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bf149d2-9beb-4394-921a-a703473391aa-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"2bf149d2-9beb-4394-921a-a703473391aa\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.862383 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/2bf149d2-9beb-4394-921a-a703473391aa-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"2bf149d2-9beb-4394-921a-a703473391aa\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.862510 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hwb58\" (UniqueName: \"kubernetes.io/projected/2bf149d2-9beb-4394-921a-a703473391aa-kube-api-access-hwb58\") pod \"nova-cell1-novncproxy-0\" (UID: \"2bf149d2-9beb-4394-921a-a703473391aa\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.862533 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bf149d2-9beb-4394-921a-a703473391aa-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"2bf149d2-9beb-4394-921a-a703473391aa\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.887971 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jgx9f"]
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.964603 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/2bf149d2-9beb-4394-921a-a703473391aa-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"2bf149d2-9beb-4394-921a-a703473391aa\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.964666 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bf149d2-9beb-4394-921a-a703473391aa-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"2bf149d2-9beb-4394-921a-a703473391aa\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.964694 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/2bf149d2-9beb-4394-921a-a703473391aa-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"2bf149d2-9beb-4394-921a-a703473391aa\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.964865 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hwb58\" (UniqueName: \"kubernetes.io/projected/2bf149d2-9beb-4394-921a-a703473391aa-kube-api-access-hwb58\") pod \"nova-cell1-novncproxy-0\" (UID: \"2bf149d2-9beb-4394-921a-a703473391aa\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.964900 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bf149d2-9beb-4394-921a-a703473391aa-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"2bf149d2-9beb-4394-921a-a703473391aa\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.973717 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/2bf149d2-9beb-4394-921a-a703473391aa-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"2bf149d2-9beb-4394-921a-a703473391aa\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.973747 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/2bf149d2-9beb-4394-921a-a703473391aa-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"2bf149d2-9beb-4394-921a-a703473391aa\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.974195 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bf149d2-9beb-4394-921a-a703473391aa-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"2bf149d2-9beb-4394-921a-a703473391aa\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.974219 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bf149d2-9beb-4394-921a-a703473391aa-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"2bf149d2-9beb-4394-921a-a703473391aa\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:53:57 crc kubenswrapper[4857]: I1128 13:53:57.982747 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hwb58\" (UniqueName: \"kubernetes.io/projected/2bf149d2-9beb-4394-921a-a703473391aa-kube-api-access-hwb58\") pod \"nova-cell1-novncproxy-0\" (UID: \"2bf149d2-9beb-4394-921a-a703473391aa\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:53:58 crc kubenswrapper[4857]: I1128 13:53:58.147511 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:53:58 crc kubenswrapper[4857]: I1128 13:53:58.258765 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a6012ba9-a7cb-422d-b120-7699dff9658b" path="/var/lib/kubelet/pods/a6012ba9-a7cb-422d-b120-7699dff9658b/volumes"
Nov 28 13:53:58 crc kubenswrapper[4857]: I1128 13:53:58.581195 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 28 13:53:58 crc kubenswrapper[4857]: W1128 13:53:58.583853 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2bf149d2_9beb_4394_921a_a703473391aa.slice/crio-1e17fe176054cb303258a70c4f1160d67735a4a6c043e7790ba9bb040e333179 WatchSource:0}: Error finding container 1e17fe176054cb303258a70c4f1160d67735a4a6c043e7790ba9bb040e333179: Status 404 returned error can't find the container with id 1e17fe176054cb303258a70c4f1160d67735a4a6c043e7790ba9bb040e333179
Nov 28 13:53:58 crc kubenswrapper[4857]: I1128 13:53:58.734930 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"2bf149d2-9beb-4394-921a-a703473391aa","Type":"ContainerStarted","Data":"1e17fe176054cb303258a70c4f1160d67735a4a6c043e7790ba9bb040e333179"}
Nov 28 13:53:59 crc kubenswrapper[4857]: I1128 13:53:59.549706 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5vzz2"
Nov 28 13:53:59 crc kubenswrapper[4857]: I1128 13:53:59.615360 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5vzz2"
Nov 28 13:53:59 crc kubenswrapper[4857]: I1128 13:53:59.749041 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"2bf149d2-9beb-4394-921a-a703473391aa","Type":"ContainerStarted","Data":"2cd656be1985a74adb1ed52d510cb94e3e9f9d8ec5011e4fd68bc155cf37553b"}
Nov 28 13:53:59 crc kubenswrapper[4857]: I1128 13:53:59.749228 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-jgx9f" podUID="5c21ba48-6d3a-4d00-bc9d-01eff9227731" containerName="registry-server" containerID="cri-o://c4b708adf718f92fb88fc938c05e9cbafbd4153918b9af8cdf8e5b1844d52e32" gracePeriod=2
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.097702 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.099400 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.100650 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.102741 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.119079 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=3.119054838 podStartE2EDuration="3.119054838s" podCreationTimestamp="2025-11-28 13:53:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:53:59.778391827 +0000 UTC m=+1489.902333274" watchObservedRunningTime="2025-11-28 13:54:00.119054838 +0000 UTC m=+1490.242996295"
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.255207 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jgx9f"
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.307234 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-92kfn\" (UniqueName: \"kubernetes.io/projected/5c21ba48-6d3a-4d00-bc9d-01eff9227731-kube-api-access-92kfn\") pod \"5c21ba48-6d3a-4d00-bc9d-01eff9227731\" (UID: \"5c21ba48-6d3a-4d00-bc9d-01eff9227731\") "
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.308347 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c21ba48-6d3a-4d00-bc9d-01eff9227731-utilities\") pod \"5c21ba48-6d3a-4d00-bc9d-01eff9227731\" (UID: \"5c21ba48-6d3a-4d00-bc9d-01eff9227731\") "
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.308453 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c21ba48-6d3a-4d00-bc9d-01eff9227731-catalog-content\") pod \"5c21ba48-6d3a-4d00-bc9d-01eff9227731\" (UID: \"5c21ba48-6d3a-4d00-bc9d-01eff9227731\") "
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.309820 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c21ba48-6d3a-4d00-bc9d-01eff9227731-utilities" (OuterVolumeSpecName: "utilities") pod "5c21ba48-6d3a-4d00-bc9d-01eff9227731" (UID: "5c21ba48-6d3a-4d00-bc9d-01eff9227731"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.313178 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c21ba48-6d3a-4d00-bc9d-01eff9227731-kube-api-access-92kfn" (OuterVolumeSpecName: "kube-api-access-92kfn") pod "5c21ba48-6d3a-4d00-bc9d-01eff9227731" (UID: "5c21ba48-6d3a-4d00-bc9d-01eff9227731"). InnerVolumeSpecName "kube-api-access-92kfn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.328983 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c21ba48-6d3a-4d00-bc9d-01eff9227731-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5c21ba48-6d3a-4d00-bc9d-01eff9227731" (UID: "5c21ba48-6d3a-4d00-bc9d-01eff9227731"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.411594 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-92kfn\" (UniqueName: \"kubernetes.io/projected/5c21ba48-6d3a-4d00-bc9d-01eff9227731-kube-api-access-92kfn\") on node \"crc\" DevicePath \"\""
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.411641 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c21ba48-6d3a-4d00-bc9d-01eff9227731-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.411655 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c21ba48-6d3a-4d00-bc9d-01eff9227731-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.765776 4857 generic.go:334] "Generic (PLEG): container finished" podID="5c21ba48-6d3a-4d00-bc9d-01eff9227731" containerID="c4b708adf718f92fb88fc938c05e9cbafbd4153918b9af8cdf8e5b1844d52e32" exitCode=0
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.765893 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jgx9f"
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.765892 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jgx9f" event={"ID":"5c21ba48-6d3a-4d00-bc9d-01eff9227731","Type":"ContainerDied","Data":"c4b708adf718f92fb88fc938c05e9cbafbd4153918b9af8cdf8e5b1844d52e32"}
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.766157 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jgx9f" event={"ID":"5c21ba48-6d3a-4d00-bc9d-01eff9227731","Type":"ContainerDied","Data":"223e0cb6977871d78f91553240d26da2284f2abfdddfaa5ff68cfcf49d1e837b"}
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.766194 4857 scope.go:117] "RemoveContainer" containerID="c4b708adf718f92fb88fc938c05e9cbafbd4153918b9af8cdf8e5b1844d52e32"
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.766866 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.770660 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.798764 4857 scope.go:117] "RemoveContainer" containerID="d08fac611cf09e2a469aad884bcf53b54647e1c386e55bb3360f387ab127eed4"
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.833447 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5vzz2"]
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.833747 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-5vzz2" podUID="33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c" containerName="registry-server" containerID="cri-o://80ec56a191e53615848c61fbd0ec4b961f8f1cb9cb566bfa3cbe8acc845d5868" gracePeriod=2
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.852906 4857 scope.go:117] "RemoveContainer" containerID="bca9de347fab9653e2f0e07059ff94a76f807e8c36e38a9b1bc5eb44a643a5fa"
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.869559 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jgx9f"]
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.886160 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-jgx9f"]
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.949243 4857 scope.go:117] "RemoveContainer" containerID="c4b708adf718f92fb88fc938c05e9cbafbd4153918b9af8cdf8e5b1844d52e32"
Nov 28 13:54:00 crc kubenswrapper[4857]: E1128 13:54:00.954096 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c4b708adf718f92fb88fc938c05e9cbafbd4153918b9af8cdf8e5b1844d52e32\": container with ID starting with c4b708adf718f92fb88fc938c05e9cbafbd4153918b9af8cdf8e5b1844d52e32 not found: ID does not exist" containerID="c4b708adf718f92fb88fc938c05e9cbafbd4153918b9af8cdf8e5b1844d52e32"
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.954143 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c4b708adf718f92fb88fc938c05e9cbafbd4153918b9af8cdf8e5b1844d52e32"} err="failed to get container status \"c4b708adf718f92fb88fc938c05e9cbafbd4153918b9af8cdf8e5b1844d52e32\": rpc error: code = NotFound desc = could not find container \"c4b708adf718f92fb88fc938c05e9cbafbd4153918b9af8cdf8e5b1844d52e32\": container with ID starting with c4b708adf718f92fb88fc938c05e9cbafbd4153918b9af8cdf8e5b1844d52e32 not found: ID does not exist"
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.954168 4857 scope.go:117] "RemoveContainer" containerID="d08fac611cf09e2a469aad884bcf53b54647e1c386e55bb3360f387ab127eed4"
Nov 28 13:54:00 crc kubenswrapper[4857]: E1128 13:54:00.956970 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d08fac611cf09e2a469aad884bcf53b54647e1c386e55bb3360f387ab127eed4\": container with ID starting with d08fac611cf09e2a469aad884bcf53b54647e1c386e55bb3360f387ab127eed4 not found: ID does not exist" containerID="d08fac611cf09e2a469aad884bcf53b54647e1c386e55bb3360f387ab127eed4"
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.957014 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d08fac611cf09e2a469aad884bcf53b54647e1c386e55bb3360f387ab127eed4"} err="failed to get container status \"d08fac611cf09e2a469aad884bcf53b54647e1c386e55bb3360f387ab127eed4\": rpc error: code = NotFound desc = could not find container \"d08fac611cf09e2a469aad884bcf53b54647e1c386e55bb3360f387ab127eed4\": container with ID starting with d08fac611cf09e2a469aad884bcf53b54647e1c386e55bb3360f387ab127eed4 not found: ID does not exist"
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.957044 4857 scope.go:117] "RemoveContainer" containerID="bca9de347fab9653e2f0e07059ff94a76f807e8c36e38a9b1bc5eb44a643a5fa"
Nov 28 13:54:00 crc kubenswrapper[4857]: E1128 13:54:00.963635 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bca9de347fab9653e2f0e07059ff94a76f807e8c36e38a9b1bc5eb44a643a5fa\": container with ID starting with bca9de347fab9653e2f0e07059ff94a76f807e8c36e38a9b1bc5eb44a643a5fa not found: ID does not exist" containerID="bca9de347fab9653e2f0e07059ff94a76f807e8c36e38a9b1bc5eb44a643a5fa"
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.963672 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bca9de347fab9653e2f0e07059ff94a76f807e8c36e38a9b1bc5eb44a643a5fa"} err="failed to get container status \"bca9de347fab9653e2f0e07059ff94a76f807e8c36e38a9b1bc5eb44a643a5fa\": rpc error: code = NotFound desc = could not find container \"bca9de347fab9653e2f0e07059ff94a76f807e8c36e38a9b1bc5eb44a643a5fa\": container with ID starting with bca9de347fab9653e2f0e07059ff94a76f807e8c36e38a9b1bc5eb44a643a5fa not found: ID does not exist"
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.974812 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-7frjn"]
Nov 28 13:54:00 crc kubenswrapper[4857]: E1128 13:54:00.975571 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c21ba48-6d3a-4d00-bc9d-01eff9227731" containerName="extract-utilities"
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.975596 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c21ba48-6d3a-4d00-bc9d-01eff9227731" containerName="extract-utilities"
Nov 28 13:54:00 crc kubenswrapper[4857]: E1128 13:54:00.975648 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c21ba48-6d3a-4d00-bc9d-01eff9227731" containerName="registry-server"
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.975657 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c21ba48-6d3a-4d00-bc9d-01eff9227731" containerName="registry-server"
Nov 28 13:54:00 crc kubenswrapper[4857]: E1128 13:54:00.975676 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c21ba48-6d3a-4d00-bc9d-01eff9227731" containerName="extract-content"
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.975685 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c21ba48-6d3a-4d00-bc9d-01eff9227731" containerName="extract-content"
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.975922 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c21ba48-6d3a-4d00-bc9d-01eff9227731" containerName="registry-server"
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.977205 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-7frjn"
Nov 28 13:54:00 crc kubenswrapper[4857]: I1128 13:54:00.989080 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-7frjn"]
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.026021 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bf608325-3713-4ff6-8e16-c7993618ef71-dns-svc\") pod \"dnsmasq-dns-cd5cbd7b9-7frjn\" (UID: \"bf608325-3713-4ff6-8e16-c7993618ef71\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-7frjn"
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.026073 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bf608325-3713-4ff6-8e16-c7993618ef71-ovsdbserver-sb\") pod \"dnsmasq-dns-cd5cbd7b9-7frjn\" (UID: \"bf608325-3713-4ff6-8e16-c7993618ef71\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-7frjn"
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.026107 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jsz48\" (UniqueName: \"kubernetes.io/projected/bf608325-3713-4ff6-8e16-c7993618ef71-kube-api-access-jsz48\") pod \"dnsmasq-dns-cd5cbd7b9-7frjn\" (UID: \"bf608325-3713-4ff6-8e16-c7993618ef71\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-7frjn"
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.026162 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf608325-3713-4ff6-8e16-c7993618ef71-config\") pod \"dnsmasq-dns-cd5cbd7b9-7frjn\" (UID: \"bf608325-3713-4ff6-8e16-c7993618ef71\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-7frjn"
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.026221 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bf608325-3713-4ff6-8e16-c7993618ef71-dns-swift-storage-0\") pod \"dnsmasq-dns-cd5cbd7b9-7frjn\" (UID: \"bf608325-3713-4ff6-8e16-c7993618ef71\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-7frjn"
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.026262 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bf608325-3713-4ff6-8e16-c7993618ef71-ovsdbserver-nb\") pod \"dnsmasq-dns-cd5cbd7b9-7frjn\" (UID: \"bf608325-3713-4ff6-8e16-c7993618ef71\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-7frjn"
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.129370 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf608325-3713-4ff6-8e16-c7993618ef71-config\") pod \"dnsmasq-dns-cd5cbd7b9-7frjn\" (UID: \"bf608325-3713-4ff6-8e16-c7993618ef71\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-7frjn"
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.129444 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bf608325-3713-4ff6-8e16-c7993618ef71-dns-swift-storage-0\") pod \"dnsmasq-dns-cd5cbd7b9-7frjn\" (UID: \"bf608325-3713-4ff6-8e16-c7993618ef71\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-7frjn"
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.129475 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bf608325-3713-4ff6-8e16-c7993618ef71-ovsdbserver-nb\") pod \"dnsmasq-dns-cd5cbd7b9-7frjn\" (UID: \"bf608325-3713-4ff6-8e16-c7993618ef71\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-7frjn"
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.129570 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bf608325-3713-4ff6-8e16-c7993618ef71-dns-svc\") pod \"dnsmasq-dns-cd5cbd7b9-7frjn\" (UID: \"bf608325-3713-4ff6-8e16-c7993618ef71\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-7frjn"
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.129587 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bf608325-3713-4ff6-8e16-c7993618ef71-ovsdbserver-sb\") pod \"dnsmasq-dns-cd5cbd7b9-7frjn\" (UID: \"bf608325-3713-4ff6-8e16-c7993618ef71\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-7frjn"
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.129622 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jsz48\" (UniqueName: \"kubernetes.io/projected/bf608325-3713-4ff6-8e16-c7993618ef71-kube-api-access-jsz48\") pod \"dnsmasq-dns-cd5cbd7b9-7frjn\" (UID: \"bf608325-3713-4ff6-8e16-c7993618ef71\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-7frjn"
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.130519 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bf608325-3713-4ff6-8e16-c7993618ef71-dns-svc\") pod \"dnsmasq-dns-cd5cbd7b9-7frjn\" (UID: \"bf608325-3713-4ff6-8e16-c7993618ef71\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-7frjn"
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.130530 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf608325-3713-4ff6-8e16-c7993618ef71-config\") pod \"dnsmasq-dns-cd5cbd7b9-7frjn\" (UID: \"bf608325-3713-4ff6-8e16-c7993618ef71\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-7frjn"
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.130553 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bf608325-3713-4ff6-8e16-c7993618ef71-dns-swift-storage-0\") pod \"dnsmasq-dns-cd5cbd7b9-7frjn\" (UID: \"bf608325-3713-4ff6-8e16-c7993618ef71\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-7frjn"
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.130603 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bf608325-3713-4ff6-8e16-c7993618ef71-ovsdbserver-sb\") pod \"dnsmasq-dns-cd5cbd7b9-7frjn\" (UID: \"bf608325-3713-4ff6-8e16-c7993618ef71\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-7frjn"
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.130680 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bf608325-3713-4ff6-8e16-c7993618ef71-ovsdbserver-nb\") pod \"dnsmasq-dns-cd5cbd7b9-7frjn\" (UID: \"bf608325-3713-4ff6-8e16-c7993618ef71\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-7frjn"
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.147896 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jsz48\" (UniqueName: \"kubernetes.io/projected/bf608325-3713-4ff6-8e16-c7993618ef71-kube-api-access-jsz48\") pod \"dnsmasq-dns-cd5cbd7b9-7frjn\" (UID: \"bf608325-3713-4ff6-8e16-c7993618ef71\") " pod="openstack/dnsmasq-dns-cd5cbd7b9-7frjn"
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.346013 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-7frjn"
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.499401 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5vzz2"
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.535367 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bxp9t\" (UniqueName: \"kubernetes.io/projected/33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c-kube-api-access-bxp9t\") pod \"33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c\" (UID: \"33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c\") "
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.535796 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c-catalog-content\") pod \"33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c\" (UID: \"33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c\") "
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.535915 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c-utilities\") pod \"33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c\" (UID: \"33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c\") "
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.536826 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c-utilities" (OuterVolumeSpecName: "utilities") pod "33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c" (UID: "33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.543680 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c-kube-api-access-bxp9t" (OuterVolumeSpecName: "kube-api-access-bxp9t") pod "33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c" (UID: "33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c"). InnerVolumeSpecName "kube-api-access-bxp9t". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.639179 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.639211 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bxp9t\" (UniqueName: \"kubernetes.io/projected/33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c-kube-api-access-bxp9t\") on node \"crc\" DevicePath \"\""
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.674896 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c" (UID: "33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.740973 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.780077 4857 generic.go:334] "Generic (PLEG): container finished" podID="33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c" containerID="80ec56a191e53615848c61fbd0ec4b961f8f1cb9cb566bfa3cbe8acc845d5868" exitCode=0
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.780197 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5vzz2"
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.780264 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5vzz2" event={"ID":"33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c","Type":"ContainerDied","Data":"80ec56a191e53615848c61fbd0ec4b961f8f1cb9cb566bfa3cbe8acc845d5868"}
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.780348 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5vzz2" event={"ID":"33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c","Type":"ContainerDied","Data":"60a3ea1bf53c5e329864a8a84d14cbf6b3523b3c00b92e93f08adf51b7ea9071"}
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.780374 4857 scope.go:117] "RemoveContainer" containerID="80ec56a191e53615848c61fbd0ec4b961f8f1cb9cb566bfa3cbe8acc845d5868"
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.826540 4857 scope.go:117] "RemoveContainer" containerID="4f8c3b942b169bbfac24fd42fac9c649c3ea53527c69f811c3e005c1771aa56c"
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.841353 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5vzz2"]
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.859279 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-5vzz2"]
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.871464 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-7frjn"]
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.876608 4857 scope.go:117] "RemoveContainer" containerID="dbb3b535abd236bacca3a2228c172a08bec7293936fd8e91071564ff9aa4df53"
Nov 28 13:54:01 crc kubenswrapper[4857]: W1128 13:54:01.882172 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbf608325_3713_4ff6_8e16_c7993618ef71.slice/crio-138864600aa88c6a4bb8c8c1b4c0edfbf27f19e4a2a11296ba0620b40673bb94 WatchSource:0}: Error finding container 138864600aa88c6a4bb8c8c1b4c0edfbf27f19e4a2a11296ba0620b40673bb94: Status 404 returned error can't find the container with id 138864600aa88c6a4bb8c8c1b4c0edfbf27f19e4a2a11296ba0620b40673bb94
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.899991 4857 scope.go:117] "RemoveContainer" containerID="80ec56a191e53615848c61fbd0ec4b961f8f1cb9cb566bfa3cbe8acc845d5868"
Nov 28 13:54:01 crc kubenswrapper[4857]: E1128 13:54:01.913614 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"80ec56a191e53615848c61fbd0ec4b961f8f1cb9cb566bfa3cbe8acc845d5868\": container with ID starting with 80ec56a191e53615848c61fbd0ec4b961f8f1cb9cb566bfa3cbe8acc845d5868 not found: ID does not exist" containerID="80ec56a191e53615848c61fbd0ec4b961f8f1cb9cb566bfa3cbe8acc845d5868"
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.913697 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"80ec56a191e53615848c61fbd0ec4b961f8f1cb9cb566bfa3cbe8acc845d5868"} err="failed to get container status \"80ec56a191e53615848c61fbd0ec4b961f8f1cb9cb566bfa3cbe8acc845d5868\": rpc error: code = NotFound desc = could not find container \"80ec56a191e53615848c61fbd0ec4b961f8f1cb9cb566bfa3cbe8acc845d5868\": container with ID starting with 80ec56a191e53615848c61fbd0ec4b961f8f1cb9cb566bfa3cbe8acc845d5868 not found: ID does not exist"
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.913776 4857 scope.go:117] "RemoveContainer" containerID="4f8c3b942b169bbfac24fd42fac9c649c3ea53527c69f811c3e005c1771aa56c"
Nov 28 13:54:01 crc kubenswrapper[4857]: E1128 13:54:01.940389 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4f8c3b942b169bbfac24fd42fac9c649c3ea53527c69f811c3e005c1771aa56c\": container with ID starting with 4f8c3b942b169bbfac24fd42fac9c649c3ea53527c69f811c3e005c1771aa56c not found: ID does not exist" containerID="4f8c3b942b169bbfac24fd42fac9c649c3ea53527c69f811c3e005c1771aa56c"
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.940448 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4f8c3b942b169bbfac24fd42fac9c649c3ea53527c69f811c3e005c1771aa56c"} err="failed to get container status \"4f8c3b942b169bbfac24fd42fac9c649c3ea53527c69f811c3e005c1771aa56c\": rpc error: code = NotFound desc = could not find container \"4f8c3b942b169bbfac24fd42fac9c649c3ea53527c69f811c3e005c1771aa56c\": container with ID starting with 4f8c3b942b169bbfac24fd42fac9c649c3ea53527c69f811c3e005c1771aa56c not found: ID does not exist"
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.940492 4857 scope.go:117] "RemoveContainer" containerID="dbb3b535abd236bacca3a2228c172a08bec7293936fd8e91071564ff9aa4df53"
Nov 28 13:54:01 crc kubenswrapper[4857]: E1128 13:54:01.941213 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dbb3b535abd236bacca3a2228c172a08bec7293936fd8e91071564ff9aa4df53\": container with ID starting with dbb3b535abd236bacca3a2228c172a08bec7293936fd8e91071564ff9aa4df53 not found: ID does not exist" containerID="dbb3b535abd236bacca3a2228c172a08bec7293936fd8e91071564ff9aa4df53"
Nov 28 13:54:01 crc kubenswrapper[4857]: I1128 13:54:01.941241 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dbb3b535abd236bacca3a2228c172a08bec7293936fd8e91071564ff9aa4df53"} err="failed to get container status \"dbb3b535abd236bacca3a2228c172a08bec7293936fd8e91071564ff9aa4df53\": rpc error: code = NotFound desc = could not find container \"dbb3b535abd236bacca3a2228c172a08bec7293936fd8e91071564ff9aa4df53\": container with ID starting with dbb3b535abd236bacca3a2228c172a08bec7293936fd8e91071564ff9aa4df53 not found: ID does not exist"
Nov 28 13:54:02 crc kubenswrapper[4857]: I1128 13:54:02.244085 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c" path="/var/lib/kubelet/pods/33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c/volumes"
Nov 28 13:54:02 crc kubenswrapper[4857]: I1128 13:54:02.245859 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5c21ba48-6d3a-4d00-bc9d-01eff9227731" path="/var/lib/kubelet/pods/5c21ba48-6d3a-4d00-bc9d-01eff9227731/volumes"
Nov 28 13:54:02 crc kubenswrapper[4857]: I1128 13:54:02.791301 4857 generic.go:334] "Generic (PLEG): container finished" podID="bf608325-3713-4ff6-8e16-c7993618ef71" containerID="565059e035a1a68340f03f186060685bb0562fc858f4fe8a856d1066b91f7e02" exitCode=0
Nov 28 13:54:02 crc kubenswrapper[4857]: I1128 13:54:02.791389 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-7frjn" event={"ID":"bf608325-3713-4ff6-8e16-c7993618ef71","Type":"ContainerDied","Data":"565059e035a1a68340f03f186060685bb0562fc858f4fe8a856d1066b91f7e02"}
Nov 28 13:54:02 crc kubenswrapper[4857]: I1128 13:54:02.791451 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-7frjn" event={"ID":"bf608325-3713-4ff6-8e16-c7993618ef71","Type":"ContainerStarted","Data":"138864600aa88c6a4bb8c8c1b4c0edfbf27f19e4a2a11296ba0620b40673bb94"}
Nov 28 13:54:03 crc kubenswrapper[4857]: I1128 13:54:03.096416 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 13:54:03 crc kubenswrapper[4857]: I1128 13:54:03.097156 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8c3e623f-697a-4a99-b364-dc4ade09c3d0" containerName="ceilometer-central-agent" containerID="cri-o://8a760445847bc1acc806a88969d2e416dafa158955c11eb113300fb847bb776f" gracePeriod=30
Nov 28 13:54:03 crc kubenswrapper[4857]: I1128 13:54:03.097243 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8c3e623f-697a-4a99-b364-dc4ade09c3d0" containerName="sg-core" containerID="cri-o://817a2b90f431370e1d9271ce616a9a77b4b12aa5cdb83a686aa197fc0dc0b533" gracePeriod=30
Nov 28 13:54:03 crc kubenswrapper[4857]: I1128 13:54:03.097290 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8c3e623f-697a-4a99-b364-dc4ade09c3d0" containerName="ceilometer-notification-agent" containerID="cri-o://9d5cf7d0ea097dc6c2fa0cb799bb6dd66a4c34069e456f7068e8473181139014" gracePeriod=30
Nov 28 13:54:03 crc kubenswrapper[4857]: I1128 13:54:03.097385 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8c3e623f-697a-4a99-b364-dc4ade09c3d0" containerName="proxy-httpd" containerID="cri-o://c313ea57797c6b2111c64aa9c6673d37933e1fd8510d6832d2e38d964d052b20" gracePeriod=30
Nov 28 13:54:03 crc kubenswrapper[4857]: I1128 13:54:03.149358 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0"
Nov 28 13:54:03 crc kubenswrapper[4857]: I1128 13:54:03.200720 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="8c3e623f-697a-4a99-b364-dc4ade09c3d0" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.195:3000/\": read tcp 10.217.0.2:56586->10.217.0.195:3000: read: connection reset by peer"
Nov 28 13:54:03 crc kubenswrapper[4857]: I1128 13:54:03.704577 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 28 13:54:03 crc kubenswrapper[4857]: I1128 13:54:03.807277 4857 generic.go:334] "Generic (PLEG): container finished" podID="8c3e623f-697a-4a99-b364-dc4ade09c3d0" containerID="c313ea57797c6b2111c64aa9c6673d37933e1fd8510d6832d2e38d964d052b20" exitCode=0
Nov 28 13:54:03 crc kubenswrapper[4857]: I1128 13:54:03.808334
4857 generic.go:334] "Generic (PLEG): container finished" podID="8c3e623f-697a-4a99-b364-dc4ade09c3d0" containerID="817a2b90f431370e1d9271ce616a9a77b4b12aa5cdb83a686aa197fc0dc0b533" exitCode=2 Nov 28 13:54:03 crc kubenswrapper[4857]: I1128 13:54:03.808414 4857 generic.go:334] "Generic (PLEG): container finished" podID="8c3e623f-697a-4a99-b364-dc4ade09c3d0" containerID="8a760445847bc1acc806a88969d2e416dafa158955c11eb113300fb847bb776f" exitCode=0 Nov 28 13:54:03 crc kubenswrapper[4857]: I1128 13:54:03.807455 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8c3e623f-697a-4a99-b364-dc4ade09c3d0","Type":"ContainerDied","Data":"c313ea57797c6b2111c64aa9c6673d37933e1fd8510d6832d2e38d964d052b20"} Nov 28 13:54:03 crc kubenswrapper[4857]: I1128 13:54:03.808595 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8c3e623f-697a-4a99-b364-dc4ade09c3d0","Type":"ContainerDied","Data":"817a2b90f431370e1d9271ce616a9a77b4b12aa5cdb83a686aa197fc0dc0b533"} Nov 28 13:54:03 crc kubenswrapper[4857]: I1128 13:54:03.808664 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8c3e623f-697a-4a99-b364-dc4ade09c3d0","Type":"ContainerDied","Data":"8a760445847bc1acc806a88969d2e416dafa158955c11eb113300fb847bb776f"} Nov 28 13:54:03 crc kubenswrapper[4857]: I1128 13:54:03.811542 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="64ab5843-1111-459f-81c5-12010ea28156" containerName="nova-api-log" containerID="cri-o://091a1ea7a55a8394715c1145e6b70768a015c0c8e638ac8cb3aa73892cbe3860" gracePeriod=30 Nov 28 13:54:03 crc kubenswrapper[4857]: I1128 13:54:03.812880 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-7frjn" event={"ID":"bf608325-3713-4ff6-8e16-c7993618ef71","Type":"ContainerStarted","Data":"d13a0f999cf39e2cf71564829a093c201bf5675270b0ded0fe06d26418a0bf83"} Nov 28 13:54:03 crc kubenswrapper[4857]: I1128 13:54:03.813002 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-cd5cbd7b9-7frjn" Nov 28 13:54:03 crc kubenswrapper[4857]: I1128 13:54:03.813197 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="64ab5843-1111-459f-81c5-12010ea28156" containerName="nova-api-api" containerID="cri-o://1bbc418789dffbbc77373f48c99e06529ff402e40c50288a23ec2f0e2065580c" gracePeriod=30 Nov 28 13:54:03 crc kubenswrapper[4857]: I1128 13:54:03.838083 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-cd5cbd7b9-7frjn" podStartSLOduration=3.838062849 podStartE2EDuration="3.838062849s" podCreationTimestamp="2025-11-28 13:54:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:54:03.835620195 +0000 UTC m=+1493.959561632" watchObservedRunningTime="2025-11-28 13:54:03.838062849 +0000 UTC m=+1493.962004286" Nov 28 13:54:04 crc kubenswrapper[4857]: I1128 13:54:04.824660 4857 generic.go:334] "Generic (PLEG): container finished" podID="64ab5843-1111-459f-81c5-12010ea28156" containerID="091a1ea7a55a8394715c1145e6b70768a015c0c8e638ac8cb3aa73892cbe3860" exitCode=143 Nov 28 13:54:04 crc kubenswrapper[4857]: I1128 13:54:04.824718 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"64ab5843-1111-459f-81c5-12010ea28156","Type":"ContainerDied","Data":"091a1ea7a55a8394715c1145e6b70768a015c0c8e638ac8cb3aa73892cbe3860"} Nov 28 13:54:04 crc kubenswrapper[4857]: I1128 13:54:04.828631 4857 generic.go:334] "Generic (PLEG): container finished" podID="8c3e623f-697a-4a99-b364-dc4ade09c3d0" containerID="9d5cf7d0ea097dc6c2fa0cb799bb6dd66a4c34069e456f7068e8473181139014" exitCode=0 Nov 28 13:54:04 crc kubenswrapper[4857]: I1128 13:54:04.828673 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8c3e623f-697a-4a99-b364-dc4ade09c3d0","Type":"ContainerDied","Data":"9d5cf7d0ea097dc6c2fa0cb799bb6dd66a4c34069e456f7068e8473181139014"} Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.417490 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.419505 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c3e623f-697a-4a99-b364-dc4ade09c3d0-combined-ca-bundle\") pod \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\" (UID: \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\") " Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.419643 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8c3e623f-697a-4a99-b364-dc4ade09c3d0-scripts\") pod \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\" (UID: \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\") " Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.419722 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c3e623f-697a-4a99-b364-dc4ade09c3d0-ceilometer-tls-certs\") pod \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\" (UID: \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\") " Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.419905 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8c3e623f-697a-4a99-b364-dc4ade09c3d0-log-httpd\") pod \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\" (UID: \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\") " Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.420660 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ld57p\" (UniqueName: \"kubernetes.io/projected/8c3e623f-697a-4a99-b364-dc4ade09c3d0-kube-api-access-ld57p\") pod \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\" (UID: \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\") " Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.420622 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8c3e623f-697a-4a99-b364-dc4ade09c3d0-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "8c3e623f-697a-4a99-b364-dc4ade09c3d0" (UID: "8c3e623f-697a-4a99-b364-dc4ade09c3d0"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.421709 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8c3e623f-697a-4a99-b364-dc4ade09c3d0-sg-core-conf-yaml\") pod \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\" (UID: \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\") " Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.421884 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8c3e623f-697a-4a99-b364-dc4ade09c3d0-run-httpd\") pod \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\" (UID: \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\") " Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.421981 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c3e623f-697a-4a99-b364-dc4ade09c3d0-config-data\") pod \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\" (UID: \"8c3e623f-697a-4a99-b364-dc4ade09c3d0\") " Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.422808 4857 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8c3e623f-697a-4a99-b364-dc4ade09c3d0-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.424241 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8c3e623f-697a-4a99-b364-dc4ade09c3d0-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "8c3e623f-697a-4a99-b364-dc4ade09c3d0" (UID: "8c3e623f-697a-4a99-b364-dc4ade09c3d0"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.430507 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8c3e623f-697a-4a99-b364-dc4ade09c3d0-kube-api-access-ld57p" (OuterVolumeSpecName: "kube-api-access-ld57p") pod "8c3e623f-697a-4a99-b364-dc4ade09c3d0" (UID: "8c3e623f-697a-4a99-b364-dc4ade09c3d0"). InnerVolumeSpecName "kube-api-access-ld57p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.432435 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c3e623f-697a-4a99-b364-dc4ade09c3d0-scripts" (OuterVolumeSpecName: "scripts") pod "8c3e623f-697a-4a99-b364-dc4ade09c3d0" (UID: "8c3e623f-697a-4a99-b364-dc4ade09c3d0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.462204 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c3e623f-697a-4a99-b364-dc4ade09c3d0-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "8c3e623f-697a-4a99-b364-dc4ade09c3d0" (UID: "8c3e623f-697a-4a99-b364-dc4ade09c3d0"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.504183 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c3e623f-697a-4a99-b364-dc4ade09c3d0-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "8c3e623f-697a-4a99-b364-dc4ade09c3d0" (UID: "8c3e623f-697a-4a99-b364-dc4ade09c3d0"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.524216 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8c3e623f-697a-4a99-b364-dc4ade09c3d0-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.524258 4857 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c3e623f-697a-4a99-b364-dc4ade09c3d0-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.524276 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ld57p\" (UniqueName: \"kubernetes.io/projected/8c3e623f-697a-4a99-b364-dc4ade09c3d0-kube-api-access-ld57p\") on node \"crc\" DevicePath \"\"" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.524289 4857 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8c3e623f-697a-4a99-b364-dc4ade09c3d0-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.524301 4857 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8c3e623f-697a-4a99-b364-dc4ade09c3d0-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.553882 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c3e623f-697a-4a99-b364-dc4ade09c3d0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8c3e623f-697a-4a99-b364-dc4ade09c3d0" (UID: "8c3e623f-697a-4a99-b364-dc4ade09c3d0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.579742 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c3e623f-697a-4a99-b364-dc4ade09c3d0-config-data" (OuterVolumeSpecName: "config-data") pod "8c3e623f-697a-4a99-b364-dc4ade09c3d0" (UID: "8c3e623f-697a-4a99-b364-dc4ade09c3d0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.625555 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8c3e623f-697a-4a99-b364-dc4ade09c3d0-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.625612 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c3e623f-697a-4a99-b364-dc4ade09c3d0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.845907 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8c3e623f-697a-4a99-b364-dc4ade09c3d0","Type":"ContainerDied","Data":"6658c6de5ff28ebd90640177beec5dbbdc5c7d2236eacc2246f3e84d7bbda1ab"} Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.846312 4857 scope.go:117] "RemoveContainer" containerID="c313ea57797c6b2111c64aa9c6673d37933e1fd8510d6832d2e38d964d052b20" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.846030 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.891290 4857 scope.go:117] "RemoveContainer" containerID="817a2b90f431370e1d9271ce616a9a77b4b12aa5cdb83a686aa197fc0dc0b533" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.902167 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.917754 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.927307 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:54:05 crc kubenswrapper[4857]: E1128 13:54:05.927700 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c3e623f-697a-4a99-b364-dc4ade09c3d0" containerName="ceilometer-notification-agent" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.927721 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c3e623f-697a-4a99-b364-dc4ade09c3d0" containerName="ceilometer-notification-agent" Nov 28 13:54:05 crc kubenswrapper[4857]: E1128 13:54:05.927747 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c" containerName="extract-content" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.927755 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c" containerName="extract-content" Nov 28 13:54:05 crc kubenswrapper[4857]: E1128 13:54:05.927772 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c" containerName="registry-server" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.927780 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c" containerName="registry-server" Nov 28 13:54:05 crc kubenswrapper[4857]: E1128 13:54:05.927796 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c3e623f-697a-4a99-b364-dc4ade09c3d0" containerName="ceilometer-central-agent" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.927802 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c3e623f-697a-4a99-b364-dc4ade09c3d0" containerName="ceilometer-central-agent" Nov 28 13:54:05 crc kubenswrapper[4857]: E1128 13:54:05.927813 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c3e623f-697a-4a99-b364-dc4ade09c3d0" containerName="proxy-httpd" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.927818 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c3e623f-697a-4a99-b364-dc4ade09c3d0" containerName="proxy-httpd" Nov 28 13:54:05 crc kubenswrapper[4857]: E1128 13:54:05.927838 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c" containerName="extract-utilities" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.927845 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c" containerName="extract-utilities" Nov 28 13:54:05 crc kubenswrapper[4857]: E1128 13:54:05.927858 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c3e623f-697a-4a99-b364-dc4ade09c3d0" containerName="sg-core" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.927863 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c3e623f-697a-4a99-b364-dc4ade09c3d0" containerName="sg-core" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.928152 4857 
memory_manager.go:354] "RemoveStaleState removing state" podUID="8c3e623f-697a-4a99-b364-dc4ade09c3d0" containerName="ceilometer-notification-agent" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.928169 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c3e623f-697a-4a99-b364-dc4ade09c3d0" containerName="ceilometer-central-agent" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.928175 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c3e623f-697a-4a99-b364-dc4ade09c3d0" containerName="proxy-httpd" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.928202 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c3e623f-697a-4a99-b364-dc4ade09c3d0" containerName="sg-core" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.928213 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="33d1f6aa-3c2d-4fd0-b7c1-8a73dd007b8c" containerName="registry-server" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.930067 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.931423 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e1e13053-d5d0-4d38-8758-4ebf494ededb-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e1e13053-d5d0-4d38-8758-4ebf494ededb\") " pod="openstack/ceilometer-0" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.931499 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e1e13053-d5d0-4d38-8758-4ebf494ededb-run-httpd\") pod \"ceilometer-0\" (UID: \"e1e13053-d5d0-4d38-8758-4ebf494ededb\") " pod="openstack/ceilometer-0" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.931541 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6xvv7\" (UniqueName: \"kubernetes.io/projected/e1e13053-d5d0-4d38-8758-4ebf494ededb-kube-api-access-6xvv7\") pod \"ceilometer-0\" (UID: \"e1e13053-d5d0-4d38-8758-4ebf494ededb\") " pod="openstack/ceilometer-0" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.931672 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e1e13053-d5d0-4d38-8758-4ebf494ededb-log-httpd\") pod \"ceilometer-0\" (UID: \"e1e13053-d5d0-4d38-8758-4ebf494ededb\") " pod="openstack/ceilometer-0" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.931694 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e1e13053-d5d0-4d38-8758-4ebf494ededb-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e1e13053-d5d0-4d38-8758-4ebf494ededb\") " pod="openstack/ceilometer-0" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.931712 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e1e13053-d5d0-4d38-8758-4ebf494ededb-config-data\") pod \"ceilometer-0\" (UID: \"e1e13053-d5d0-4d38-8758-4ebf494ededb\") " pod="openstack/ceilometer-0" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.931742 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"scripts\" (UniqueName: \"kubernetes.io/secret/e1e13053-d5d0-4d38-8758-4ebf494ededb-scripts\") pod \"ceilometer-0\" (UID: \"e1e13053-d5d0-4d38-8758-4ebf494ededb\") " pod="openstack/ceilometer-0" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.931762 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e1e13053-d5d0-4d38-8758-4ebf494ededb-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e1e13053-d5d0-4d38-8758-4ebf494ededb\") " pod="openstack/ceilometer-0" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.932906 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.933241 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.936036 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.938865 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.959009 4857 scope.go:117] "RemoveContainer" containerID="9d5cf7d0ea097dc6c2fa0cb799bb6dd66a4c34069e456f7068e8473181139014" Nov 28 13:54:05 crc kubenswrapper[4857]: I1128 13:54:05.978674 4857 scope.go:117] "RemoveContainer" containerID="8a760445847bc1acc806a88969d2e416dafa158955c11eb113300fb847bb776f" Nov 28 13:54:06 crc kubenswrapper[4857]: I1128 13:54:06.033385 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e1e13053-d5d0-4d38-8758-4ebf494ededb-log-httpd\") pod \"ceilometer-0\" (UID: \"e1e13053-d5d0-4d38-8758-4ebf494ededb\") " pod="openstack/ceilometer-0" Nov 28 13:54:06 crc kubenswrapper[4857]: I1128 13:54:06.033434 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e1e13053-d5d0-4d38-8758-4ebf494ededb-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e1e13053-d5d0-4d38-8758-4ebf494ededb\") " pod="openstack/ceilometer-0" Nov 28 13:54:06 crc kubenswrapper[4857]: I1128 13:54:06.033471 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e1e13053-d5d0-4d38-8758-4ebf494ededb-config-data\") pod \"ceilometer-0\" (UID: \"e1e13053-d5d0-4d38-8758-4ebf494ededb\") " pod="openstack/ceilometer-0" Nov 28 13:54:06 crc kubenswrapper[4857]: I1128 13:54:06.033515 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e1e13053-d5d0-4d38-8758-4ebf494ededb-scripts\") pod \"ceilometer-0\" (UID: \"e1e13053-d5d0-4d38-8758-4ebf494ededb\") " pod="openstack/ceilometer-0" Nov 28 13:54:06 crc kubenswrapper[4857]: I1128 13:54:06.033541 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e1e13053-d5d0-4d38-8758-4ebf494ededb-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e1e13053-d5d0-4d38-8758-4ebf494ededb\") " pod="openstack/ceilometer-0" Nov 28 13:54:06 crc kubenswrapper[4857]: I1128 13:54:06.033596 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/e1e13053-d5d0-4d38-8758-4ebf494ededb-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e1e13053-d5d0-4d38-8758-4ebf494ededb\") " pod="openstack/ceilometer-0" Nov 28 13:54:06 crc kubenswrapper[4857]: I1128 13:54:06.033632 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e1e13053-d5d0-4d38-8758-4ebf494ededb-run-httpd\") pod \"ceilometer-0\" (UID: \"e1e13053-d5d0-4d38-8758-4ebf494ededb\") " pod="openstack/ceilometer-0" Nov 28 13:54:06 crc kubenswrapper[4857]: I1128 13:54:06.033664 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6xvv7\" (UniqueName: \"kubernetes.io/projected/e1e13053-d5d0-4d38-8758-4ebf494ededb-kube-api-access-6xvv7\") pod \"ceilometer-0\" (UID: \"e1e13053-d5d0-4d38-8758-4ebf494ededb\") " pod="openstack/ceilometer-0" Nov 28 13:54:06 crc kubenswrapper[4857]: I1128 13:54:06.033817 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e1e13053-d5d0-4d38-8758-4ebf494ededb-log-httpd\") pod \"ceilometer-0\" (UID: \"e1e13053-d5d0-4d38-8758-4ebf494ededb\") " pod="openstack/ceilometer-0" Nov 28 13:54:06 crc kubenswrapper[4857]: I1128 13:54:06.035963 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e1e13053-d5d0-4d38-8758-4ebf494ededb-run-httpd\") pod \"ceilometer-0\" (UID: \"e1e13053-d5d0-4d38-8758-4ebf494ededb\") " pod="openstack/ceilometer-0" Nov 28 13:54:06 crc kubenswrapper[4857]: I1128 13:54:06.038535 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e1e13053-d5d0-4d38-8758-4ebf494ededb-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e1e13053-d5d0-4d38-8758-4ebf494ededb\") " pod="openstack/ceilometer-0" Nov 28 13:54:06 crc kubenswrapper[4857]: I1128 13:54:06.038804 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e1e13053-d5d0-4d38-8758-4ebf494ededb-scripts\") pod \"ceilometer-0\" (UID: \"e1e13053-d5d0-4d38-8758-4ebf494ededb\") " pod="openstack/ceilometer-0" Nov 28 13:54:06 crc kubenswrapper[4857]: I1128 13:54:06.039276 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e1e13053-d5d0-4d38-8758-4ebf494ededb-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e1e13053-d5d0-4d38-8758-4ebf494ededb\") " pod="openstack/ceilometer-0" Nov 28 13:54:06 crc kubenswrapper[4857]: I1128 13:54:06.040085 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e1e13053-d5d0-4d38-8758-4ebf494ededb-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e1e13053-d5d0-4d38-8758-4ebf494ededb\") " pod="openstack/ceilometer-0" Nov 28 13:54:06 crc kubenswrapper[4857]: I1128 13:54:06.040768 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e1e13053-d5d0-4d38-8758-4ebf494ededb-config-data\") pod \"ceilometer-0\" (UID: \"e1e13053-d5d0-4d38-8758-4ebf494ededb\") " pod="openstack/ceilometer-0" Nov 28 13:54:06 crc kubenswrapper[4857]: I1128 13:54:06.050554 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6xvv7\" (UniqueName: 
\"kubernetes.io/projected/e1e13053-d5d0-4d38-8758-4ebf494ededb-kube-api-access-6xvv7\") pod \"ceilometer-0\" (UID: \"e1e13053-d5d0-4d38-8758-4ebf494ededb\") " pod="openstack/ceilometer-0" Nov 28 13:54:06 crc kubenswrapper[4857]: I1128 13:54:06.239884 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8c3e623f-697a-4a99-b364-dc4ade09c3d0" path="/var/lib/kubelet/pods/8c3e623f-697a-4a99-b364-dc4ade09c3d0/volumes" Nov 28 13:54:06 crc kubenswrapper[4857]: I1128 13:54:06.259562 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:54:06 crc kubenswrapper[4857]: W1128 13:54:06.695358 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode1e13053_d5d0_4d38_8758_4ebf494ededb.slice/crio-289009e237990e27dc94afc0837c8cdeafb1f7e41885cfba8e4cd247ede7bdc4 WatchSource:0}: Error finding container 289009e237990e27dc94afc0837c8cdeafb1f7e41885cfba8e4cd247ede7bdc4: Status 404 returned error can't find the container with id 289009e237990e27dc94afc0837c8cdeafb1f7e41885cfba8e4cd247ede7bdc4 Nov 28 13:54:06 crc kubenswrapper[4857]: I1128 13:54:06.702923 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:54:06 crc kubenswrapper[4857]: I1128 13:54:06.857585 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e1e13053-d5d0-4d38-8758-4ebf494ededb","Type":"ContainerStarted","Data":"289009e237990e27dc94afc0837c8cdeafb1f7e41885cfba8e4cd247ede7bdc4"} Nov 28 13:54:07 crc kubenswrapper[4857]: I1128 13:54:07.429351 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 13:54:07 crc kubenswrapper[4857]: I1128 13:54:07.462546 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64ab5843-1111-459f-81c5-12010ea28156-combined-ca-bundle\") pod \"64ab5843-1111-459f-81c5-12010ea28156\" (UID: \"64ab5843-1111-459f-81c5-12010ea28156\") " Nov 28 13:54:07 crc kubenswrapper[4857]: I1128 13:54:07.462789 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64ab5843-1111-459f-81c5-12010ea28156-config-data\") pod \"64ab5843-1111-459f-81c5-12010ea28156\" (UID: \"64ab5843-1111-459f-81c5-12010ea28156\") " Nov 28 13:54:07 crc kubenswrapper[4857]: I1128 13:54:07.462863 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/64ab5843-1111-459f-81c5-12010ea28156-logs\") pod \"64ab5843-1111-459f-81c5-12010ea28156\" (UID: \"64ab5843-1111-459f-81c5-12010ea28156\") " Nov 28 13:54:07 crc kubenswrapper[4857]: I1128 13:54:07.462884 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-49448\" (UniqueName: \"kubernetes.io/projected/64ab5843-1111-459f-81c5-12010ea28156-kube-api-access-49448\") pod \"64ab5843-1111-459f-81c5-12010ea28156\" (UID: \"64ab5843-1111-459f-81c5-12010ea28156\") " Nov 28 13:54:07 crc kubenswrapper[4857]: I1128 13:54:07.464703 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/64ab5843-1111-459f-81c5-12010ea28156-logs" (OuterVolumeSpecName: "logs") pod "64ab5843-1111-459f-81c5-12010ea28156" (UID: "64ab5843-1111-459f-81c5-12010ea28156"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:54:07 crc kubenswrapper[4857]: I1128 13:54:07.470672 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64ab5843-1111-459f-81c5-12010ea28156-kube-api-access-49448" (OuterVolumeSpecName: "kube-api-access-49448") pod "64ab5843-1111-459f-81c5-12010ea28156" (UID: "64ab5843-1111-459f-81c5-12010ea28156"). InnerVolumeSpecName "kube-api-access-49448". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:54:07 crc kubenswrapper[4857]: I1128 13:54:07.515143 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64ab5843-1111-459f-81c5-12010ea28156-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "64ab5843-1111-459f-81c5-12010ea28156" (UID: "64ab5843-1111-459f-81c5-12010ea28156"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:54:07 crc kubenswrapper[4857]: I1128 13:54:07.516453 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64ab5843-1111-459f-81c5-12010ea28156-config-data" (OuterVolumeSpecName: "config-data") pod "64ab5843-1111-459f-81c5-12010ea28156" (UID: "64ab5843-1111-459f-81c5-12010ea28156"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:54:07 crc kubenswrapper[4857]: I1128 13:54:07.565532 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/64ab5843-1111-459f-81c5-12010ea28156-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:54:07 crc kubenswrapper[4857]: I1128 13:54:07.565565 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/64ab5843-1111-459f-81c5-12010ea28156-logs\") on node \"crc\" DevicePath \"\"" Nov 28 13:54:07 crc kubenswrapper[4857]: I1128 13:54:07.565575 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-49448\" (UniqueName: \"kubernetes.io/projected/64ab5843-1111-459f-81c5-12010ea28156-kube-api-access-49448\") on node \"crc\" DevicePath \"\"" Nov 28 13:54:07 crc kubenswrapper[4857]: I1128 13:54:07.565586 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/64ab5843-1111-459f-81c5-12010ea28156-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:54:07 crc kubenswrapper[4857]: I1128 13:54:07.868714 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e1e13053-d5d0-4d38-8758-4ebf494ededb","Type":"ContainerStarted","Data":"c1dcaef15d4aab0862029c1c380665fb49a8e7b2e70709e10b934af5bb464135"} Nov 28 13:54:07 crc kubenswrapper[4857]: I1128 13:54:07.870969 4857 generic.go:334] "Generic (PLEG): container finished" podID="64ab5843-1111-459f-81c5-12010ea28156" containerID="1bbc418789dffbbc77373f48c99e06529ff402e40c50288a23ec2f0e2065580c" exitCode=0 Nov 28 13:54:07 crc kubenswrapper[4857]: I1128 13:54:07.871010 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"64ab5843-1111-459f-81c5-12010ea28156","Type":"ContainerDied","Data":"1bbc418789dffbbc77373f48c99e06529ff402e40c50288a23ec2f0e2065580c"} Nov 28 13:54:07 crc kubenswrapper[4857]: I1128 13:54:07.871064 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"64ab5843-1111-459f-81c5-12010ea28156","Type":"ContainerDied","Data":"b538ef043517fd58d78ab8089e04d63bfacb10b481e64602b124c7d543f9670f"} Nov 28 13:54:07 crc kubenswrapper[4857]: I1128 13:54:07.871065 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 13:54:07 crc kubenswrapper[4857]: I1128 13:54:07.871083 4857 scope.go:117] "RemoveContainer" containerID="1bbc418789dffbbc77373f48c99e06529ff402e40c50288a23ec2f0e2065580c" Nov 28 13:54:07 crc kubenswrapper[4857]: I1128 13:54:07.895692 4857 scope.go:117] "RemoveContainer" containerID="091a1ea7a55a8394715c1145e6b70768a015c0c8e638ac8cb3aa73892cbe3860" Nov 28 13:54:07 crc kubenswrapper[4857]: I1128 13:54:07.909967 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 13:54:07 crc kubenswrapper[4857]: I1128 13:54:07.918548 4857 scope.go:117] "RemoveContainer" containerID="1bbc418789dffbbc77373f48c99e06529ff402e40c50288a23ec2f0e2065580c" Nov 28 13:54:07 crc kubenswrapper[4857]: E1128 13:54:07.919066 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1bbc418789dffbbc77373f48c99e06529ff402e40c50288a23ec2f0e2065580c\": container with ID starting with 1bbc418789dffbbc77373f48c99e06529ff402e40c50288a23ec2f0e2065580c not found: ID does not exist" containerID="1bbc418789dffbbc77373f48c99e06529ff402e40c50288a23ec2f0e2065580c" Nov 28 13:54:07 crc kubenswrapper[4857]: I1128 13:54:07.919133 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1bbc418789dffbbc77373f48c99e06529ff402e40c50288a23ec2f0e2065580c"} err="failed to get container status \"1bbc418789dffbbc77373f48c99e06529ff402e40c50288a23ec2f0e2065580c\": rpc error: code = NotFound desc = could not find container \"1bbc418789dffbbc77373f48c99e06529ff402e40c50288a23ec2f0e2065580c\": container with ID starting with 1bbc418789dffbbc77373f48c99e06529ff402e40c50288a23ec2f0e2065580c not found: ID does not exist" Nov 28 13:54:07 crc kubenswrapper[4857]: I1128 13:54:07.919181 4857 scope.go:117] "RemoveContainer" containerID="091a1ea7a55a8394715c1145e6b70768a015c0c8e638ac8cb3aa73892cbe3860" Nov 28 13:54:07 crc kubenswrapper[4857]: E1128 13:54:07.919536 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"091a1ea7a55a8394715c1145e6b70768a015c0c8e638ac8cb3aa73892cbe3860\": container with ID starting with 091a1ea7a55a8394715c1145e6b70768a015c0c8e638ac8cb3aa73892cbe3860 not found: ID does not exist" containerID="091a1ea7a55a8394715c1145e6b70768a015c0c8e638ac8cb3aa73892cbe3860" Nov 28 13:54:07 crc kubenswrapper[4857]: I1128 13:54:07.919577 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"091a1ea7a55a8394715c1145e6b70768a015c0c8e638ac8cb3aa73892cbe3860"} err="failed to get container status \"091a1ea7a55a8394715c1145e6b70768a015c0c8e638ac8cb3aa73892cbe3860\": rpc error: code = NotFound desc = could not find container \"091a1ea7a55a8394715c1145e6b70768a015c0c8e638ac8cb3aa73892cbe3860\": container with ID starting with 091a1ea7a55a8394715c1145e6b70768a015c0c8e638ac8cb3aa73892cbe3860 not found: ID does not exist" Nov 28 13:54:07 crc kubenswrapper[4857]: I1128 13:54:07.920270 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 28 13:54:07 crc kubenswrapper[4857]: I1128 13:54:07.972058 4857 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openstack/nova-api-0"] Nov 28 13:54:07 crc kubenswrapper[4857]: E1128 13:54:07.972442 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64ab5843-1111-459f-81c5-12010ea28156" containerName="nova-api-log" Nov 28 13:54:07 crc kubenswrapper[4857]: I1128 13:54:07.972460 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="64ab5843-1111-459f-81c5-12010ea28156" containerName="nova-api-log" Nov 28 13:54:07 crc kubenswrapper[4857]: E1128 13:54:07.972499 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64ab5843-1111-459f-81c5-12010ea28156" containerName="nova-api-api" Nov 28 13:54:07 crc kubenswrapper[4857]: I1128 13:54:07.972508 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="64ab5843-1111-459f-81c5-12010ea28156" containerName="nova-api-api" Nov 28 13:54:07 crc kubenswrapper[4857]: I1128 13:54:07.972718 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="64ab5843-1111-459f-81c5-12010ea28156" containerName="nova-api-api" Nov 28 13:54:07 crc kubenswrapper[4857]: I1128 13:54:07.972734 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="64ab5843-1111-459f-81c5-12010ea28156" containerName="nova-api-log" Nov 28 13:54:07 crc kubenswrapper[4857]: I1128 13:54:07.974543 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 13:54:07 crc kubenswrapper[4857]: I1128 13:54:07.977935 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 13:54:07 crc kubenswrapper[4857]: I1128 13:54:07.978101 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 28 13:54:07 crc kubenswrapper[4857]: I1128 13:54:07.978214 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 28 13:54:08 crc kubenswrapper[4857]: I1128 13:54:08.002165 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 13:54:08 crc kubenswrapper[4857]: I1128 13:54:08.078017 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jzxl9\" (UniqueName: \"kubernetes.io/projected/f225d3b6-d482-467b-a164-7fcd7f5b8cd1-kube-api-access-jzxl9\") pod \"nova-api-0\" (UID: \"f225d3b6-d482-467b-a164-7fcd7f5b8cd1\") " pod="openstack/nova-api-0" Nov 28 13:54:08 crc kubenswrapper[4857]: I1128 13:54:08.078270 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f225d3b6-d482-467b-a164-7fcd7f5b8cd1-config-data\") pod \"nova-api-0\" (UID: \"f225d3b6-d482-467b-a164-7fcd7f5b8cd1\") " pod="openstack/nova-api-0" Nov 28 13:54:08 crc kubenswrapper[4857]: I1128 13:54:08.078342 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f225d3b6-d482-467b-a164-7fcd7f5b8cd1-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"f225d3b6-d482-467b-a164-7fcd7f5b8cd1\") " pod="openstack/nova-api-0" Nov 28 13:54:08 crc kubenswrapper[4857]: I1128 13:54:08.078389 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f225d3b6-d482-467b-a164-7fcd7f5b8cd1-logs\") pod \"nova-api-0\" (UID: \"f225d3b6-d482-467b-a164-7fcd7f5b8cd1\") " pod="openstack/nova-api-0" Nov 28 13:54:08 crc kubenswrapper[4857]: 
I1128 13:54:08.078557 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f225d3b6-d482-467b-a164-7fcd7f5b8cd1-public-tls-certs\") pod \"nova-api-0\" (UID: \"f225d3b6-d482-467b-a164-7fcd7f5b8cd1\") " pod="openstack/nova-api-0" Nov 28 13:54:08 crc kubenswrapper[4857]: I1128 13:54:08.078582 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f225d3b6-d482-467b-a164-7fcd7f5b8cd1-internal-tls-certs\") pod \"nova-api-0\" (UID: \"f225d3b6-d482-467b-a164-7fcd7f5b8cd1\") " pod="openstack/nova-api-0" Nov 28 13:54:08 crc kubenswrapper[4857]: I1128 13:54:08.148204 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 28 13:54:08 crc kubenswrapper[4857]: I1128 13:54:08.168135 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 28 13:54:08 crc kubenswrapper[4857]: I1128 13:54:08.181435 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f225d3b6-d482-467b-a164-7fcd7f5b8cd1-public-tls-certs\") pod \"nova-api-0\" (UID: \"f225d3b6-d482-467b-a164-7fcd7f5b8cd1\") " pod="openstack/nova-api-0" Nov 28 13:54:08 crc kubenswrapper[4857]: I1128 13:54:08.181479 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f225d3b6-d482-467b-a164-7fcd7f5b8cd1-internal-tls-certs\") pod \"nova-api-0\" (UID: \"f225d3b6-d482-467b-a164-7fcd7f5b8cd1\") " pod="openstack/nova-api-0" Nov 28 13:54:08 crc kubenswrapper[4857]: I1128 13:54:08.181543 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jzxl9\" (UniqueName: \"kubernetes.io/projected/f225d3b6-d482-467b-a164-7fcd7f5b8cd1-kube-api-access-jzxl9\") pod \"nova-api-0\" (UID: \"f225d3b6-d482-467b-a164-7fcd7f5b8cd1\") " pod="openstack/nova-api-0" Nov 28 13:54:08 crc kubenswrapper[4857]: I1128 13:54:08.181652 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f225d3b6-d482-467b-a164-7fcd7f5b8cd1-config-data\") pod \"nova-api-0\" (UID: \"f225d3b6-d482-467b-a164-7fcd7f5b8cd1\") " pod="openstack/nova-api-0" Nov 28 13:54:08 crc kubenswrapper[4857]: I1128 13:54:08.182254 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f225d3b6-d482-467b-a164-7fcd7f5b8cd1-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"f225d3b6-d482-467b-a164-7fcd7f5b8cd1\") " pod="openstack/nova-api-0" Nov 28 13:54:08 crc kubenswrapper[4857]: I1128 13:54:08.182383 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f225d3b6-d482-467b-a164-7fcd7f5b8cd1-logs\") pod \"nova-api-0\" (UID: \"f225d3b6-d482-467b-a164-7fcd7f5b8cd1\") " pod="openstack/nova-api-0" Nov 28 13:54:08 crc kubenswrapper[4857]: I1128 13:54:08.182718 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f225d3b6-d482-467b-a164-7fcd7f5b8cd1-logs\") pod \"nova-api-0\" (UID: \"f225d3b6-d482-467b-a164-7fcd7f5b8cd1\") " pod="openstack/nova-api-0" Nov 28 13:54:08 crc kubenswrapper[4857]: I1128 
13:54:08.188599 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f225d3b6-d482-467b-a164-7fcd7f5b8cd1-internal-tls-certs\") pod \"nova-api-0\" (UID: \"f225d3b6-d482-467b-a164-7fcd7f5b8cd1\") " pod="openstack/nova-api-0" Nov 28 13:54:08 crc kubenswrapper[4857]: I1128 13:54:08.189259 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f225d3b6-d482-467b-a164-7fcd7f5b8cd1-config-data\") pod \"nova-api-0\" (UID: \"f225d3b6-d482-467b-a164-7fcd7f5b8cd1\") " pod="openstack/nova-api-0" Nov 28 13:54:08 crc kubenswrapper[4857]: I1128 13:54:08.190653 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f225d3b6-d482-467b-a164-7fcd7f5b8cd1-public-tls-certs\") pod \"nova-api-0\" (UID: \"f225d3b6-d482-467b-a164-7fcd7f5b8cd1\") " pod="openstack/nova-api-0" Nov 28 13:54:08 crc kubenswrapper[4857]: I1128 13:54:08.193924 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f225d3b6-d482-467b-a164-7fcd7f5b8cd1-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"f225d3b6-d482-467b-a164-7fcd7f5b8cd1\") " pod="openstack/nova-api-0" Nov 28 13:54:08 crc kubenswrapper[4857]: I1128 13:54:08.199757 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jzxl9\" (UniqueName: \"kubernetes.io/projected/f225d3b6-d482-467b-a164-7fcd7f5b8cd1-kube-api-access-jzxl9\") pod \"nova-api-0\" (UID: \"f225d3b6-d482-467b-a164-7fcd7f5b8cd1\") " pod="openstack/nova-api-0" Nov 28 13:54:08 crc kubenswrapper[4857]: I1128 13:54:08.239194 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="64ab5843-1111-459f-81c5-12010ea28156" path="/var/lib/kubelet/pods/64ab5843-1111-459f-81c5-12010ea28156/volumes" Nov 28 13:54:08 crc kubenswrapper[4857]: I1128 13:54:08.315135 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 13:54:08 crc kubenswrapper[4857]: I1128 13:54:08.837592 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 13:54:08 crc kubenswrapper[4857]: W1128 13:54:08.843458 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf225d3b6_d482_467b_a164_7fcd7f5b8cd1.slice/crio-46edcfeb0fc2a9304a57d4a391b693507f159f778bd305a9f4143c32312e5294 WatchSource:0}: Error finding container 46edcfeb0fc2a9304a57d4a391b693507f159f778bd305a9f4143c32312e5294: Status 404 returned error can't find the container with id 46edcfeb0fc2a9304a57d4a391b693507f159f778bd305a9f4143c32312e5294 Nov 28 13:54:08 crc kubenswrapper[4857]: I1128 13:54:08.887093 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f225d3b6-d482-467b-a164-7fcd7f5b8cd1","Type":"ContainerStarted","Data":"46edcfeb0fc2a9304a57d4a391b693507f159f778bd305a9f4143c32312e5294"} Nov 28 13:54:08 crc kubenswrapper[4857]: I1128 13:54:08.907666 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 28 13:54:09 crc kubenswrapper[4857]: I1128 13:54:09.043505 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-2d754"] Nov 28 13:54:09 crc kubenswrapper[4857]: I1128 13:54:09.044670 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-2d754" Nov 28 13:54:09 crc kubenswrapper[4857]: I1128 13:54:09.047641 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 28 13:54:09 crc kubenswrapper[4857]: I1128 13:54:09.047830 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 28 13:54:09 crc kubenswrapper[4857]: I1128 13:54:09.055471 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-2d754"] Nov 28 13:54:09 crc kubenswrapper[4857]: I1128 13:54:09.234113 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-2d754\" (UID: \"6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9\") " pod="openstack/nova-cell1-cell-mapping-2d754" Nov 28 13:54:09 crc kubenswrapper[4857]: I1128 13:54:09.234390 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nhg98\" (UniqueName: \"kubernetes.io/projected/6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9-kube-api-access-nhg98\") pod \"nova-cell1-cell-mapping-2d754\" (UID: \"6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9\") " pod="openstack/nova-cell1-cell-mapping-2d754" Nov 28 13:54:09 crc kubenswrapper[4857]: I1128 13:54:09.234449 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9-scripts\") pod \"nova-cell1-cell-mapping-2d754\" (UID: \"6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9\") " pod="openstack/nova-cell1-cell-mapping-2d754" Nov 28 13:54:09 crc kubenswrapper[4857]: I1128 13:54:09.234527 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9-config-data\") pod \"nova-cell1-cell-mapping-2d754\" (UID: \"6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9\") " pod="openstack/nova-cell1-cell-mapping-2d754" Nov 28 13:54:09 crc kubenswrapper[4857]: I1128 13:54:09.336516 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-2d754\" (UID: \"6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9\") " pod="openstack/nova-cell1-cell-mapping-2d754" Nov 28 13:54:09 crc kubenswrapper[4857]: I1128 13:54:09.336569 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nhg98\" (UniqueName: \"kubernetes.io/projected/6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9-kube-api-access-nhg98\") pod \"nova-cell1-cell-mapping-2d754\" (UID: \"6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9\") " pod="openstack/nova-cell1-cell-mapping-2d754" Nov 28 13:54:09 crc kubenswrapper[4857]: I1128 13:54:09.336620 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9-scripts\") pod \"nova-cell1-cell-mapping-2d754\" (UID: \"6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9\") " pod="openstack/nova-cell1-cell-mapping-2d754" Nov 28 13:54:09 crc kubenswrapper[4857]: I1128 13:54:09.336700 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9-config-data\") pod \"nova-cell1-cell-mapping-2d754\" (UID: \"6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9\") " pod="openstack/nova-cell1-cell-mapping-2d754" Nov 28 13:54:09 crc kubenswrapper[4857]: I1128 13:54:09.342661 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9-config-data\") pod \"nova-cell1-cell-mapping-2d754\" (UID: \"6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9\") " pod="openstack/nova-cell1-cell-mapping-2d754" Nov 28 13:54:09 crc kubenswrapper[4857]: I1128 13:54:09.345021 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9-scripts\") pod \"nova-cell1-cell-mapping-2d754\" (UID: \"6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9\") " pod="openstack/nova-cell1-cell-mapping-2d754" Nov 28 13:54:09 crc kubenswrapper[4857]: I1128 13:54:09.352240 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-2d754\" (UID: \"6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9\") " pod="openstack/nova-cell1-cell-mapping-2d754" Nov 28 13:54:09 crc kubenswrapper[4857]: I1128 13:54:09.357207 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nhg98\" (UniqueName: \"kubernetes.io/projected/6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9-kube-api-access-nhg98\") pod \"nova-cell1-cell-mapping-2d754\" (UID: \"6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9\") " pod="openstack/nova-cell1-cell-mapping-2d754" Nov 28 13:54:09 crc kubenswrapper[4857]: I1128 13:54:09.365700 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-2d754" Nov 28 13:54:09 crc kubenswrapper[4857]: I1128 13:54:09.837966 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-2d754"] Nov 28 13:54:09 crc kubenswrapper[4857]: I1128 13:54:09.909015 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-2d754" event={"ID":"6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9","Type":"ContainerStarted","Data":"05aaa84fa92c97ff30259692e6fbb84496a3d4cae4010d7897ff65388fd57a4b"} Nov 28 13:54:09 crc kubenswrapper[4857]: I1128 13:54:09.911774 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f225d3b6-d482-467b-a164-7fcd7f5b8cd1","Type":"ContainerStarted","Data":"eecbd4d5b3045a89d8472f8d737bade9ffbb2d321d1b7326722483b3c1a61809"} Nov 28 13:54:09 crc kubenswrapper[4857]: I1128 13:54:09.911843 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f225d3b6-d482-467b-a164-7fcd7f5b8cd1","Type":"ContainerStarted","Data":"5763309fa86a0f99a017dac7375f11f5ad714499cd1804774fc633ee2c2ed72d"} Nov 28 13:54:09 crc kubenswrapper[4857]: I1128 13:54:09.916497 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e1e13053-d5d0-4d38-8758-4ebf494ededb","Type":"ContainerStarted","Data":"5f70629b403f96250df7835408fa39ed17b8aa7f4321d155cb4b5928ca498a2f"} Nov 28 13:54:09 crc kubenswrapper[4857]: I1128 13:54:09.932011 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.931990651 podStartE2EDuration="2.931990651s" podCreationTimestamp="2025-11-28 13:54:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:54:09.927579405 +0000 UTC m=+1500.051520842" watchObservedRunningTime="2025-11-28 13:54:09.931990651 +0000 UTC m=+1500.055932088" Nov 28 13:54:10 crc kubenswrapper[4857]: I1128 13:54:10.928283 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-2d754" event={"ID":"6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9","Type":"ContainerStarted","Data":"f54f8a6fd4e0e294545a7856447cceacaee0009bf4bf76a318b17263cefceaa2"} Nov 28 13:54:10 crc kubenswrapper[4857]: I1128 13:54:10.932454 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e1e13053-d5d0-4d38-8758-4ebf494ededb","Type":"ContainerStarted","Data":"f633a7fccbea552027e16642eeadd7827f3b2abc1b69605dcaca7883382f640d"} Nov 28 13:54:10 crc kubenswrapper[4857]: I1128 13:54:10.955893 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-2d754" podStartSLOduration=1.955870507 podStartE2EDuration="1.955870507s" podCreationTimestamp="2025-11-28 13:54:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:54:10.944432276 +0000 UTC m=+1501.068373753" watchObservedRunningTime="2025-11-28 13:54:10.955870507 +0000 UTC m=+1501.079811954" Nov 28 13:54:11 crc kubenswrapper[4857]: I1128 13:54:11.348301 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-cd5cbd7b9-7frjn" Nov 28 13:54:11 crc kubenswrapper[4857]: I1128 13:54:11.435922 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-qcdch"] Nov 28 13:54:11 crc 
kubenswrapper[4857]: I1128 13:54:11.436927 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-bccf8f775-qcdch" podUID="d8a47a38-60f9-4084-8341-94da42266558" containerName="dnsmasq-dns" containerID="cri-o://a38af6fde6496d26130d969ef4105a2c9b98637c9a5a25701815126e08a86976" gracePeriod=10 Nov 28 13:54:11 crc kubenswrapper[4857]: I1128 13:54:11.944730 4857 generic.go:334] "Generic (PLEG): container finished" podID="d8a47a38-60f9-4084-8341-94da42266558" containerID="a38af6fde6496d26130d969ef4105a2c9b98637c9a5a25701815126e08a86976" exitCode=0 Nov 28 13:54:11 crc kubenswrapper[4857]: I1128 13:54:11.944837 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-qcdch" event={"ID":"d8a47a38-60f9-4084-8341-94da42266558","Type":"ContainerDied","Data":"a38af6fde6496d26130d969ef4105a2c9b98637c9a5a25701815126e08a86976"} Nov 28 13:54:11 crc kubenswrapper[4857]: I1128 13:54:11.945094 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-bccf8f775-qcdch" event={"ID":"d8a47a38-60f9-4084-8341-94da42266558","Type":"ContainerDied","Data":"5d8d2b1d0e37f27652e547e4539a5c1bdd6f2740ae2c10eeca6eee8e0c8fe9e1"} Nov 28 13:54:11 crc kubenswrapper[4857]: I1128 13:54:11.945112 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5d8d2b1d0e37f27652e547e4539a5c1bdd6f2740ae2c10eeca6eee8e0c8fe9e1" Nov 28 13:54:11 crc kubenswrapper[4857]: I1128 13:54:11.948437 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e1e13053-d5d0-4d38-8758-4ebf494ededb","Type":"ContainerStarted","Data":"b02b48f6a3bd9ecdaa5bd535ab743fbe80646abdb2b8e238d7f4f2f8e3dc0a5a"} Nov 28 13:54:11 crc kubenswrapper[4857]: I1128 13:54:11.976487 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-qcdch" Nov 28 13:54:11 crc kubenswrapper[4857]: I1128 13:54:11.981306 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.403776394 podStartE2EDuration="6.981283805s" podCreationTimestamp="2025-11-28 13:54:05 +0000 UTC" firstStartedPulling="2025-11-28 13:54:06.698051056 +0000 UTC m=+1496.821992493" lastFinishedPulling="2025-11-28 13:54:11.275558467 +0000 UTC m=+1501.399499904" observedRunningTime="2025-11-28 13:54:11.975569974 +0000 UTC m=+1502.099511421" watchObservedRunningTime="2025-11-28 13:54:11.981283805 +0000 UTC m=+1502.105225242" Nov 28 13:54:12 crc kubenswrapper[4857]: I1128 13:54:12.098087 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d8a47a38-60f9-4084-8341-94da42266558-dns-swift-storage-0\") pod \"d8a47a38-60f9-4084-8341-94da42266558\" (UID: \"d8a47a38-60f9-4084-8341-94da42266558\") " Nov 28 13:54:12 crc kubenswrapper[4857]: I1128 13:54:12.098163 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8a47a38-60f9-4084-8341-94da42266558-config\") pod \"d8a47a38-60f9-4084-8341-94da42266558\" (UID: \"d8a47a38-60f9-4084-8341-94da42266558\") " Nov 28 13:54:12 crc kubenswrapper[4857]: I1128 13:54:12.098221 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d8a47a38-60f9-4084-8341-94da42266558-ovsdbserver-nb\") pod \"d8a47a38-60f9-4084-8341-94da42266558\" (UID: \"d8a47a38-60f9-4084-8341-94da42266558\") " Nov 28 13:54:12 crc kubenswrapper[4857]: I1128 13:54:12.098249 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d8a47a38-60f9-4084-8341-94da42266558-ovsdbserver-sb\") pod \"d8a47a38-60f9-4084-8341-94da42266558\" (UID: \"d8a47a38-60f9-4084-8341-94da42266558\") " Nov 28 13:54:12 crc kubenswrapper[4857]: I1128 13:54:12.098287 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d8a47a38-60f9-4084-8341-94da42266558-dns-svc\") pod \"d8a47a38-60f9-4084-8341-94da42266558\" (UID: \"d8a47a38-60f9-4084-8341-94da42266558\") " Nov 28 13:54:12 crc kubenswrapper[4857]: I1128 13:54:12.098380 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4v2l6\" (UniqueName: \"kubernetes.io/projected/d8a47a38-60f9-4084-8341-94da42266558-kube-api-access-4v2l6\") pod \"d8a47a38-60f9-4084-8341-94da42266558\" (UID: \"d8a47a38-60f9-4084-8341-94da42266558\") " Nov 28 13:54:12 crc kubenswrapper[4857]: I1128 13:54:12.103345 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8a47a38-60f9-4084-8341-94da42266558-kube-api-access-4v2l6" (OuterVolumeSpecName: "kube-api-access-4v2l6") pod "d8a47a38-60f9-4084-8341-94da42266558" (UID: "d8a47a38-60f9-4084-8341-94da42266558"). InnerVolumeSpecName "kube-api-access-4v2l6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:54:12 crc kubenswrapper[4857]: I1128 13:54:12.152057 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d8a47a38-60f9-4084-8341-94da42266558-config" (OuterVolumeSpecName: "config") pod "d8a47a38-60f9-4084-8341-94da42266558" (UID: "d8a47a38-60f9-4084-8341-94da42266558"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:54:12 crc kubenswrapper[4857]: I1128 13:54:12.154140 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d8a47a38-60f9-4084-8341-94da42266558-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "d8a47a38-60f9-4084-8341-94da42266558" (UID: "d8a47a38-60f9-4084-8341-94da42266558"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:54:12 crc kubenswrapper[4857]: I1128 13:54:12.158712 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d8a47a38-60f9-4084-8341-94da42266558-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d8a47a38-60f9-4084-8341-94da42266558" (UID: "d8a47a38-60f9-4084-8341-94da42266558"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:54:12 crc kubenswrapper[4857]: I1128 13:54:12.159733 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d8a47a38-60f9-4084-8341-94da42266558-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d8a47a38-60f9-4084-8341-94da42266558" (UID: "d8a47a38-60f9-4084-8341-94da42266558"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:54:12 crc kubenswrapper[4857]: I1128 13:54:12.163154 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d8a47a38-60f9-4084-8341-94da42266558-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d8a47a38-60f9-4084-8341-94da42266558" (UID: "d8a47a38-60f9-4084-8341-94da42266558"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:54:12 crc kubenswrapper[4857]: I1128 13:54:12.200160 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4v2l6\" (UniqueName: \"kubernetes.io/projected/d8a47a38-60f9-4084-8341-94da42266558-kube-api-access-4v2l6\") on node \"crc\" DevicePath \"\"" Nov 28 13:54:12 crc kubenswrapper[4857]: I1128 13:54:12.200452 4857 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d8a47a38-60f9-4084-8341-94da42266558-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 13:54:12 crc kubenswrapper[4857]: I1128 13:54:12.200553 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d8a47a38-60f9-4084-8341-94da42266558-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:54:12 crc kubenswrapper[4857]: I1128 13:54:12.200622 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d8a47a38-60f9-4084-8341-94da42266558-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 13:54:12 crc kubenswrapper[4857]: I1128 13:54:12.200686 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d8a47a38-60f9-4084-8341-94da42266558-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 13:54:12 crc kubenswrapper[4857]: I1128 13:54:12.200751 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d8a47a38-60f9-4084-8341-94da42266558-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 13:54:12 crc kubenswrapper[4857]: I1128 13:54:12.957100 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-bccf8f775-qcdch" Nov 28 13:54:12 crc kubenswrapper[4857]: I1128 13:54:12.957228 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 13:54:12 crc kubenswrapper[4857]: I1128 13:54:12.982462 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-qcdch"] Nov 28 13:54:12 crc kubenswrapper[4857]: I1128 13:54:12.992880 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-bccf8f775-qcdch"] Nov 28 13:54:14 crc kubenswrapper[4857]: I1128 13:54:14.254935 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8a47a38-60f9-4084-8341-94da42266558" path="/var/lib/kubelet/pods/d8a47a38-60f9-4084-8341-94da42266558/volumes" Nov 28 13:54:14 crc kubenswrapper[4857]: I1128 13:54:14.979370 4857 generic.go:334] "Generic (PLEG): container finished" podID="6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9" containerID="f54f8a6fd4e0e294545a7856447cceacaee0009bf4bf76a318b17263cefceaa2" exitCode=0 Nov 28 13:54:14 crc kubenswrapper[4857]: I1128 13:54:14.979411 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-2d754" event={"ID":"6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9","Type":"ContainerDied","Data":"f54f8a6fd4e0e294545a7856447cceacaee0009bf4bf76a318b17263cefceaa2"} Nov 28 13:54:16 crc kubenswrapper[4857]: I1128 13:54:16.342839 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-2d754" Nov 28 13:54:16 crc kubenswrapper[4857]: I1128 13:54:16.481629 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9-combined-ca-bundle\") pod \"6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9\" (UID: \"6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9\") " Nov 28 13:54:16 crc kubenswrapper[4857]: I1128 13:54:16.481701 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9-config-data\") pod \"6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9\" (UID: \"6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9\") " Nov 28 13:54:16 crc kubenswrapper[4857]: I1128 13:54:16.481805 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nhg98\" (UniqueName: \"kubernetes.io/projected/6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9-kube-api-access-nhg98\") pod \"6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9\" (UID: \"6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9\") " Nov 28 13:54:16 crc kubenswrapper[4857]: I1128 13:54:16.482266 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9-scripts\") pod \"6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9\" (UID: \"6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9\") " Nov 28 13:54:16 crc kubenswrapper[4857]: I1128 13:54:16.487848 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9-kube-api-access-nhg98" (OuterVolumeSpecName: "kube-api-access-nhg98") pod "6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9" (UID: "6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9"). InnerVolumeSpecName "kube-api-access-nhg98". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:54:16 crc kubenswrapper[4857]: I1128 13:54:16.488345 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9-scripts" (OuterVolumeSpecName: "scripts") pod "6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9" (UID: "6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:54:16 crc kubenswrapper[4857]: I1128 13:54:16.513234 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9" (UID: "6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:54:16 crc kubenswrapper[4857]: I1128 13:54:16.530292 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9-config-data" (OuterVolumeSpecName: "config-data") pod "6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9" (UID: "6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:54:16 crc kubenswrapper[4857]: I1128 13:54:16.585758 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:54:16 crc kubenswrapper[4857]: I1128 13:54:16.585809 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:54:16 crc kubenswrapper[4857]: I1128 13:54:16.585831 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:54:16 crc kubenswrapper[4857]: I1128 13:54:16.585851 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nhg98\" (UniqueName: \"kubernetes.io/projected/6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9-kube-api-access-nhg98\") on node \"crc\" DevicePath \"\"" Nov 28 13:54:16 crc kubenswrapper[4857]: I1128 13:54:16.997509 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-2d754" event={"ID":"6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9","Type":"ContainerDied","Data":"05aaa84fa92c97ff30259692e6fbb84496a3d4cae4010d7897ff65388fd57a4b"} Nov 28 13:54:16 crc kubenswrapper[4857]: I1128 13:54:16.997569 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="05aaa84fa92c97ff30259692e6fbb84496a3d4cae4010d7897ff65388fd57a4b" Nov 28 13:54:16 crc kubenswrapper[4857]: I1128 13:54:16.997606 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-2d754" Nov 28 13:54:17 crc kubenswrapper[4857]: I1128 13:54:17.188176 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 13:54:17 crc kubenswrapper[4857]: I1128 13:54:17.189092 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="f225d3b6-d482-467b-a164-7fcd7f5b8cd1" containerName="nova-api-api" containerID="cri-o://eecbd4d5b3045a89d8472f8d737bade9ffbb2d321d1b7326722483b3c1a61809" gracePeriod=30 Nov 28 13:54:17 crc kubenswrapper[4857]: I1128 13:54:17.189318 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="f225d3b6-d482-467b-a164-7fcd7f5b8cd1" containerName="nova-api-log" containerID="cri-o://5763309fa86a0f99a017dac7375f11f5ad714499cd1804774fc633ee2c2ed72d" gracePeriod=30 Nov 28 13:54:17 crc kubenswrapper[4857]: I1128 13:54:17.225505 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 13:54:17 crc kubenswrapper[4857]: I1128 13:54:17.225801 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="0a7818ef-92f1-45d7-8e82-a12bd9e52025" containerName="nova-metadata-log" containerID="cri-o://9c4532426545e665d29d95b5eb288b5f7fb85f3d59af5c678ec8a6ee12b47158" gracePeriod=30 Nov 28 13:54:17 crc kubenswrapper[4857]: I1128 13:54:17.225896 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="0a7818ef-92f1-45d7-8e82-a12bd9e52025" containerName="nova-metadata-metadata" containerID="cri-o://d659fc88c7c531aad673ca9bd44a4ff85223bd937030442e1a2ed21300480215" gracePeriod=30 Nov 28 13:54:17 crc 
kubenswrapper[4857]: I1128 13:54:17.240179 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 13:54:17 crc kubenswrapper[4857]: I1128 13:54:17.240425 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="f52472a6-ce63-4192-9ef4-3d77b633ef70" containerName="nova-scheduler-scheduler" containerID="cri-o://9f84f9d0085e2e21c564f7ffe093e88ebd9a92661819a1d7ef85e3d19ef948f0" gracePeriod=30 Nov 28 13:54:17 crc kubenswrapper[4857]: I1128 13:54:17.759730 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 13:54:17 crc kubenswrapper[4857]: I1128 13:54:17.914771 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jzxl9\" (UniqueName: \"kubernetes.io/projected/f225d3b6-d482-467b-a164-7fcd7f5b8cd1-kube-api-access-jzxl9\") pod \"f225d3b6-d482-467b-a164-7fcd7f5b8cd1\" (UID: \"f225d3b6-d482-467b-a164-7fcd7f5b8cd1\") " Nov 28 13:54:17 crc kubenswrapper[4857]: I1128 13:54:17.914871 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f225d3b6-d482-467b-a164-7fcd7f5b8cd1-config-data\") pod \"f225d3b6-d482-467b-a164-7fcd7f5b8cd1\" (UID: \"f225d3b6-d482-467b-a164-7fcd7f5b8cd1\") " Nov 28 13:54:17 crc kubenswrapper[4857]: I1128 13:54:17.914905 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f225d3b6-d482-467b-a164-7fcd7f5b8cd1-internal-tls-certs\") pod \"f225d3b6-d482-467b-a164-7fcd7f5b8cd1\" (UID: \"f225d3b6-d482-467b-a164-7fcd7f5b8cd1\") " Nov 28 13:54:17 crc kubenswrapper[4857]: I1128 13:54:17.914973 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f225d3b6-d482-467b-a164-7fcd7f5b8cd1-combined-ca-bundle\") pod \"f225d3b6-d482-467b-a164-7fcd7f5b8cd1\" (UID: \"f225d3b6-d482-467b-a164-7fcd7f5b8cd1\") " Nov 28 13:54:17 crc kubenswrapper[4857]: I1128 13:54:17.914993 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f225d3b6-d482-467b-a164-7fcd7f5b8cd1-logs\") pod \"f225d3b6-d482-467b-a164-7fcd7f5b8cd1\" (UID: \"f225d3b6-d482-467b-a164-7fcd7f5b8cd1\") " Nov 28 13:54:17 crc kubenswrapper[4857]: I1128 13:54:17.915083 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f225d3b6-d482-467b-a164-7fcd7f5b8cd1-public-tls-certs\") pod \"f225d3b6-d482-467b-a164-7fcd7f5b8cd1\" (UID: \"f225d3b6-d482-467b-a164-7fcd7f5b8cd1\") " Nov 28 13:54:17 crc kubenswrapper[4857]: I1128 13:54:17.915652 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f225d3b6-d482-467b-a164-7fcd7f5b8cd1-logs" (OuterVolumeSpecName: "logs") pod "f225d3b6-d482-467b-a164-7fcd7f5b8cd1" (UID: "f225d3b6-d482-467b-a164-7fcd7f5b8cd1"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:54:17 crc kubenswrapper[4857]: I1128 13:54:17.920075 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f225d3b6-d482-467b-a164-7fcd7f5b8cd1-kube-api-access-jzxl9" (OuterVolumeSpecName: "kube-api-access-jzxl9") pod "f225d3b6-d482-467b-a164-7fcd7f5b8cd1" (UID: "f225d3b6-d482-467b-a164-7fcd7f5b8cd1"). InnerVolumeSpecName "kube-api-access-jzxl9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:54:17 crc kubenswrapper[4857]: I1128 13:54:17.941926 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f225d3b6-d482-467b-a164-7fcd7f5b8cd1-config-data" (OuterVolumeSpecName: "config-data") pod "f225d3b6-d482-467b-a164-7fcd7f5b8cd1" (UID: "f225d3b6-d482-467b-a164-7fcd7f5b8cd1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:54:17 crc kubenswrapper[4857]: I1128 13:54:17.945750 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f225d3b6-d482-467b-a164-7fcd7f5b8cd1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f225d3b6-d482-467b-a164-7fcd7f5b8cd1" (UID: "f225d3b6-d482-467b-a164-7fcd7f5b8cd1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:54:17 crc kubenswrapper[4857]: I1128 13:54:17.963497 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f225d3b6-d482-467b-a164-7fcd7f5b8cd1-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "f225d3b6-d482-467b-a164-7fcd7f5b8cd1" (UID: "f225d3b6-d482-467b-a164-7fcd7f5b8cd1"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:54:17 crc kubenswrapper[4857]: I1128 13:54:17.973753 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f225d3b6-d482-467b-a164-7fcd7f5b8cd1-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "f225d3b6-d482-467b-a164-7fcd7f5b8cd1" (UID: "f225d3b6-d482-467b-a164-7fcd7f5b8cd1"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.011914 4857 generic.go:334] "Generic (PLEG): container finished" podID="f225d3b6-d482-467b-a164-7fcd7f5b8cd1" containerID="eecbd4d5b3045a89d8472f8d737bade9ffbb2d321d1b7326722483b3c1a61809" exitCode=0 Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.011967 4857 generic.go:334] "Generic (PLEG): container finished" podID="f225d3b6-d482-467b-a164-7fcd7f5b8cd1" containerID="5763309fa86a0f99a017dac7375f11f5ad714499cd1804774fc633ee2c2ed72d" exitCode=143 Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.012020 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.011993 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f225d3b6-d482-467b-a164-7fcd7f5b8cd1","Type":"ContainerDied","Data":"eecbd4d5b3045a89d8472f8d737bade9ffbb2d321d1b7326722483b3c1a61809"} Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.012090 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f225d3b6-d482-467b-a164-7fcd7f5b8cd1","Type":"ContainerDied","Data":"5763309fa86a0f99a017dac7375f11f5ad714499cd1804774fc633ee2c2ed72d"} Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.012107 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f225d3b6-d482-467b-a164-7fcd7f5b8cd1","Type":"ContainerDied","Data":"46edcfeb0fc2a9304a57d4a391b693507f159f778bd305a9f4143c32312e5294"} Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.012129 4857 scope.go:117] "RemoveContainer" containerID="eecbd4d5b3045a89d8472f8d737bade9ffbb2d321d1b7326722483b3c1a61809" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.017393 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f225d3b6-d482-467b-a164-7fcd7f5b8cd1-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.017420 4857 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f225d3b6-d482-467b-a164-7fcd7f5b8cd1-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.017435 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f225d3b6-d482-467b-a164-7fcd7f5b8cd1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.017444 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f225d3b6-d482-467b-a164-7fcd7f5b8cd1-logs\") on node \"crc\" DevicePath \"\"" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.017456 4857 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f225d3b6-d482-467b-a164-7fcd7f5b8cd1-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.017465 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jzxl9\" (UniqueName: \"kubernetes.io/projected/f225d3b6-d482-467b-a164-7fcd7f5b8cd1-kube-api-access-jzxl9\") on node \"crc\" DevicePath \"\"" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.017569 4857 generic.go:334] "Generic (PLEG): container finished" podID="0a7818ef-92f1-45d7-8e82-a12bd9e52025" containerID="9c4532426545e665d29d95b5eb288b5f7fb85f3d59af5c678ec8a6ee12b47158" exitCode=143 Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.017598 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0a7818ef-92f1-45d7-8e82-a12bd9e52025","Type":"ContainerDied","Data":"9c4532426545e665d29d95b5eb288b5f7fb85f3d59af5c678ec8a6ee12b47158"} Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.035752 4857 scope.go:117] "RemoveContainer" containerID="5763309fa86a0f99a017dac7375f11f5ad714499cd1804774fc633ee2c2ed72d" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.050701 4857 kubelet.go:2437] "SyncLoop 
DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.060865 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.069434 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 13:54:18 crc kubenswrapper[4857]: E1128 13:54:18.069890 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9" containerName="nova-manage" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.069915 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9" containerName="nova-manage" Nov 28 13:54:18 crc kubenswrapper[4857]: E1128 13:54:18.069931 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8a47a38-60f9-4084-8341-94da42266558" containerName="dnsmasq-dns" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.069939 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8a47a38-60f9-4084-8341-94da42266558" containerName="dnsmasq-dns" Nov 28 13:54:18 crc kubenswrapper[4857]: E1128 13:54:18.069972 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f225d3b6-d482-467b-a164-7fcd7f5b8cd1" containerName="nova-api-log" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.069981 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f225d3b6-d482-467b-a164-7fcd7f5b8cd1" containerName="nova-api-log" Nov 28 13:54:18 crc kubenswrapper[4857]: E1128 13:54:18.070000 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8a47a38-60f9-4084-8341-94da42266558" containerName="init" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.070006 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8a47a38-60f9-4084-8341-94da42266558" containerName="init" Nov 28 13:54:18 crc kubenswrapper[4857]: E1128 13:54:18.070046 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f225d3b6-d482-467b-a164-7fcd7f5b8cd1" containerName="nova-api-api" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.070055 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f225d3b6-d482-467b-a164-7fcd7f5b8cd1" containerName="nova-api-api" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.070089 4857 scope.go:117] "RemoveContainer" containerID="eecbd4d5b3045a89d8472f8d737bade9ffbb2d321d1b7326722483b3c1a61809" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.070271 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8a47a38-60f9-4084-8341-94da42266558" containerName="dnsmasq-dns" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.070299 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9" containerName="nova-manage" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.070317 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f225d3b6-d482-467b-a164-7fcd7f5b8cd1" containerName="nova-api-log" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.070336 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f225d3b6-d482-467b-a164-7fcd7f5b8cd1" containerName="nova-api-api" Nov 28 13:54:18 crc kubenswrapper[4857]: E1128 13:54:18.070753 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eecbd4d5b3045a89d8472f8d737bade9ffbb2d321d1b7326722483b3c1a61809\": container with ID starting with 
eecbd4d5b3045a89d8472f8d737bade9ffbb2d321d1b7326722483b3c1a61809 not found: ID does not exist" containerID="eecbd4d5b3045a89d8472f8d737bade9ffbb2d321d1b7326722483b3c1a61809" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.070823 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eecbd4d5b3045a89d8472f8d737bade9ffbb2d321d1b7326722483b3c1a61809"} err="failed to get container status \"eecbd4d5b3045a89d8472f8d737bade9ffbb2d321d1b7326722483b3c1a61809\": rpc error: code = NotFound desc = could not find container \"eecbd4d5b3045a89d8472f8d737bade9ffbb2d321d1b7326722483b3c1a61809\": container with ID starting with eecbd4d5b3045a89d8472f8d737bade9ffbb2d321d1b7326722483b3c1a61809 not found: ID does not exist" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.070857 4857 scope.go:117] "RemoveContainer" containerID="5763309fa86a0f99a017dac7375f11f5ad714499cd1804774fc633ee2c2ed72d" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.071603 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 13:54:18 crc kubenswrapper[4857]: E1128 13:54:18.071830 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5763309fa86a0f99a017dac7375f11f5ad714499cd1804774fc633ee2c2ed72d\": container with ID starting with 5763309fa86a0f99a017dac7375f11f5ad714499cd1804774fc633ee2c2ed72d not found: ID does not exist" containerID="5763309fa86a0f99a017dac7375f11f5ad714499cd1804774fc633ee2c2ed72d" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.071971 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5763309fa86a0f99a017dac7375f11f5ad714499cd1804774fc633ee2c2ed72d"} err="failed to get container status \"5763309fa86a0f99a017dac7375f11f5ad714499cd1804774fc633ee2c2ed72d\": rpc error: code = NotFound desc = could not find container \"5763309fa86a0f99a017dac7375f11f5ad714499cd1804774fc633ee2c2ed72d\": container with ID starting with 5763309fa86a0f99a017dac7375f11f5ad714499cd1804774fc633ee2c2ed72d not found: ID does not exist" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.072095 4857 scope.go:117] "RemoveContainer" containerID="eecbd4d5b3045a89d8472f8d737bade9ffbb2d321d1b7326722483b3c1a61809" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.072528 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eecbd4d5b3045a89d8472f8d737bade9ffbb2d321d1b7326722483b3c1a61809"} err="failed to get container status \"eecbd4d5b3045a89d8472f8d737bade9ffbb2d321d1b7326722483b3c1a61809\": rpc error: code = NotFound desc = could not find container \"eecbd4d5b3045a89d8472f8d737bade9ffbb2d321d1b7326722483b3c1a61809\": container with ID starting with eecbd4d5b3045a89d8472f8d737bade9ffbb2d321d1b7326722483b3c1a61809 not found: ID does not exist" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.072564 4857 scope.go:117] "RemoveContainer" containerID="5763309fa86a0f99a017dac7375f11f5ad714499cd1804774fc633ee2c2ed72d" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.072797 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5763309fa86a0f99a017dac7375f11f5ad714499cd1804774fc633ee2c2ed72d"} err="failed to get container status \"5763309fa86a0f99a017dac7375f11f5ad714499cd1804774fc633ee2c2ed72d\": rpc error: code = NotFound desc = could not find container 
\"5763309fa86a0f99a017dac7375f11f5ad714499cd1804774fc633ee2c2ed72d\": container with ID starting with 5763309fa86a0f99a017dac7375f11f5ad714499cd1804774fc633ee2c2ed72d not found: ID does not exist" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.075799 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.076083 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.076238 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.080266 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.220564 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c69ec619-0d17-4a49-8f97-6db48291122d-config-data\") pod \"nova-api-0\" (UID: \"c69ec619-0d17-4a49-8f97-6db48291122d\") " pod="openstack/nova-api-0" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.221525 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c69ec619-0d17-4a49-8f97-6db48291122d-public-tls-certs\") pod \"nova-api-0\" (UID: \"c69ec619-0d17-4a49-8f97-6db48291122d\") " pod="openstack/nova-api-0" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.221568 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c69ec619-0d17-4a49-8f97-6db48291122d-logs\") pod \"nova-api-0\" (UID: \"c69ec619-0d17-4a49-8f97-6db48291122d\") " pod="openstack/nova-api-0" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.221645 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8278s\" (UniqueName: \"kubernetes.io/projected/c69ec619-0d17-4a49-8f97-6db48291122d-kube-api-access-8278s\") pod \"nova-api-0\" (UID: \"c69ec619-0d17-4a49-8f97-6db48291122d\") " pod="openstack/nova-api-0" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.221722 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c69ec619-0d17-4a49-8f97-6db48291122d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c69ec619-0d17-4a49-8f97-6db48291122d\") " pod="openstack/nova-api-0" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.221796 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c69ec619-0d17-4a49-8f97-6db48291122d-internal-tls-certs\") pod \"nova-api-0\" (UID: \"c69ec619-0d17-4a49-8f97-6db48291122d\") " pod="openstack/nova-api-0" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.240823 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f225d3b6-d482-467b-a164-7fcd7f5b8cd1" path="/var/lib/kubelet/pods/f225d3b6-d482-467b-a164-7fcd7f5b8cd1/volumes" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.324004 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/c69ec619-0d17-4a49-8f97-6db48291122d-internal-tls-certs\") pod \"nova-api-0\" (UID: \"c69ec619-0d17-4a49-8f97-6db48291122d\") " pod="openstack/nova-api-0" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.324076 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c69ec619-0d17-4a49-8f97-6db48291122d-config-data\") pod \"nova-api-0\" (UID: \"c69ec619-0d17-4a49-8f97-6db48291122d\") " pod="openstack/nova-api-0" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.324125 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c69ec619-0d17-4a49-8f97-6db48291122d-public-tls-certs\") pod \"nova-api-0\" (UID: \"c69ec619-0d17-4a49-8f97-6db48291122d\") " pod="openstack/nova-api-0" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.324149 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c69ec619-0d17-4a49-8f97-6db48291122d-logs\") pod \"nova-api-0\" (UID: \"c69ec619-0d17-4a49-8f97-6db48291122d\") " pod="openstack/nova-api-0" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.324192 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8278s\" (UniqueName: \"kubernetes.io/projected/c69ec619-0d17-4a49-8f97-6db48291122d-kube-api-access-8278s\") pod \"nova-api-0\" (UID: \"c69ec619-0d17-4a49-8f97-6db48291122d\") " pod="openstack/nova-api-0" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.324244 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c69ec619-0d17-4a49-8f97-6db48291122d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c69ec619-0d17-4a49-8f97-6db48291122d\") " pod="openstack/nova-api-0" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.325016 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c69ec619-0d17-4a49-8f97-6db48291122d-logs\") pod \"nova-api-0\" (UID: \"c69ec619-0d17-4a49-8f97-6db48291122d\") " pod="openstack/nova-api-0" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.327325 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c69ec619-0d17-4a49-8f97-6db48291122d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c69ec619-0d17-4a49-8f97-6db48291122d\") " pod="openstack/nova-api-0" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.328202 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c69ec619-0d17-4a49-8f97-6db48291122d-internal-tls-certs\") pod \"nova-api-0\" (UID: \"c69ec619-0d17-4a49-8f97-6db48291122d\") " pod="openstack/nova-api-0" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.328625 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c69ec619-0d17-4a49-8f97-6db48291122d-public-tls-certs\") pod \"nova-api-0\" (UID: \"c69ec619-0d17-4a49-8f97-6db48291122d\") " pod="openstack/nova-api-0" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.328722 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c69ec619-0d17-4a49-8f97-6db48291122d-config-data\") pod 
\"nova-api-0\" (UID: \"c69ec619-0d17-4a49-8f97-6db48291122d\") " pod="openstack/nova-api-0" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.339738 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8278s\" (UniqueName: \"kubernetes.io/projected/c69ec619-0d17-4a49-8f97-6db48291122d-kube-api-access-8278s\") pod \"nova-api-0\" (UID: \"c69ec619-0d17-4a49-8f97-6db48291122d\") " pod="openstack/nova-api-0" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.423832 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 13:54:18 crc kubenswrapper[4857]: I1128 13:54:18.885520 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 13:54:19 crc kubenswrapper[4857]: I1128 13:54:19.028311 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c69ec619-0d17-4a49-8f97-6db48291122d","Type":"ContainerStarted","Data":"e61e758e98622c51a24780646978ed2c6c2596c72fab8d38d595a32c54cb9376"} Nov 28 13:54:20 crc kubenswrapper[4857]: I1128 13:54:20.042868 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c69ec619-0d17-4a49-8f97-6db48291122d","Type":"ContainerStarted","Data":"2deee318dc0256b8ef37ddeba48860fdac16b660b66a585af3a3827fae2b5301"} Nov 28 13:54:20 crc kubenswrapper[4857]: I1128 13:54:20.043249 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c69ec619-0d17-4a49-8f97-6db48291122d","Type":"ContainerStarted","Data":"be1b1db83f35439731454aeddb4f9dfe722c000a3ea47cdfea8a67b86a4ff061"} Nov 28 13:54:20 crc kubenswrapper[4857]: I1128 13:54:20.047413 4857 generic.go:334] "Generic (PLEG): container finished" podID="f52472a6-ce63-4192-9ef4-3d77b633ef70" containerID="9f84f9d0085e2e21c564f7ffe093e88ebd9a92661819a1d7ef85e3d19ef948f0" exitCode=0 Nov 28 13:54:20 crc kubenswrapper[4857]: I1128 13:54:20.047489 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f52472a6-ce63-4192-9ef4-3d77b633ef70","Type":"ContainerDied","Data":"9f84f9d0085e2e21c564f7ffe093e88ebd9a92661819a1d7ef85e3d19ef948f0"} Nov 28 13:54:20 crc kubenswrapper[4857]: I1128 13:54:20.047522 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f52472a6-ce63-4192-9ef4-3d77b633ef70","Type":"ContainerDied","Data":"b94f09916edb92e39a9d434c1f407e3e18165f544275f68fc18124c0cc2b852f"} Nov 28 13:54:20 crc kubenswrapper[4857]: I1128 13:54:20.047538 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b94f09916edb92e39a9d434c1f407e3e18165f544275f68fc18124c0cc2b852f" Nov 28 13:54:20 crc kubenswrapper[4857]: I1128 13:54:20.071541 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.071522516 podStartE2EDuration="2.071522516s" podCreationTimestamp="2025-11-28 13:54:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:54:20.064407388 +0000 UTC m=+1510.188348835" watchObservedRunningTime="2025-11-28 13:54:20.071522516 +0000 UTC m=+1510.195463953" Nov 28 13:54:20 crc kubenswrapper[4857]: I1128 13:54:20.093535 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 13:54:20 crc kubenswrapper[4857]: I1128 13:54:20.261376 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f52472a6-ce63-4192-9ef4-3d77b633ef70-combined-ca-bundle\") pod \"f52472a6-ce63-4192-9ef4-3d77b633ef70\" (UID: \"f52472a6-ce63-4192-9ef4-3d77b633ef70\") " Nov 28 13:54:20 crc kubenswrapper[4857]: I1128 13:54:20.261817 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9svlk\" (UniqueName: \"kubernetes.io/projected/f52472a6-ce63-4192-9ef4-3d77b633ef70-kube-api-access-9svlk\") pod \"f52472a6-ce63-4192-9ef4-3d77b633ef70\" (UID: \"f52472a6-ce63-4192-9ef4-3d77b633ef70\") " Nov 28 13:54:20 crc kubenswrapper[4857]: I1128 13:54:20.261877 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f52472a6-ce63-4192-9ef4-3d77b633ef70-config-data\") pod \"f52472a6-ce63-4192-9ef4-3d77b633ef70\" (UID: \"f52472a6-ce63-4192-9ef4-3d77b633ef70\") " Nov 28 13:54:20 crc kubenswrapper[4857]: I1128 13:54:20.269246 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f52472a6-ce63-4192-9ef4-3d77b633ef70-kube-api-access-9svlk" (OuterVolumeSpecName: "kube-api-access-9svlk") pod "f52472a6-ce63-4192-9ef4-3d77b633ef70" (UID: "f52472a6-ce63-4192-9ef4-3d77b633ef70"). InnerVolumeSpecName "kube-api-access-9svlk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:54:20 crc kubenswrapper[4857]: I1128 13:54:20.291258 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f52472a6-ce63-4192-9ef4-3d77b633ef70-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f52472a6-ce63-4192-9ef4-3d77b633ef70" (UID: "f52472a6-ce63-4192-9ef4-3d77b633ef70"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:54:20 crc kubenswrapper[4857]: I1128 13:54:20.298026 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f52472a6-ce63-4192-9ef4-3d77b633ef70-config-data" (OuterVolumeSpecName: "config-data") pod "f52472a6-ce63-4192-9ef4-3d77b633ef70" (UID: "f52472a6-ce63-4192-9ef4-3d77b633ef70"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:54:20 crc kubenswrapper[4857]: I1128 13:54:20.364048 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9svlk\" (UniqueName: \"kubernetes.io/projected/f52472a6-ce63-4192-9ef4-3d77b633ef70-kube-api-access-9svlk\") on node \"crc\" DevicePath \"\"" Nov 28 13:54:20 crc kubenswrapper[4857]: I1128 13:54:20.364078 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f52472a6-ce63-4192-9ef4-3d77b633ef70-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:54:20 crc kubenswrapper[4857]: I1128 13:54:20.364089 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f52472a6-ce63-4192-9ef4-3d77b633ef70-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:54:20 crc kubenswrapper[4857]: I1128 13:54:20.364119 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="0a7818ef-92f1-45d7-8e82-a12bd9e52025" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.190:8775/\": read tcp 10.217.0.2:50474->10.217.0.190:8775: read: connection reset by peer" Nov 28 13:54:20 crc kubenswrapper[4857]: I1128 13:54:20.364204 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="0a7818ef-92f1-45d7-8e82-a12bd9e52025" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.190:8775/\": read tcp 10.217.0.2:50480->10.217.0.190:8775: read: connection reset by peer" Nov 28 13:54:20 crc kubenswrapper[4857]: I1128 13:54:20.778879 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 13:54:20 crc kubenswrapper[4857]: I1128 13:54:20.974011 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a7818ef-92f1-45d7-8e82-a12bd9e52025-config-data\") pod \"0a7818ef-92f1-45d7-8e82-a12bd9e52025\" (UID: \"0a7818ef-92f1-45d7-8e82-a12bd9e52025\") " Nov 28 13:54:20 crc kubenswrapper[4857]: I1128 13:54:20.974054 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6q58c\" (UniqueName: \"kubernetes.io/projected/0a7818ef-92f1-45d7-8e82-a12bd9e52025-kube-api-access-6q58c\") pod \"0a7818ef-92f1-45d7-8e82-a12bd9e52025\" (UID: \"0a7818ef-92f1-45d7-8e82-a12bd9e52025\") " Nov 28 13:54:20 crc kubenswrapper[4857]: I1128 13:54:20.974149 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a7818ef-92f1-45d7-8e82-a12bd9e52025-combined-ca-bundle\") pod \"0a7818ef-92f1-45d7-8e82-a12bd9e52025\" (UID: \"0a7818ef-92f1-45d7-8e82-a12bd9e52025\") " Nov 28 13:54:20 crc kubenswrapper[4857]: I1128 13:54:20.974202 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a7818ef-92f1-45d7-8e82-a12bd9e52025-logs\") pod \"0a7818ef-92f1-45d7-8e82-a12bd9e52025\" (UID: \"0a7818ef-92f1-45d7-8e82-a12bd9e52025\") " Nov 28 13:54:20 crc kubenswrapper[4857]: I1128 13:54:20.974285 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a7818ef-92f1-45d7-8e82-a12bd9e52025-nova-metadata-tls-certs\") pod \"0a7818ef-92f1-45d7-8e82-a12bd9e52025\" (UID: 
\"0a7818ef-92f1-45d7-8e82-a12bd9e52025\") " Nov 28 13:54:20 crc kubenswrapper[4857]: I1128 13:54:20.974641 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a7818ef-92f1-45d7-8e82-a12bd9e52025-logs" (OuterVolumeSpecName: "logs") pod "0a7818ef-92f1-45d7-8e82-a12bd9e52025" (UID: "0a7818ef-92f1-45d7-8e82-a12bd9e52025"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:54:20 crc kubenswrapper[4857]: I1128 13:54:20.978118 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a7818ef-92f1-45d7-8e82-a12bd9e52025-kube-api-access-6q58c" (OuterVolumeSpecName: "kube-api-access-6q58c") pod "0a7818ef-92f1-45d7-8e82-a12bd9e52025" (UID: "0a7818ef-92f1-45d7-8e82-a12bd9e52025"). InnerVolumeSpecName "kube-api-access-6q58c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:54:20 crc kubenswrapper[4857]: I1128 13:54:20.999408 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a7818ef-92f1-45d7-8e82-a12bd9e52025-config-data" (OuterVolumeSpecName: "config-data") pod "0a7818ef-92f1-45d7-8e82-a12bd9e52025" (UID: "0a7818ef-92f1-45d7-8e82-a12bd9e52025"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.004061 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a7818ef-92f1-45d7-8e82-a12bd9e52025-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0a7818ef-92f1-45d7-8e82-a12bd9e52025" (UID: "0a7818ef-92f1-45d7-8e82-a12bd9e52025"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.026420 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a7818ef-92f1-45d7-8e82-a12bd9e52025-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "0a7818ef-92f1-45d7-8e82-a12bd9e52025" (UID: "0a7818ef-92f1-45d7-8e82-a12bd9e52025"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.058357 4857 generic.go:334] "Generic (PLEG): container finished" podID="0a7818ef-92f1-45d7-8e82-a12bd9e52025" containerID="d659fc88c7c531aad673ca9bd44a4ff85223bd937030442e1a2ed21300480215" exitCode=0 Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.058411 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.058456 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0a7818ef-92f1-45d7-8e82-a12bd9e52025","Type":"ContainerDied","Data":"d659fc88c7c531aad673ca9bd44a4ff85223bd937030442e1a2ed21300480215"} Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.058522 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0a7818ef-92f1-45d7-8e82-a12bd9e52025","Type":"ContainerDied","Data":"8d0a0c615680dd2bec0228932c38cf564382037c23ac8797cba8cbb2d585ff7a"} Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.058546 4857 scope.go:117] "RemoveContainer" containerID="d659fc88c7c531aad673ca9bd44a4ff85223bd937030442e1a2ed21300480215" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.058688 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.077071 4857 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a7818ef-92f1-45d7-8e82-a12bd9e52025-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.077385 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a7818ef-92f1-45d7-8e82-a12bd9e52025-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.077404 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6q58c\" (UniqueName: \"kubernetes.io/projected/0a7818ef-92f1-45d7-8e82-a12bd9e52025-kube-api-access-6q58c\") on node \"crc\" DevicePath \"\"" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.077419 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a7818ef-92f1-45d7-8e82-a12bd9e52025-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.077432 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0a7818ef-92f1-45d7-8e82-a12bd9e52025-logs\") on node \"crc\" DevicePath \"\"" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.083075 4857 scope.go:117] "RemoveContainer" containerID="9c4532426545e665d29d95b5eb288b5f7fb85f3d59af5c678ec8a6ee12b47158" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.118320 4857 scope.go:117] "RemoveContainer" containerID="d659fc88c7c531aad673ca9bd44a4ff85223bd937030442e1a2ed21300480215" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.118438 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 13:54:21 crc kubenswrapper[4857]: E1128 13:54:21.119195 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d659fc88c7c531aad673ca9bd44a4ff85223bd937030442e1a2ed21300480215\": container with ID starting with d659fc88c7c531aad673ca9bd44a4ff85223bd937030442e1a2ed21300480215 not found: ID does not exist" containerID="d659fc88c7c531aad673ca9bd44a4ff85223bd937030442e1a2ed21300480215" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.119231 4857 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"d659fc88c7c531aad673ca9bd44a4ff85223bd937030442e1a2ed21300480215"} err="failed to get container status \"d659fc88c7c531aad673ca9bd44a4ff85223bd937030442e1a2ed21300480215\": rpc error: code = NotFound desc = could not find container \"d659fc88c7c531aad673ca9bd44a4ff85223bd937030442e1a2ed21300480215\": container with ID starting with d659fc88c7c531aad673ca9bd44a4ff85223bd937030442e1a2ed21300480215 not found: ID does not exist" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.119255 4857 scope.go:117] "RemoveContainer" containerID="9c4532426545e665d29d95b5eb288b5f7fb85f3d59af5c678ec8a6ee12b47158" Nov 28 13:54:21 crc kubenswrapper[4857]: E1128 13:54:21.120776 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9c4532426545e665d29d95b5eb288b5f7fb85f3d59af5c678ec8a6ee12b47158\": container with ID starting with 9c4532426545e665d29d95b5eb288b5f7fb85f3d59af5c678ec8a6ee12b47158 not found: ID does not exist" containerID="9c4532426545e665d29d95b5eb288b5f7fb85f3d59af5c678ec8a6ee12b47158" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.120810 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9c4532426545e665d29d95b5eb288b5f7fb85f3d59af5c678ec8a6ee12b47158"} err="failed to get container status \"9c4532426545e665d29d95b5eb288b5f7fb85f3d59af5c678ec8a6ee12b47158\": rpc error: code = NotFound desc = could not find container \"9c4532426545e665d29d95b5eb288b5f7fb85f3d59af5c678ec8a6ee12b47158\": container with ID starting with 9c4532426545e665d29d95b5eb288b5f7fb85f3d59af5c678ec8a6ee12b47158 not found: ID does not exist" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.158641 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.189512 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.199053 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.207199 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 13:54:21 crc kubenswrapper[4857]: E1128 13:54:21.207674 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f52472a6-ce63-4192-9ef4-3d77b633ef70" containerName="nova-scheduler-scheduler" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.207696 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f52472a6-ce63-4192-9ef4-3d77b633ef70" containerName="nova-scheduler-scheduler" Nov 28 13:54:21 crc kubenswrapper[4857]: E1128 13:54:21.207719 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a7818ef-92f1-45d7-8e82-a12bd9e52025" containerName="nova-metadata-metadata" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.207726 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a7818ef-92f1-45d7-8e82-a12bd9e52025" containerName="nova-metadata-metadata" Nov 28 13:54:21 crc kubenswrapper[4857]: E1128 13:54:21.207756 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a7818ef-92f1-45d7-8e82-a12bd9e52025" containerName="nova-metadata-log" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.207764 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a7818ef-92f1-45d7-8e82-a12bd9e52025" containerName="nova-metadata-log" Nov 28 13:54:21 crc 
kubenswrapper[4857]: I1128 13:54:21.207922 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a7818ef-92f1-45d7-8e82-a12bd9e52025" containerName="nova-metadata-log" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.207932 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a7818ef-92f1-45d7-8e82-a12bd9e52025" containerName="nova-metadata-metadata" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.207964 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f52472a6-ce63-4192-9ef4-3d77b633ef70" containerName="nova-scheduler-scheduler" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.209817 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.214980 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.215149 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.223682 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.225805 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.227819 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.230553 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.233748 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.383869 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/10f5dca0-ca0a-4e88-838f-14affb1dead5-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"10f5dca0-ca0a-4e88-838f-14affb1dead5\") " pod="openstack/nova-metadata-0" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.383916 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/10f5dca0-ca0a-4e88-838f-14affb1dead5-logs\") pod \"nova-metadata-0\" (UID: \"10f5dca0-ca0a-4e88-838f-14affb1dead5\") " pod="openstack/nova-metadata-0" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.383966 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e222fcd6-26e0-46af-82ab-7cf038a18195-config-data\") pod \"nova-scheduler-0\" (UID: \"e222fcd6-26e0-46af-82ab-7cf038a18195\") " pod="openstack/nova-scheduler-0" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.384519 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10f5dca0-ca0a-4e88-838f-14affb1dead5-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"10f5dca0-ca0a-4e88-838f-14affb1dead5\") " pod="openstack/nova-metadata-0" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.384654 4857 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5kz9m\" (UniqueName: \"kubernetes.io/projected/10f5dca0-ca0a-4e88-838f-14affb1dead5-kube-api-access-5kz9m\") pod \"nova-metadata-0\" (UID: \"10f5dca0-ca0a-4e88-838f-14affb1dead5\") " pod="openstack/nova-metadata-0" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.384779 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10f5dca0-ca0a-4e88-838f-14affb1dead5-config-data\") pod \"nova-metadata-0\" (UID: \"10f5dca0-ca0a-4e88-838f-14affb1dead5\") " pod="openstack/nova-metadata-0" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.384814 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e222fcd6-26e0-46af-82ab-7cf038a18195-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"e222fcd6-26e0-46af-82ab-7cf038a18195\") " pod="openstack/nova-scheduler-0" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.384835 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zcxwt\" (UniqueName: \"kubernetes.io/projected/e222fcd6-26e0-46af-82ab-7cf038a18195-kube-api-access-zcxwt\") pod \"nova-scheduler-0\" (UID: \"e222fcd6-26e0-46af-82ab-7cf038a18195\") " pod="openstack/nova-scheduler-0" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.486886 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/10f5dca0-ca0a-4e88-838f-14affb1dead5-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"10f5dca0-ca0a-4e88-838f-14affb1dead5\") " pod="openstack/nova-metadata-0" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.486934 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/10f5dca0-ca0a-4e88-838f-14affb1dead5-logs\") pod \"nova-metadata-0\" (UID: \"10f5dca0-ca0a-4e88-838f-14affb1dead5\") " pod="openstack/nova-metadata-0" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.486988 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e222fcd6-26e0-46af-82ab-7cf038a18195-config-data\") pod \"nova-scheduler-0\" (UID: \"e222fcd6-26e0-46af-82ab-7cf038a18195\") " pod="openstack/nova-scheduler-0" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.487062 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10f5dca0-ca0a-4e88-838f-14affb1dead5-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"10f5dca0-ca0a-4e88-838f-14affb1dead5\") " pod="openstack/nova-metadata-0" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.487103 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5kz9m\" (UniqueName: \"kubernetes.io/projected/10f5dca0-ca0a-4e88-838f-14affb1dead5-kube-api-access-5kz9m\") pod \"nova-metadata-0\" (UID: \"10f5dca0-ca0a-4e88-838f-14affb1dead5\") " pod="openstack/nova-metadata-0" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.487147 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10f5dca0-ca0a-4e88-838f-14affb1dead5-config-data\") pod 
\"nova-metadata-0\" (UID: \"10f5dca0-ca0a-4e88-838f-14affb1dead5\") " pod="openstack/nova-metadata-0" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.487166 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e222fcd6-26e0-46af-82ab-7cf038a18195-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"e222fcd6-26e0-46af-82ab-7cf038a18195\") " pod="openstack/nova-scheduler-0" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.487182 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zcxwt\" (UniqueName: \"kubernetes.io/projected/e222fcd6-26e0-46af-82ab-7cf038a18195-kube-api-access-zcxwt\") pod \"nova-scheduler-0\" (UID: \"e222fcd6-26e0-46af-82ab-7cf038a18195\") " pod="openstack/nova-scheduler-0" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.487972 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/10f5dca0-ca0a-4e88-838f-14affb1dead5-logs\") pod \"nova-metadata-0\" (UID: \"10f5dca0-ca0a-4e88-838f-14affb1dead5\") " pod="openstack/nova-metadata-0" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.491228 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e222fcd6-26e0-46af-82ab-7cf038a18195-config-data\") pod \"nova-scheduler-0\" (UID: \"e222fcd6-26e0-46af-82ab-7cf038a18195\") " pod="openstack/nova-scheduler-0" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.491591 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/10f5dca0-ca0a-4e88-838f-14affb1dead5-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"10f5dca0-ca0a-4e88-838f-14affb1dead5\") " pod="openstack/nova-metadata-0" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.492311 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10f5dca0-ca0a-4e88-838f-14affb1dead5-config-data\") pod \"nova-metadata-0\" (UID: \"10f5dca0-ca0a-4e88-838f-14affb1dead5\") " pod="openstack/nova-metadata-0" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.492413 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e222fcd6-26e0-46af-82ab-7cf038a18195-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"e222fcd6-26e0-46af-82ab-7cf038a18195\") " pod="openstack/nova-scheduler-0" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.494061 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10f5dca0-ca0a-4e88-838f-14affb1dead5-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"10f5dca0-ca0a-4e88-838f-14affb1dead5\") " pod="openstack/nova-metadata-0" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.504411 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zcxwt\" (UniqueName: \"kubernetes.io/projected/e222fcd6-26e0-46af-82ab-7cf038a18195-kube-api-access-zcxwt\") pod \"nova-scheduler-0\" (UID: \"e222fcd6-26e0-46af-82ab-7cf038a18195\") " pod="openstack/nova-scheduler-0" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.506407 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5kz9m\" (UniqueName: 
\"kubernetes.io/projected/10f5dca0-ca0a-4e88-838f-14affb1dead5-kube-api-access-5kz9m\") pod \"nova-metadata-0\" (UID: \"10f5dca0-ca0a-4e88-838f-14affb1dead5\") " pod="openstack/nova-metadata-0" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.538434 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 13:54:21 crc kubenswrapper[4857]: I1128 13:54:21.547862 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 13:54:22 crc kubenswrapper[4857]: I1128 13:54:22.032654 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 13:54:22 crc kubenswrapper[4857]: W1128 13:54:22.033660 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode222fcd6_26e0_46af_82ab_7cf038a18195.slice/crio-2295f3de8bb771d3b0e5e5547e934c009e2d6c187d7df4f80a67889c12cea8ff WatchSource:0}: Error finding container 2295f3de8bb771d3b0e5e5547e934c009e2d6c187d7df4f80a67889c12cea8ff: Status 404 returned error can't find the container with id 2295f3de8bb771d3b0e5e5547e934c009e2d6c187d7df4f80a67889c12cea8ff Nov 28 13:54:22 crc kubenswrapper[4857]: I1128 13:54:22.078296 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"e222fcd6-26e0-46af-82ab-7cf038a18195","Type":"ContainerStarted","Data":"2295f3de8bb771d3b0e5e5547e934c009e2d6c187d7df4f80a67889c12cea8ff"} Nov 28 13:54:22 crc kubenswrapper[4857]: I1128 13:54:22.100427 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 13:54:22 crc kubenswrapper[4857]: W1128 13:54:22.108246 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod10f5dca0_ca0a_4e88_838f_14affb1dead5.slice/crio-90d2b27188d8be8064e748515e9e66bc806f8c6570931142c0dff042ab3adebb WatchSource:0}: Error finding container 90d2b27188d8be8064e748515e9e66bc806f8c6570931142c0dff042ab3adebb: Status 404 returned error can't find the container with id 90d2b27188d8be8064e748515e9e66bc806f8c6570931142c0dff042ab3adebb Nov 28 13:54:22 crc kubenswrapper[4857]: I1128 13:54:22.243109 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a7818ef-92f1-45d7-8e82-a12bd9e52025" path="/var/lib/kubelet/pods/0a7818ef-92f1-45d7-8e82-a12bd9e52025/volumes" Nov 28 13:54:22 crc kubenswrapper[4857]: I1128 13:54:22.244275 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f52472a6-ce63-4192-9ef4-3d77b633ef70" path="/var/lib/kubelet/pods/f52472a6-ce63-4192-9ef4-3d77b633ef70/volumes" Nov 28 13:54:23 crc kubenswrapper[4857]: I1128 13:54:23.091031 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"e222fcd6-26e0-46af-82ab-7cf038a18195","Type":"ContainerStarted","Data":"9ac0d5961e71550335295fb51102d474972c116efc7d7bd4fb89a720a9055217"} Nov 28 13:54:23 crc kubenswrapper[4857]: I1128 13:54:23.094599 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"10f5dca0-ca0a-4e88-838f-14affb1dead5","Type":"ContainerStarted","Data":"bafd64668cff0693bb5967882f3c22be467009f95e4f118cff44737cfc28e0af"} Nov 28 13:54:23 crc kubenswrapper[4857]: I1128 13:54:23.094652 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"10f5dca0-ca0a-4e88-838f-14affb1dead5","Type":"ContainerStarted","Data":"9eb233258cdcc0845df438e157136a632bd3695d5e815406ce0875d7029891d2"} Nov 28 13:54:23 crc kubenswrapper[4857]: I1128 13:54:23.094668 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"10f5dca0-ca0a-4e88-838f-14affb1dead5","Type":"ContainerStarted","Data":"90d2b27188d8be8064e748515e9e66bc806f8c6570931142c0dff042ab3adebb"} Nov 28 13:54:23 crc kubenswrapper[4857]: I1128 13:54:23.114437 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.114420759 podStartE2EDuration="2.114420759s" podCreationTimestamp="2025-11-28 13:54:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:54:23.108441762 +0000 UTC m=+1513.232383199" watchObservedRunningTime="2025-11-28 13:54:23.114420759 +0000 UTC m=+1513.238362196" Nov 28 13:54:23 crc kubenswrapper[4857]: I1128 13:54:23.127689 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.127669808 podStartE2EDuration="2.127669808s" podCreationTimestamp="2025-11-28 13:54:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:54:23.122663576 +0000 UTC m=+1513.246605013" watchObservedRunningTime="2025-11-28 13:54:23.127669808 +0000 UTC m=+1513.251611245" Nov 28 13:54:26 crc kubenswrapper[4857]: I1128 13:54:26.539462 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 28 13:54:26 crc kubenswrapper[4857]: I1128 13:54:26.548324 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 13:54:26 crc kubenswrapper[4857]: I1128 13:54:26.548439 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 13:54:28 crc kubenswrapper[4857]: I1128 13:54:28.424511 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 13:54:28 crc kubenswrapper[4857]: I1128 13:54:28.424868 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 13:54:29 crc kubenswrapper[4857]: I1128 13:54:29.436097 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="c69ec619-0d17-4a49-8f97-6db48291122d" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.203:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 13:54:29 crc kubenswrapper[4857]: I1128 13:54:29.436067 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="c69ec619-0d17-4a49-8f97-6db48291122d" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.203:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 13:54:31 crc kubenswrapper[4857]: I1128 13:54:31.538828 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 28 13:54:31 crc kubenswrapper[4857]: I1128 13:54:31.549086 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 13:54:31 crc kubenswrapper[4857]: I1128 13:54:31.549134 4857 kubelet.go:2542] 
"SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 13:54:31 crc kubenswrapper[4857]: I1128 13:54:31.569537 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 28 13:54:32 crc kubenswrapper[4857]: I1128 13:54:32.266750 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 28 13:54:32 crc kubenswrapper[4857]: I1128 13:54:32.563180 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="10f5dca0-ca0a-4e88-838f-14affb1dead5" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.205:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 13:54:32 crc kubenswrapper[4857]: I1128 13:54:32.563172 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="10f5dca0-ca0a-4e88-838f-14affb1dead5" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.205:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 28 13:54:36 crc kubenswrapper[4857]: I1128 13:54:36.273759 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 28 13:54:38 crc kubenswrapper[4857]: I1128 13:54:38.431974 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 13:54:38 crc kubenswrapper[4857]: I1128 13:54:38.432637 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 13:54:38 crc kubenswrapper[4857]: I1128 13:54:38.436608 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 13:54:38 crc kubenswrapper[4857]: I1128 13:54:38.446460 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 28 13:54:39 crc kubenswrapper[4857]: I1128 13:54:39.282627 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 13:54:39 crc kubenswrapper[4857]: I1128 13:54:39.290055 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 28 13:54:41 crc kubenswrapper[4857]: I1128 13:54:41.557883 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 28 13:54:41 crc kubenswrapper[4857]: I1128 13:54:41.558375 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 28 13:54:41 crc kubenswrapper[4857]: I1128 13:54:41.567922 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 28 13:54:41 crc kubenswrapper[4857]: I1128 13:54:41.571720 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 28 13:55:00 crc kubenswrapper[4857]: I1128 13:55:00.972936 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Nov 28 13:55:00 crc kubenswrapper[4857]: I1128 13:55:00.973705 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="c3d9d9f7-3d10-4677-aaef-60d731a33857" containerName="openstackclient" containerID="cri-o://c29237331f4b473cdae2e6d65bf7f88564f2faca53b3e0624b2143a3d5d0c546" gracePeriod=2 Nov 28 13:55:00 crc kubenswrapper[4857]: I1128 
13:55:00.996148 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.241513 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron0f31-account-delete-t4p7d"] Nov 28 13:55:01 crc kubenswrapper[4857]: E1128 13:55:01.249449 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3d9d9f7-3d10-4677-aaef-60d731a33857" containerName="openstackclient" Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.249479 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3d9d9f7-3d10-4677-aaef-60d731a33857" containerName="openstackclient" Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.249656 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3d9d9f7-3d10-4677-aaef-60d731a33857" containerName="openstackclient" Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.250275 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron0f31-account-delete-t4p7d" Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.266015 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-zhh8w"] Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.305976 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-f67hs"] Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.306227 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-metrics-f67hs" podUID="7c117ad8-9d37-4e85-b408-e2d77c8331df" containerName="openstack-network-exporter" containerID="cri-o://0e846cecd8668f3338b50a23dd0e5ca393a261a6642af341de9d050961ee27c0" gracePeriod=30 Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.325013 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-hgm54"] Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.336048 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron0f31-account-delete-t4p7d"] Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.347052 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance25bf-account-delete-78p7f"] Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.348598 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance25bf-account-delete-78p7f" Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.364367 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance25bf-account-delete-78p7f"] Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.414021 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5kcl9\" (UniqueName: \"kubernetes.io/projected/4c03f40d-958a-49a0-a2f7-54a1f175caf7-kube-api-access-5kcl9\") pod \"neutron0f31-account-delete-t4p7d\" (UID: \"4c03f40d-958a-49a0-a2f7-54a1f175caf7\") " pod="openstack/neutron0f31-account-delete-t4p7d" Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.414221 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4c03f40d-958a-49a0-a2f7-54a1f175caf7-operator-scripts\") pod \"neutron0f31-account-delete-t4p7d\" (UID: \"4c03f40d-958a-49a0-a2f7-54a1f175caf7\") " pod="openstack/neutron0f31-account-delete-t4p7d" Nov 28 13:55:01 crc kubenswrapper[4857]: E1128 13:55:01.414983 4857 projected.go:263] Couldn't get secret openstack/swift-conf: secret "swift-conf" not found Nov 28 13:55:01 crc kubenswrapper[4857]: E1128 13:55:01.415011 4857 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-proxy-7ddf6b88b7-6dfnw: secret "swift-conf" not found Nov 28 13:55:01 crc kubenswrapper[4857]: E1128 13:55:01.415060 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3f3c4b68-eb9c-466a-accc-51a99bcdac06-etc-swift podName:3f3c4b68-eb9c-466a-accc-51a99bcdac06 nodeName:}" failed. No retries permitted until 2025-11-28 13:55:01.915041088 +0000 UTC m=+1552.038982595 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/3f3c4b68-eb9c-466a-accc-51a99bcdac06-etc-swift") pod "swift-proxy-7ddf6b88b7-6dfnw" (UID: "3f3c4b68-eb9c-466a-accc-51a99bcdac06") : secret "swift-conf" not found Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.423042 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.479143 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinderf448-account-delete-vb5h7"] Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.480486 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinderf448-account-delete-vb5h7" Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.502777 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinderf448-account-delete-vb5h7"] Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.516374 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/464d5189-d9e5-4b18-b383-a7d75a28771b-operator-scripts\") pod \"glance25bf-account-delete-78p7f\" (UID: \"464d5189-d9e5-4b18-b383-a7d75a28771b\") " pod="openstack/glance25bf-account-delete-78p7f" Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.516420 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5kcl9\" (UniqueName: \"kubernetes.io/projected/4c03f40d-958a-49a0-a2f7-54a1f175caf7-kube-api-access-5kcl9\") pod \"neutron0f31-account-delete-t4p7d\" (UID: \"4c03f40d-958a-49a0-a2f7-54a1f175caf7\") " pod="openstack/neutron0f31-account-delete-t4p7d" Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.516616 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4rqxg\" (UniqueName: \"kubernetes.io/projected/464d5189-d9e5-4b18-b383-a7d75a28771b-kube-api-access-4rqxg\") pod \"glance25bf-account-delete-78p7f\" (UID: \"464d5189-d9e5-4b18-b383-a7d75a28771b\") " pod="openstack/glance25bf-account-delete-78p7f" Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.516650 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4c03f40d-958a-49a0-a2f7-54a1f175caf7-operator-scripts\") pod \"neutron0f31-account-delete-t4p7d\" (UID: \"4c03f40d-958a-49a0-a2f7-54a1f175caf7\") " pod="openstack/neutron0f31-account-delete-t4p7d" Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.517361 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4c03f40d-958a-49a0-a2f7-54a1f175caf7-operator-scripts\") pod \"neutron0f31-account-delete-t4p7d\" (UID: \"4c03f40d-958a-49a0-a2f7-54a1f175caf7\") " pod="openstack/neutron0f31-account-delete-t4p7d" Nov 28 13:55:01 crc kubenswrapper[4857]: E1128 13:55:01.517727 4857 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 28 13:55:01 crc kubenswrapper[4857]: E1128 13:55:01.517764 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/e5ec18e7-6719-46dd-b580-303f3da41869-config-data podName:e5ec18e7-6719-46dd-b580-303f3da41869 nodeName:}" failed. No retries permitted until 2025-11-28 13:55:02.017751393 +0000 UTC m=+1552.141692830 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/e5ec18e7-6719-46dd-b580-303f3da41869-config-data") pod "rabbitmq-cell1-server-0" (UID: "e5ec18e7-6719-46dd-b580-303f3da41869") : configmap "rabbitmq-cell1-config-data" not found Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.553473 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-f67hs_7c117ad8-9d37-4e85-b408-e2d77c8331df/openstack-network-exporter/0.log" Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.553542 4857 generic.go:334] "Generic (PLEG): container finished" podID="7c117ad8-9d37-4e85-b408-e2d77c8331df" containerID="0e846cecd8668f3338b50a23dd0e5ca393a261a6642af341de9d050961ee27c0" exitCode=2 Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.553599 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-f67hs" event={"ID":"7c117ad8-9d37-4e85-b408-e2d77c8331df","Type":"ContainerDied","Data":"0e846cecd8668f3338b50a23dd0e5ca393a261a6642af341de9d050961ee27c0"} Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.584571 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5kcl9\" (UniqueName: \"kubernetes.io/projected/4c03f40d-958a-49a0-a2f7-54a1f175caf7-kube-api-access-5kcl9\") pod \"neutron0f31-account-delete-t4p7d\" (UID: \"4c03f40d-958a-49a0-a2f7-54a1f175caf7\") " pod="openstack/neutron0f31-account-delete-t4p7d" Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.619587 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gnvv4\" (UniqueName: \"kubernetes.io/projected/ba0bce06-3761-4bb4-8e35-305dc48b3277-kube-api-access-gnvv4\") pod \"cinderf448-account-delete-vb5h7\" (UID: \"ba0bce06-3761-4bb4-8e35-305dc48b3277\") " pod="openstack/cinderf448-account-delete-vb5h7" Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.619698 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ba0bce06-3761-4bb4-8e35-305dc48b3277-operator-scripts\") pod \"cinderf448-account-delete-vb5h7\" (UID: \"ba0bce06-3761-4bb4-8e35-305dc48b3277\") " pod="openstack/cinderf448-account-delete-vb5h7" Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.619786 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4rqxg\" (UniqueName: \"kubernetes.io/projected/464d5189-d9e5-4b18-b383-a7d75a28771b-kube-api-access-4rqxg\") pod \"glance25bf-account-delete-78p7f\" (UID: \"464d5189-d9e5-4b18-b383-a7d75a28771b\") " pod="openstack/glance25bf-account-delete-78p7f" Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.619857 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/464d5189-d9e5-4b18-b383-a7d75a28771b-operator-scripts\") pod \"glance25bf-account-delete-78p7f\" (UID: \"464d5189-d9e5-4b18-b383-a7d75a28771b\") " pod="openstack/glance25bf-account-delete-78p7f" Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.620617 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/464d5189-d9e5-4b18-b383-a7d75a28771b-operator-scripts\") pod \"glance25bf-account-delete-78p7f\" (UID: \"464d5189-d9e5-4b18-b383-a7d75a28771b\") " pod="openstack/glance25bf-account-delete-78p7f" Nov 28 13:55:01 crc 
kubenswrapper[4857]: I1128 13:55:01.667009 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4rqxg\" (UniqueName: \"kubernetes.io/projected/464d5189-d9e5-4b18-b383-a7d75a28771b-kube-api-access-4rqxg\") pod \"glance25bf-account-delete-78p7f\" (UID: \"464d5189-d9e5-4b18-b383-a7d75a28771b\") " pod="openstack/glance25bf-account-delete-78p7f" Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.675928 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placementf611-account-delete-gckl9"] Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.677096 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placementf611-account-delete-gckl9" Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.683863 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance25bf-account-delete-78p7f" Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.704084 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placementf611-account-delete-gckl9"] Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.719592 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.721813 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gnvv4\" (UniqueName: \"kubernetes.io/projected/ba0bce06-3761-4bb4-8e35-305dc48b3277-kube-api-access-gnvv4\") pod \"cinderf448-account-delete-vb5h7\" (UID: \"ba0bce06-3761-4bb4-8e35-305dc48b3277\") " pod="openstack/cinderf448-account-delete-vb5h7" Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.721893 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ba0bce06-3761-4bb4-8e35-305dc48b3277-operator-scripts\") pod \"cinderf448-account-delete-vb5h7\" (UID: \"ba0bce06-3761-4bb4-8e35-305dc48b3277\") " pod="openstack/cinderf448-account-delete-vb5h7" Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.722747 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ba0bce06-3761-4bb4-8e35-305dc48b3277-operator-scripts\") pod \"cinderf448-account-delete-vb5h7\" (UID: \"ba0bce06-3761-4bb4-8e35-305dc48b3277\") " pod="openstack/cinderf448-account-delete-vb5h7" Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.782591 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gnvv4\" (UniqueName: \"kubernetes.io/projected/ba0bce06-3761-4bb4-8e35-305dc48b3277-kube-api-access-gnvv4\") pod \"cinderf448-account-delete-vb5h7\" (UID: \"ba0bce06-3761-4bb4-8e35-305dc48b3277\") " pod="openstack/cinderf448-account-delete-vb5h7" Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.791022 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-sb67r"] Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.816166 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-sb67r"] Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.828308 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/29b25b8b-8f1c-4f60-b275-f924f1c0812a-operator-scripts\") pod \"placementf611-account-delete-gckl9\" (UID: \"29b25b8b-8f1c-4f60-b275-f924f1c0812a\") " 
pod="openstack/placementf611-account-delete-gckl9" Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.828450 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sq78f\" (UniqueName: \"kubernetes.io/projected/29b25b8b-8f1c-4f60-b275-f924f1c0812a-kube-api-access-sq78f\") pod \"placementf611-account-delete-gckl9\" (UID: \"29b25b8b-8f1c-4f60-b275-f924f1c0812a\") " pod="openstack/placementf611-account-delete-gckl9" Nov 28 13:55:01 crc kubenswrapper[4857]: E1128 13:55:01.828568 4857 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 28 13:55:01 crc kubenswrapper[4857]: E1128 13:55:01.828609 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/e5550a25-04ef-4dde-afd4-627f1df97a90-config-data podName:e5550a25-04ef-4dde-afd4-627f1df97a90 nodeName:}" failed. No retries permitted until 2025-11-28 13:55:02.32859499 +0000 UTC m=+1552.452536427 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/e5550a25-04ef-4dde-afd4-627f1df97a90-config-data") pod "rabbitmq-server-0" (UID: "e5550a25-04ef-4dde-afd4-627f1df97a90") : configmap "rabbitmq-config-data" not found Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.854673 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinderf448-account-delete-vb5h7" Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.875214 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron0f31-account-delete-t4p7d" Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.881017 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-f9rw6"] Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.898006 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-f9rw6"] Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.917470 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-f8mpb"] Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.947098 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-f8mpb"] Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.948564 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sq78f\" (UniqueName: \"kubernetes.io/projected/29b25b8b-8f1c-4f60-b275-f924f1c0812a-kube-api-access-sq78f\") pod \"placementf611-account-delete-gckl9\" (UID: \"29b25b8b-8f1c-4f60-b275-f924f1c0812a\") " pod="openstack/placementf611-account-delete-gckl9" Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.948762 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/29b25b8b-8f1c-4f60-b275-f924f1c0812a-operator-scripts\") pod \"placementf611-account-delete-gckl9\" (UID: \"29b25b8b-8f1c-4f60-b275-f924f1c0812a\") " pod="openstack/placementf611-account-delete-gckl9" Nov 28 13:55:01 crc kubenswrapper[4857]: E1128 13:55:01.952376 4857 projected.go:263] Couldn't get secret openstack/swift-proxy-config-data: secret "swift-proxy-config-data" not found Nov 28 13:55:01 crc kubenswrapper[4857]: E1128 13:55:01.952403 4857 projected.go:263] Couldn't get secret openstack/swift-conf: secret "swift-conf" not found Nov 28 13:55:01 crc kubenswrapper[4857]: E1128 13:55:01.952412 4857 projected.go:288] 
Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 28 13:55:01 crc kubenswrapper[4857]: E1128 13:55:01.952424 4857 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-proxy-7ddf6b88b7-6dfnw: [secret "swift-proxy-config-data" not found, secret "swift-conf" not found, configmap "swift-ring-files" not found] Nov 28 13:55:01 crc kubenswrapper[4857]: E1128 13:55:01.952502 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3f3c4b68-eb9c-466a-accc-51a99bcdac06-etc-swift podName:3f3c4b68-eb9c-466a-accc-51a99bcdac06 nodeName:}" failed. No retries permitted until 2025-11-28 13:55:02.952485193 +0000 UTC m=+1553.076426630 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/3f3c4b68-eb9c-466a-accc-51a99bcdac06-etc-swift") pod "swift-proxy-7ddf6b88b7-6dfnw" (UID: "3f3c4b68-eb9c-466a-accc-51a99bcdac06") : [secret "swift-proxy-config-data" not found, secret "swift-conf" not found, configmap "swift-ring-files" not found] Nov 28 13:55:01 crc kubenswrapper[4857]: I1128 13:55:01.956435 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/29b25b8b-8f1c-4f60-b275-f924f1c0812a-operator-scripts\") pod \"placementf611-account-delete-gckl9\" (UID: \"29b25b8b-8f1c-4f60-b275-f924f1c0812a\") " pod="openstack/placementf611-account-delete-gckl9" Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.012185 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sq78f\" (UniqueName: \"kubernetes.io/projected/29b25b8b-8f1c-4f60-b275-f924f1c0812a-kube-api-access-sq78f\") pod \"placementf611-account-delete-gckl9\" (UID: \"29b25b8b-8f1c-4f60-b275-f924f1c0812a\") " pod="openstack/placementf611-account-delete-gckl9" Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.039028 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.039390 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="e6597fde-8e34-4ccb-8784-1b7aa3680488" containerName="ovn-northd" containerID="cri-o://cb2e8e2db5b421b3011ac3c2410e936a5d71fda6c184fdd57c1dd60cdbcfc09b" gracePeriod=30 Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.039754 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="e6597fde-8e34-4ccb-8784-1b7aa3680488" containerName="openstack-network-exporter" containerID="cri-o://f18e195c94e7d7c3606c3e2d5bbadff3adfb1a2f922e9e1872782f709251b377" gracePeriod=30 Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.057292 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placementf611-account-delete-gckl9" Nov 28 13:55:02 crc kubenswrapper[4857]: E1128 13:55:02.096550 4857 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 28 13:55:02 crc kubenswrapper[4857]: E1128 13:55:02.096917 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/e5ec18e7-6719-46dd-b580-303f3da41869-config-data podName:e5ec18e7-6719-46dd-b580-303f3da41869 nodeName:}" failed. No retries permitted until 2025-11-28 13:55:03.096898346 +0000 UTC m=+1553.220839783 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/e5ec18e7-6719-46dd-b580-303f3da41869-config-data") pod "rabbitmq-cell1-server-0" (UID: "e5ec18e7-6719-46dd-b580-303f3da41869") : configmap "rabbitmq-cell1-config-data" not found Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.097037 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican8199-account-delete-z9b9h"] Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.099252 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican8199-account-delete-z9b9h" Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.171010 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican8199-account-delete-z9b9h"] Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.201235 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd-operator-scripts\") pod \"barbican8199-account-delete-z9b9h\" (UID: \"2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd\") " pod="openstack/barbican8199-account-delete-z9b9h" Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.201298 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2f9bt\" (UniqueName: \"kubernetes.io/projected/2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd-kube-api-access-2f9bt\") pod \"barbican8199-account-delete-z9b9h\" (UID: \"2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd\") " pod="openstack/barbican8199-account-delete-z9b9h" Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.303064 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="06f8c64b-0075-49d3-a2ae-0ecc1d03232a" path="/var/lib/kubelet/pods/06f8c64b-0075-49d3-a2ae-0ecc1d03232a/volumes" Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.306004 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed" path="/var/lib/kubelet/pods/55024db7-fbb4-4f04-9a7d-1cfbad0bb3ed/volumes" Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.308979 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd-operator-scripts\") pod \"barbican8199-account-delete-z9b9h\" (UID: \"2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd\") " pod="openstack/barbican8199-account-delete-z9b9h" Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.309020 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2f9bt\" (UniqueName: \"kubernetes.io/projected/2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd-kube-api-access-2f9bt\") pod \"barbican8199-account-delete-z9b9h\" (UID: \"2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd\") " pod="openstack/barbican8199-account-delete-z9b9h" Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.323767 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aebc3704-6a31-4813-8826-622ffb7f6934" path="/var/lib/kubelet/pods/aebc3704-6a31-4813-8826-622ffb7f6934/volumes" Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.324873 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-hw6nk"] Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.324899 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-hw6nk"] Nov 28 13:55:02 crc 
kubenswrapper[4857]: I1128 13:55:02.324913 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"] Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.325466 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="account-server" containerID="cri-o://997f645a4bf32bedcf75f896750b179b3b64c864f0aa44fb1505c0ce4a2004d3" gracePeriod=30 Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.325905 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="swift-recon-cron" containerID="cri-o://cf76c7b61e8171e795785fc16f94d5afd912c93529cbd3ce652f846abc4be50f" gracePeriod=30 Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.326308 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="container-updater" containerID="cri-o://bd6d4921953db76ad5068a581723271ca2d557ab2915b227c4ec3a2e35dbb714" gracePeriod=30 Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.326433 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="container-auditor" containerID="cri-o://2761311c0d0657f7895e8425cee039b5e9d1f6b44d147413193ce1a637e0d206" gracePeriod=30 Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.326503 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="rsync" containerID="cri-o://75a66b9748e2f49b7fe56ad9c99da91918be5cc9c7c5b50c82a8f29587c6dd41" gracePeriod=30 Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.326528 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="container-replicator" containerID="cri-o://043905c220a58f2cc3f72c7c5ff10d14437639fd54c68815c19eb821d3f8691b" gracePeriod=30 Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.326557 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="object-expirer" containerID="cri-o://9636087b646b01f79b8ba6470a873b59e807cd17b6f1e033005a6e5655b75269" gracePeriod=30 Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.326601 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="container-server" containerID="cri-o://336ae6d3cd985dc92a4379227cc3a7df30cdbac847520f8327e5fce00a85e01c" gracePeriod=30 Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.326605 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="object-updater" containerID="cri-o://f27c04a27d6f3fb5f92f4acda7c42185f0f649ff426b2f0c0fc82c87eb5c2df2" gracePeriod=30 Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.326665 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="object-auditor" 
containerID="cri-o://1dba2da3e5dc861fc5c8a1daae2fd1574ec3c6ba40ffdb0cbfcb19d46be889c1" gracePeriod=30 Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.326673 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="account-reaper" containerID="cri-o://684d617680cd018b845bc46ea83aaf268880e5e96d90c1f4c74455668100a5fb" gracePeriod=30 Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.326711 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="object-replicator" containerID="cri-o://797af21b1a27903599b036f4c694bee114318cb17d785c9ef036ff8854701e9f" gracePeriod=30 Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.326742 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="account-auditor" containerID="cri-o://7f8c7069308f0a6173f6d99219cd32e4aee978bcd30684463830f8831fa3dc54" gracePeriod=30 Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.326759 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="object-server" containerID="cri-o://fca2aa8f676e96e45c93a697eeabe06bb4bf6351b5998989a5d94cd1c765da97" gracePeriod=30 Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.326805 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="account-replicator" containerID="cri-o://4e216ae629f3bf57169773aa0918d2098ca8722a57b52a0b59f920ee5fe40042" gracePeriod=30 Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.331010 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-g4slf"] Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.336031 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd-operator-scripts\") pod \"barbican8199-account-delete-z9b9h\" (UID: \"2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd\") " pod="openstack/barbican8199-account-delete-z9b9h" Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.350107 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-g4slf"] Nov 28 13:55:02 crc kubenswrapper[4857]: E1128 13:55:02.412207 4857 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 28 13:55:02 crc kubenswrapper[4857]: E1128 13:55:02.412478 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/e5550a25-04ef-4dde-afd4-627f1df97a90-config-data podName:e5550a25-04ef-4dde-afd4-627f1df97a90 nodeName:}" failed. No retries permitted until 2025-11-28 13:55:03.412463378 +0000 UTC m=+1553.536404815 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/e5550a25-04ef-4dde-afd4-627f1df97a90-config-data") pod "rabbitmq-server-0" (UID: "e5550a25-04ef-4dde-afd4-627f1df97a90") : configmap "rabbitmq-config-data" not found Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.413498 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2f9bt\" (UniqueName: \"kubernetes.io/projected/2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd-kube-api-access-2f9bt\") pod \"barbican8199-account-delete-z9b9h\" (UID: \"2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd\") " pod="openstack/barbican8199-account-delete-z9b9h" Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.481313 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/novacell196aa-account-delete-rnjfc"] Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.482804 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novacell196aa-account-delete-rnjfc" Nov 28 13:55:02 crc kubenswrapper[4857]: E1128 13:55:02.499530 4857 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err="command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: " execCommand=["/usr/share/ovn/scripts/ovn-ctl","stop_controller"] containerName="ovn-controller" pod="openstack/ovn-controller-zhh8w" message=< Nov 28 13:55:02 crc kubenswrapper[4857]: Exiting ovn-controller (1) [ OK ] Nov 28 13:55:02 crc kubenswrapper[4857]: > Nov 28 13:55:02 crc kubenswrapper[4857]: E1128 13:55:02.499570 4857 kuberuntime_container.go:691] "PreStop hook failed" err="command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: " pod="openstack/ovn-controller-zhh8w" podUID="de8730d5-79df-4483-a263-1dd72a7ee079" containerName="ovn-controller" containerID="cri-o://7f8d74bb0a16cfc92269e5127a4271c686a559e0fa2c0a91afa634e529d6544e" Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.499619 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-zhh8w" podUID="de8730d5-79df-4483-a263-1dd72a7ee079" containerName="ovn-controller" containerID="cri-o://7f8d74bb0a16cfc92269e5127a4271c686a559e0fa2c0a91afa634e529d6544e" gracePeriod=29 Nov 28 13:55:02 crc kubenswrapper[4857]: E1128 13:55:02.501359 4857 log.go:32] "ExecSync cmd from runtime service failed" err=< Nov 28 13:55:02 crc kubenswrapper[4857]: rpc error: code = Unknown desc = command error: setns `mnt`: Bad file descriptor Nov 28 13:55:02 crc kubenswrapper[4857]: fail startup Nov 28 13:55:02 crc kubenswrapper[4857]: , stdout: , stderr: , exit code -1 Nov 28 13:55:02 crc kubenswrapper[4857]: > containerID="7f8d74bb0a16cfc92269e5127a4271c686a559e0fa2c0a91afa634e529d6544e" cmd=["/usr/local/bin/container-scripts/ovn_controller_readiness.sh"] Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.504413 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican8199-account-delete-z9b9h" Nov 28 13:55:02 crc kubenswrapper[4857]: E1128 13:55:02.520831 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7f8d74bb0a16cfc92269e5127a4271c686a559e0fa2c0a91afa634e529d6544e is running failed: container process not found" containerID="7f8d74bb0a16cfc92269e5127a4271c686a559e0fa2c0a91afa634e529d6544e" cmd=["/usr/local/bin/container-scripts/ovn_controller_readiness.sh"] Nov 28 13:55:02 crc kubenswrapper[4857]: E1128 13:55:02.541746 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7f8d74bb0a16cfc92269e5127a4271c686a559e0fa2c0a91afa634e529d6544e is running failed: container process not found" containerID="7f8d74bb0a16cfc92269e5127a4271c686a559e0fa2c0a91afa634e529d6544e" cmd=["/usr/local/bin/container-scripts/ovn_controller_readiness.sh"] Nov 28 13:55:02 crc kubenswrapper[4857]: E1128 13:55:02.541813 4857 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7f8d74bb0a16cfc92269e5127a4271c686a559e0fa2c0a91afa634e529d6544e is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-zhh8w" podUID="de8730d5-79df-4483-a263-1dd72a7ee079" containerName="ovn-controller" Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.572763 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novacell196aa-account-delete-rnjfc"] Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.636095 4857 generic.go:334] "Generic (PLEG): container finished" podID="e6597fde-8e34-4ccb-8784-1b7aa3680488" containerID="f18e195c94e7d7c3606c3e2d5bbadff3adfb1a2f922e9e1872782f709251b377" exitCode=2 Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.636198 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"e6597fde-8e34-4ccb-8784-1b7aa3680488","Type":"ContainerDied","Data":"f18e195c94e7d7c3606c3e2d5bbadff3adfb1a2f922e9e1872782f709251b377"} Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.673437 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2pq68\" (UniqueName: \"kubernetes.io/projected/f1ee99c5-4515-45fd-ad45-cd7d96f85c10-kube-api-access-2pq68\") pod \"novacell196aa-account-delete-rnjfc\" (UID: \"f1ee99c5-4515-45fd-ad45-cd7d96f85c10\") " pod="openstack/novacell196aa-account-delete-rnjfc" Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.673534 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f1ee99c5-4515-45fd-ad45-cd7d96f85c10-operator-scripts\") pod \"novacell196aa-account-delete-rnjfc\" (UID: \"f1ee99c5-4515-45fd-ad45-cd7d96f85c10\") " pod="openstack/novacell196aa-account-delete-rnjfc" Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.673937 4857 generic.go:334] "Generic (PLEG): container finished" podID="de8730d5-79df-4483-a263-1dd72a7ee079" containerID="7f8d74bb0a16cfc92269e5127a4271c686a559e0fa2c0a91afa634e529d6544e" exitCode=0 Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.674004 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-zhh8w" 
event={"ID":"de8730d5-79df-4483-a263-1dd72a7ee079","Type":"ContainerDied","Data":"7f8d74bb0a16cfc92269e5127a4271c686a559e0fa2c0a91afa634e529d6544e"} Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.692298 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-j66cj"] Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.710865 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/novacell0837f-account-delete-fhf8k"] Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.712147 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novacell0837f-account-delete-fhf8k" Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.763425 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-j66cj"] Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.798608 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-7frjn"] Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.798922 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-cd5cbd7b9-7frjn" podUID="bf608325-3713-4ff6-8e16-c7993618ef71" containerName="dnsmasq-dns" containerID="cri-o://d13a0f999cf39e2cf71564829a093c201bf5675270b0ded0fe06d26418a0bf83" gracePeriod=10 Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.808549 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e1db40e4-bf66-4d82-aa94-c54d44513220-operator-scripts\") pod \"novacell0837f-account-delete-fhf8k\" (UID: \"e1db40e4-bf66-4d82-aa94-c54d44513220\") " pod="openstack/novacell0837f-account-delete-fhf8k" Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.808622 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hrgvk\" (UniqueName: \"kubernetes.io/projected/e1db40e4-bf66-4d82-aa94-c54d44513220-kube-api-access-hrgvk\") pod \"novacell0837f-account-delete-fhf8k\" (UID: \"e1db40e4-bf66-4d82-aa94-c54d44513220\") " pod="openstack/novacell0837f-account-delete-fhf8k" Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.808791 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2pq68\" (UniqueName: \"kubernetes.io/projected/f1ee99c5-4515-45fd-ad45-cd7d96f85c10-kube-api-access-2pq68\") pod \"novacell196aa-account-delete-rnjfc\" (UID: \"f1ee99c5-4515-45fd-ad45-cd7d96f85c10\") " pod="openstack/novacell196aa-account-delete-rnjfc" Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.808837 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f1ee99c5-4515-45fd-ad45-cd7d96f85c10-operator-scripts\") pod \"novacell196aa-account-delete-rnjfc\" (UID: \"f1ee99c5-4515-45fd-ad45-cd7d96f85c10\") " pod="openstack/novacell196aa-account-delete-rnjfc" Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.809867 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f1ee99c5-4515-45fd-ad45-cd7d96f85c10-operator-scripts\") pod \"novacell196aa-account-delete-rnjfc\" (UID: \"f1ee99c5-4515-45fd-ad45-cd7d96f85c10\") " pod="openstack/novacell196aa-account-delete-rnjfc" Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.823427 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/ovsdbserver-sb-0"] Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.823678 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="2dfd7b5e-9e1b-4f85-9933-2f3f55cee399" containerName="openstack-network-exporter" containerID="cri-o://028f12410d5a79779844276523096407b815d65be988d8365264dc7330864432" gracePeriod=300 Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.868794 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2pq68\" (UniqueName: \"kubernetes.io/projected/f1ee99c5-4515-45fd-ad45-cd7d96f85c10-kube-api-access-2pq68\") pod \"novacell196aa-account-delete-rnjfc\" (UID: \"f1ee99c5-4515-45fd-ad45-cd7d96f85c10\") " pod="openstack/novacell196aa-account-delete-rnjfc" Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.870076 4857 generic.go:334] "Generic (PLEG): container finished" podID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerID="bd6d4921953db76ad5068a581723271ca2d557ab2915b227c4ec3a2e35dbb714" exitCode=0 Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.867239 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-6b7846d5d5-ddbqf"] Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.878141 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7dacf187-3671-4114-a93e-e2296c8c20b2","Type":"ContainerDied","Data":"bd6d4921953db76ad5068a581723271ca2d557ab2915b227c4ec3a2e35dbb714"} Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.878443 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-6b7846d5d5-ddbqf" podUID="960b2298-15f9-436b-93c9-04b0617c0c9b" containerName="neutron-api" containerID="cri-o://f03c59fbce1ae6f2cf02e7c1ad44973e4ba6366fbbed5e71f01df183b4e8db9c" gracePeriod=30 Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.878884 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-6b7846d5d5-ddbqf" podUID="960b2298-15f9-436b-93c9-04b0617c0c9b" containerName="neutron-httpd" containerID="cri-o://47bbdb9e28bdbe71f7a851efff32886322b17e3b9bc0735fe15a53e277caf7fc" gracePeriod=30 Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.907572 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.908140 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="dceb7667-07bc-486b-b65f-c87427949ffd" containerName="openstack-network-exporter" containerID="cri-o://97ea011d40feafd9e10daf1c4217debdca8ad06ea7b693c5eca7afa22e62ac1f" gracePeriod=300 Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.910799 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hrgvk\" (UniqueName: \"kubernetes.io/projected/e1db40e4-bf66-4d82-aa94-c54d44513220-kube-api-access-hrgvk\") pod \"novacell0837f-account-delete-fhf8k\" (UID: \"e1db40e4-bf66-4d82-aa94-c54d44513220\") " pod="openstack/novacell0837f-account-delete-fhf8k" Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.911207 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e1db40e4-bf66-4d82-aa94-c54d44513220-operator-scripts\") pod \"novacell0837f-account-delete-fhf8k\" (UID: \"e1db40e4-bf66-4d82-aa94-c54d44513220\") " 
pod="openstack/novacell0837f-account-delete-fhf8k" Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.912348 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e1db40e4-bf66-4d82-aa94-c54d44513220-operator-scripts\") pod \"novacell0837f-account-delete-fhf8k\" (UID: \"e1db40e4-bf66-4d82-aa94-c54d44513220\") " pod="openstack/novacell0837f-account-delete-fhf8k" Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.917655 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novacell0837f-account-delete-fhf8k"] Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.957838 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hrgvk\" (UniqueName: \"kubernetes.io/projected/e1db40e4-bf66-4d82-aa94-c54d44513220-kube-api-access-hrgvk\") pod \"novacell0837f-account-delete-fhf8k\" (UID: \"e1db40e4-bf66-4d82-aa94-c54d44513220\") " pod="openstack/novacell0837f-account-delete-fhf8k" Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.988121 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="2dfd7b5e-9e1b-4f85-9933-2f3f55cee399" containerName="ovsdbserver-sb" containerID="cri-o://bad96dcf4a4f03f679f428d14fce3c35afb403afb6b9ec23a4c23d134a347cf3" gracePeriod=300 Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.992614 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.992892 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd" containerName="glance-log" containerID="cri-o://9d3d6c09860350f192015fe259ed4a4581cfb3b28583b924e843bda2835e7fb6" gracePeriod=30 Nov 28 13:55:02 crc kubenswrapper[4857]: I1128 13:55:02.993102 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd" containerName="glance-httpd" containerID="cri-o://7eaa667398c3e224c563e132d00cba92457ba8acc83d7c4ef18995674225d29a" gracePeriod=30 Nov 28 13:55:03 crc kubenswrapper[4857]: E1128 13:55:03.013460 4857 projected.go:263] Couldn't get secret openstack/swift-proxy-config-data: secret "swift-proxy-config-data" not found Nov 28 13:55:03 crc kubenswrapper[4857]: E1128 13:55:03.013689 4857 projected.go:263] Couldn't get secret openstack/swift-conf: secret "swift-conf" not found Nov 28 13:55:03 crc kubenswrapper[4857]: E1128 13:55:03.013701 4857 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 28 13:55:03 crc kubenswrapper[4857]: E1128 13:55:03.013714 4857 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-proxy-7ddf6b88b7-6dfnw: [secret "swift-proxy-config-data" not found, secret "swift-conf" not found, configmap "swift-ring-files" not found] Nov 28 13:55:03 crc kubenswrapper[4857]: E1128 13:55:03.013766 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3f3c4b68-eb9c-466a-accc-51a99bcdac06-etc-swift podName:3f3c4b68-eb9c-466a-accc-51a99bcdac06 nodeName:}" failed. No retries permitted until 2025-11-28 13:55:05.013748215 +0000 UTC m=+1555.137689652 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/3f3c4b68-eb9c-466a-accc-51a99bcdac06-etc-swift") pod "swift-proxy-7ddf6b88b7-6dfnw" (UID: "3f3c4b68-eb9c-466a-accc-51a99bcdac06") : [secret "swift-proxy-config-data" not found, secret "swift-conf" not found, configmap "swift-ring-files" not found] Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.027267 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novacell196aa-account-delete-rnjfc" Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.027734 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/novaapi379c-account-delete-7x584"] Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.029330 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novaapi379c-account-delete-7x584" Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.046623 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novaapi379c-account-delete-7x584"] Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.055142 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.055384 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c" containerName="cinder-scheduler" containerID="cri-o://e34bc2b2baed17dc94ca9dc3ed8f2bf2da5b6e0bf67630dbb959a6581e276f47" gracePeriod=30 Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.055836 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c" containerName="probe" containerID="cri-o://949cd8e48f0fc8953dd188f71049a00ec864bfbb7da60444a68266e465fb949e" gracePeriod=30 Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.075349 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-f67hs_7c117ad8-9d37-4e85-b408-e2d77c8331df/openstack-network-exporter/0.log" Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.075421 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-f67hs" Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.085136 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.085371 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e" containerName="glance-log" containerID="cri-o://0e9047254aa31e8c764eaf4e0c00c00b8b889bb9a2e0ba225c71bf05a09cdad0" gracePeriod=30 Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.085534 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e" containerName="glance-httpd" containerID="cri-o://d99386d5fb6210356b4912e9ae342dc21823ae6279091264ad6b9328047690e7" gracePeriod=30 Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.115178 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m7pgl\" (UniqueName: \"kubernetes.io/projected/8288e005-1d07-4989-bc64-64b3ecd62993-kube-api-access-m7pgl\") pod \"novaapi379c-account-delete-7x584\" (UID: \"8288e005-1d07-4989-bc64-64b3ecd62993\") " pod="openstack/novaapi379c-account-delete-7x584" Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.115299 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8288e005-1d07-4989-bc64-64b3ecd62993-operator-scripts\") pod \"novaapi379c-account-delete-7x584\" (UID: \"8288e005-1d07-4989-bc64-64b3ecd62993\") " pod="openstack/novaapi379c-account-delete-7x584" Nov 28 13:55:03 crc kubenswrapper[4857]: E1128 13:55:03.116217 4857 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 28 13:55:03 crc kubenswrapper[4857]: E1128 13:55:03.116274 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/e5ec18e7-6719-46dd-b580-303f3da41869-config-data podName:e5ec18e7-6719-46dd-b580-303f3da41869 nodeName:}" failed. No retries permitted until 2025-11-28 13:55:05.116259495 +0000 UTC m=+1555.240200932 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/e5ec18e7-6719-46dd-b580-303f3da41869-config-data") pod "rabbitmq-cell1-server-0" (UID: "e5ec18e7-6719-46dd-b580-303f3da41869") : configmap "rabbitmq-cell1-config-data" not found Nov 28 13:55:03 crc kubenswrapper[4857]: E1128 13:55:03.116717 4857 configmap.go:193] Couldn't get configMap openstack/openstack-cell1-scripts: configmap "openstack-cell1-scripts" not found Nov 28 13:55:03 crc kubenswrapper[4857]: E1128 13:55:03.116741 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f1ee99c5-4515-45fd-ad45-cd7d96f85c10-operator-scripts podName:f1ee99c5-4515-45fd-ad45-cd7d96f85c10 nodeName:}" failed. No retries permitted until 2025-11-28 13:55:03.616732967 +0000 UTC m=+1553.740674404 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/f1ee99c5-4515-45fd-ad45-cd7d96f85c10-operator-scripts") pod "novacell196aa-account-delete-rnjfc" (UID: "f1ee99c5-4515-45fd-ad45-cd7d96f85c10") : configmap "openstack-cell1-scripts" not found Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.134036 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.134375 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="d973a56d-fc8e-4cef-a590-d21d32242dc4" containerName="cinder-api-log" containerID="cri-o://2393368f1d7599e575bd2865623f799eecabedba72c2c1b25e2a5dd440954069" gracePeriod=30 Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.134968 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="d973a56d-fc8e-4cef-a590-d21d32242dc4" containerName="cinder-api" containerID="cri-o://64a76382fda6f51faa4f808e7cb3cf14ee31449fa967229b15df56ca85a66806" gracePeriod=30 Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.139144 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="dceb7667-07bc-486b-b65f-c87427949ffd" containerName="ovsdbserver-nb" containerID="cri-o://a8600b616c8309585de43967afe2ae78b2ef2823ae669a2d23786e69e65b2a4a" gracePeriod=300 Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.140041 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novacell0837f-account-delete-fhf8k" Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.164016 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-794cbbfc48-m96jr"] Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.164341 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-794cbbfc48-m96jr" podUID="0944133e-cee5-4927-8f5e-8f781b30d224" containerName="placement-log" containerID="cri-o://5f6e16bc3a6a3e255193ac8544c8911785ede7f13bf7ff1f4eb96c9e2ab1632c" gracePeriod=30 Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.164755 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-794cbbfc48-m96jr" podUID="0944133e-cee5-4927-8f5e-8f781b30d224" containerName="placement-api" containerID="cri-o://10bea3eb3ec5b5f1dc962843d2fb1b65bf5e100082e1824518a74a4bdbf6d742" gracePeriod=30 Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.211241 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-2d754"] Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.222555 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c117ad8-9d37-4e85-b408-e2d77c8331df-config\") pod \"7c117ad8-9d37-4e85-b408-e2d77c8331df\" (UID: \"7c117ad8-9d37-4e85-b408-e2d77c8331df\") " Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.222654 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7c117ad8-9d37-4e85-b408-e2d77c8331df-metrics-certs-tls-certs\") pod \"7c117ad8-9d37-4e85-b408-e2d77c8331df\" (UID: \"7c117ad8-9d37-4e85-b408-e2d77c8331df\") " Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.222770 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started 
for volume \"kube-api-access-m2mpc\" (UniqueName: \"kubernetes.io/projected/7c117ad8-9d37-4e85-b408-e2d77c8331df-kube-api-access-m2mpc\") pod \"7c117ad8-9d37-4e85-b408-e2d77c8331df\" (UID: \"7c117ad8-9d37-4e85-b408-e2d77c8331df\") " Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.222861 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/7c117ad8-9d37-4e85-b408-e2d77c8331df-ovs-rundir\") pod \"7c117ad8-9d37-4e85-b408-e2d77c8331df\" (UID: \"7c117ad8-9d37-4e85-b408-e2d77c8331df\") " Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.222881 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c117ad8-9d37-4e85-b408-e2d77c8331df-combined-ca-bundle\") pod \"7c117ad8-9d37-4e85-b408-e2d77c8331df\" (UID: \"7c117ad8-9d37-4e85-b408-e2d77c8331df\") " Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.222909 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/7c117ad8-9d37-4e85-b408-e2d77c8331df-ovn-rundir\") pod \"7c117ad8-9d37-4e85-b408-e2d77c8331df\" (UID: \"7c117ad8-9d37-4e85-b408-e2d77c8331df\") " Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.223205 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m7pgl\" (UniqueName: \"kubernetes.io/projected/8288e005-1d07-4989-bc64-64b3ecd62993-kube-api-access-m7pgl\") pod \"novaapi379c-account-delete-7x584\" (UID: \"8288e005-1d07-4989-bc64-64b3ecd62993\") " pod="openstack/novaapi379c-account-delete-7x584" Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.223313 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8288e005-1d07-4989-bc64-64b3ecd62993-operator-scripts\") pod \"novaapi379c-account-delete-7x584\" (UID: \"8288e005-1d07-4989-bc64-64b3ecd62993\") " pod="openstack/novaapi379c-account-delete-7x584" Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.233170 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7c117ad8-9d37-4e85-b408-e2d77c8331df-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "7c117ad8-9d37-4e85-b408-e2d77c8331df" (UID: "7c117ad8-9d37-4e85-b408-e2d77c8331df"). InnerVolumeSpecName "ovn-rundir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.234515 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8288e005-1d07-4989-bc64-64b3ecd62993-operator-scripts\") pod \"novaapi379c-account-delete-7x584\" (UID: \"8288e005-1d07-4989-bc64-64b3ecd62993\") " pod="openstack/novaapi379c-account-delete-7x584" Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.235308 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7c117ad8-9d37-4e85-b408-e2d77c8331df-config" (OuterVolumeSpecName: "config") pod "7c117ad8-9d37-4e85-b408-e2d77c8331df" (UID: "7c117ad8-9d37-4e85-b408-e2d77c8331df"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.235602 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7c117ad8-9d37-4e85-b408-e2d77c8331df-ovs-rundir" (OuterVolumeSpecName: "ovs-rundir") pod "7c117ad8-9d37-4e85-b408-e2d77c8331df" (UID: "7c117ad8-9d37-4e85-b408-e2d77c8331df"). InnerVolumeSpecName "ovs-rundir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.263548 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-2d754"] Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.292355 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c117ad8-9d37-4e85-b408-e2d77c8331df-kube-api-access-m2mpc" (OuterVolumeSpecName: "kube-api-access-m2mpc") pod "7c117ad8-9d37-4e85-b408-e2d77c8331df" (UID: "7c117ad8-9d37-4e85-b408-e2d77c8331df"). InnerVolumeSpecName "kube-api-access-m2mpc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.318709 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m7pgl\" (UniqueName: \"kubernetes.io/projected/8288e005-1d07-4989-bc64-64b3ecd62993-kube-api-access-m7pgl\") pod \"novaapi379c-account-delete-7x584\" (UID: \"8288e005-1d07-4989-bc64-64b3ecd62993\") " pod="openstack/novaapi379c-account-delete-7x584" Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.355488 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m2mpc\" (UniqueName: \"kubernetes.io/projected/7c117ad8-9d37-4e85-b408-e2d77c8331df-kube-api-access-m2mpc\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.355556 4857 reconciler_common.go:293] "Volume detached for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/7c117ad8-9d37-4e85-b408-e2d77c8331df-ovs-rundir\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.355568 4857 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/7c117ad8-9d37-4e85-b408-e2d77c8331df-ovn-rundir\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.355581 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7c117ad8-9d37-4e85-b408-e2d77c8331df-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.363215 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-qxt8d"] Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.388264 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-qxt8d"] Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.441392 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-7ddf6b88b7-6dfnw"] Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.442000 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" podUID="3f3c4b68-eb9c-466a-accc-51a99bcdac06" containerName="proxy-server" containerID="cri-o://cf34788b7144b8ed3875b49b5d2b5b6c3a9ecc3f59a04c12ee7eae6c0054ca0c" gracePeriod=30 Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.442368 4857 kuberuntime_container.go:808] "Killing container with a grace 
period" pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" podUID="3f3c4b68-eb9c-466a-accc-51a99bcdac06" containerName="proxy-httpd" containerID="cri-o://51f6521ceec6d8813ef4ec98da770b7f10abace9ce85b5db6afc3223f7041703" gracePeriod=30 Nov 28 13:55:03 crc kubenswrapper[4857]: E1128 13:55:03.500577 4857 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 28 13:55:03 crc kubenswrapper[4857]: E1128 13:55:03.500642 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/e5550a25-04ef-4dde-afd4-627f1df97a90-config-data podName:e5550a25-04ef-4dde-afd4-627f1df97a90 nodeName:}" failed. No retries permitted until 2025-11-28 13:55:05.500627448 +0000 UTC m=+1555.624568875 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/e5550a25-04ef-4dde-afd4-627f1df97a90-config-data") pod "rabbitmq-server-0" (UID: "e5550a25-04ef-4dde-afd4-627f1df97a90") : configmap "rabbitmq-config-data" not found Nov 28 13:55:03 crc kubenswrapper[4857]: E1128 13:55:03.505667 4857 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err=< Nov 28 13:55:03 crc kubenswrapper[4857]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh Nov 28 13:55:03 crc kubenswrapper[4857]: + source /usr/local/bin/container-scripts/functions Nov 28 13:55:03 crc kubenswrapper[4857]: ++ OVNBridge=br-int Nov 28 13:55:03 crc kubenswrapper[4857]: ++ OVNRemote=tcp:localhost:6642 Nov 28 13:55:03 crc kubenswrapper[4857]: ++ OVNEncapType=geneve Nov 28 13:55:03 crc kubenswrapper[4857]: ++ OVNAvailabilityZones= Nov 28 13:55:03 crc kubenswrapper[4857]: ++ EnableChassisAsGateway=true Nov 28 13:55:03 crc kubenswrapper[4857]: ++ PhysicalNetworks= Nov 28 13:55:03 crc kubenswrapper[4857]: ++ OVNHostName= Nov 28 13:55:03 crc kubenswrapper[4857]: ++ DB_FILE=/etc/openvswitch/conf.db Nov 28 13:55:03 crc kubenswrapper[4857]: ++ ovs_dir=/var/lib/openvswitch Nov 28 13:55:03 crc kubenswrapper[4857]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script Nov 28 13:55:03 crc kubenswrapper[4857]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows Nov 28 13:55:03 crc kubenswrapper[4857]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 28 13:55:03 crc kubenswrapper[4857]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 28 13:55:03 crc kubenswrapper[4857]: + sleep 0.5 Nov 28 13:55:03 crc kubenswrapper[4857]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 28 13:55:03 crc kubenswrapper[4857]: + sleep 0.5 Nov 28 13:55:03 crc kubenswrapper[4857]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 28 13:55:03 crc kubenswrapper[4857]: + sleep 0.5 Nov 28 13:55:03 crc kubenswrapper[4857]: + '[' '!' 
-f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 28 13:55:03 crc kubenswrapper[4857]: + cleanup_ovsdb_server_semaphore Nov 28 13:55:03 crc kubenswrapper[4857]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 28 13:55:03 crc kubenswrapper[4857]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd Nov 28 13:55:03 crc kubenswrapper[4857]: > execCommand=["/usr/local/bin/container-scripts/stop-ovsdb-server.sh"] containerName="ovsdb-server" pod="openstack/ovn-controller-ovs-hgm54" message=< Nov 28 13:55:03 crc kubenswrapper[4857]: Exiting ovsdb-server (5) [ OK ] Nov 28 13:55:03 crc kubenswrapper[4857]: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh Nov 28 13:55:03 crc kubenswrapper[4857]: + source /usr/local/bin/container-scripts/functions Nov 28 13:55:03 crc kubenswrapper[4857]: ++ OVNBridge=br-int Nov 28 13:55:03 crc kubenswrapper[4857]: ++ OVNRemote=tcp:localhost:6642 Nov 28 13:55:03 crc kubenswrapper[4857]: ++ OVNEncapType=geneve Nov 28 13:55:03 crc kubenswrapper[4857]: ++ OVNAvailabilityZones= Nov 28 13:55:03 crc kubenswrapper[4857]: ++ EnableChassisAsGateway=true Nov 28 13:55:03 crc kubenswrapper[4857]: ++ PhysicalNetworks= Nov 28 13:55:03 crc kubenswrapper[4857]: ++ OVNHostName= Nov 28 13:55:03 crc kubenswrapper[4857]: ++ DB_FILE=/etc/openvswitch/conf.db Nov 28 13:55:03 crc kubenswrapper[4857]: ++ ovs_dir=/var/lib/openvswitch Nov 28 13:55:03 crc kubenswrapper[4857]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script Nov 28 13:55:03 crc kubenswrapper[4857]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows Nov 28 13:55:03 crc kubenswrapper[4857]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 28 13:55:03 crc kubenswrapper[4857]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 28 13:55:03 crc kubenswrapper[4857]: + sleep 0.5 Nov 28 13:55:03 crc kubenswrapper[4857]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 28 13:55:03 crc kubenswrapper[4857]: + sleep 0.5 Nov 28 13:55:03 crc kubenswrapper[4857]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 28 13:55:03 crc kubenswrapper[4857]: + sleep 0.5 Nov 28 13:55:03 crc kubenswrapper[4857]: + '[' '!' 
-f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 28 13:55:03 crc kubenswrapper[4857]: + cleanup_ovsdb_server_semaphore Nov 28 13:55:03 crc kubenswrapper[4857]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 28 13:55:03 crc kubenswrapper[4857]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd Nov 28 13:55:03 crc kubenswrapper[4857]: > Nov 28 13:55:03 crc kubenswrapper[4857]: E1128 13:55:03.505709 4857 kuberuntime_container.go:691] "PreStop hook failed" err=< Nov 28 13:55:03 crc kubenswrapper[4857]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh Nov 28 13:55:03 crc kubenswrapper[4857]: + source /usr/local/bin/container-scripts/functions Nov 28 13:55:03 crc kubenswrapper[4857]: ++ OVNBridge=br-int Nov 28 13:55:03 crc kubenswrapper[4857]: ++ OVNRemote=tcp:localhost:6642 Nov 28 13:55:03 crc kubenswrapper[4857]: ++ OVNEncapType=geneve Nov 28 13:55:03 crc kubenswrapper[4857]: ++ OVNAvailabilityZones= Nov 28 13:55:03 crc kubenswrapper[4857]: ++ EnableChassisAsGateway=true Nov 28 13:55:03 crc kubenswrapper[4857]: ++ PhysicalNetworks= Nov 28 13:55:03 crc kubenswrapper[4857]: ++ OVNHostName= Nov 28 13:55:03 crc kubenswrapper[4857]: ++ DB_FILE=/etc/openvswitch/conf.db Nov 28 13:55:03 crc kubenswrapper[4857]: ++ ovs_dir=/var/lib/openvswitch Nov 28 13:55:03 crc kubenswrapper[4857]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script Nov 28 13:55:03 crc kubenswrapper[4857]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows Nov 28 13:55:03 crc kubenswrapper[4857]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 28 13:55:03 crc kubenswrapper[4857]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 28 13:55:03 crc kubenswrapper[4857]: + sleep 0.5 Nov 28 13:55:03 crc kubenswrapper[4857]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 28 13:55:03 crc kubenswrapper[4857]: + sleep 0.5 Nov 28 13:55:03 crc kubenswrapper[4857]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 28 13:55:03 crc kubenswrapper[4857]: + sleep 0.5 Nov 28 13:55:03 crc kubenswrapper[4857]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 28 13:55:03 crc kubenswrapper[4857]: + cleanup_ovsdb_server_semaphore Nov 28 13:55:03 crc kubenswrapper[4857]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 28 13:55:03 crc kubenswrapper[4857]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd Nov 28 13:55:03 crc kubenswrapper[4857]: > pod="openstack/ovn-controller-ovs-hgm54" podUID="21fe1399-7f40-43ec-bee8-868c937a6e19" containerName="ovsdb-server" containerID="cri-o://a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17" Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.505742 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-hgm54" podUID="21fe1399-7f40-43ec-bee8-868c937a6e19" containerName="ovsdb-server" containerID="cri-o://a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17" gracePeriod=28 Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.505844 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/novaapi379c-account-delete-7x584" Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.551399 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.580238 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c117ad8-9d37-4e85-b408-e2d77c8331df-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7c117ad8-9d37-4e85-b408-e2d77c8331df" (UID: "7c117ad8-9d37-4e85-b408-e2d77c8331df"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.615296 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 13:55:03 crc kubenswrapper[4857]: E1128 13:55:03.625103 4857 configmap.go:193] Couldn't get configMap openstack/openstack-cell1-scripts: configmap "openstack-cell1-scripts" not found Nov 28 13:55:03 crc kubenswrapper[4857]: E1128 13:55:03.633855 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f1ee99c5-4515-45fd-ad45-cd7d96f85c10-operator-scripts podName:f1ee99c5-4515-45fd-ad45-cd7d96f85c10 nodeName:}" failed. No retries permitted until 2025-11-28 13:55:04.633824696 +0000 UTC m=+1554.757766123 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/f1ee99c5-4515-45fd-ad45-cd7d96f85c10-operator-scripts") pod "novacell196aa-account-delete-rnjfc" (UID: "f1ee99c5-4515-45fd-ad45-cd7d96f85c10") : configmap "openstack-cell1-scripts" not found Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.626180 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c117ad8-9d37-4e85-b408-e2d77c8331df-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.682001 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="e5ec18e7-6719-46dd-b580-303f3da41869" containerName="rabbitmq" containerID="cri-o://af0d6704e41a2d48d923ac3295bc509705c2c44269b7920b44c7cf73327f3eda" gracePeriod=604800 Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.706664 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-hgm54" podUID="21fe1399-7f40-43ec-bee8-868c937a6e19" containerName="ovs-vswitchd" containerID="cri-o://b92ad2228277ac1c1d0e0ba05d4fc59c373322274d92f626373330dc03f6a207" gracePeriod=28 Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.706809 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 13:55:03 crc kubenswrapper[4857]: E1128 13:55:03.711075 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a8600b616c8309585de43967afe2ae78b2ef2823ae669a2d23786e69e65b2a4a is running failed: container process not found" containerID="a8600b616c8309585de43967afe2ae78b2ef2823ae669a2d23786e69e65b2a4a" cmd=["/usr/bin/pidof","ovsdb-server"] Nov 28 13:55:03 crc kubenswrapper[4857]: E1128 13:55:03.713572 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 
a8600b616c8309585de43967afe2ae78b2ef2823ae669a2d23786e69e65b2a4a is running failed: container process not found" containerID="a8600b616c8309585de43967afe2ae78b2ef2823ae669a2d23786e69e65b2a4a" cmd=["/usr/bin/pidof","ovsdb-server"] Nov 28 13:55:03 crc kubenswrapper[4857]: E1128 13:55:03.713802 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a8600b616c8309585de43967afe2ae78b2ef2823ae669a2d23786e69e65b2a4a is running failed: container process not found" containerID="a8600b616c8309585de43967afe2ae78b2ef2823ae669a2d23786e69e65b2a4a" cmd=["/usr/bin/pidof","ovsdb-server"] Nov 28 13:55:03 crc kubenswrapper[4857]: E1128 13:55:03.713831 4857 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a8600b616c8309585de43967afe2ae78b2ef2823ae669a2d23786e69e65b2a4a is running failed: container process not found" probeType="Readiness" pod="openstack/ovsdbserver-nb-0" podUID="dceb7667-07bc-486b-b65f-c87427949ffd" containerName="ovsdbserver-nb" Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.765101 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-d85b4cc9d-lptk6"] Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.765925 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-d85b4cc9d-lptk6" podUID="d38848c8-5fdb-462f-8471-a0b1d2211b82" containerName="barbican-keystone-listener-log" containerID="cri-o://56aaf63e257ad874a5ce8e7d3da112b7d05ea4d65d7e6e31d4807576b158a4bf" gracePeriod=30 Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.766070 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-d85b4cc9d-lptk6" podUID="d38848c8-5fdb-462f-8471-a0b1d2211b82" containerName="barbican-keystone-listener" containerID="cri-o://d9962818d5fa228aba9f99a8cbf7802647856bad4f83e918d4d18ae1a9033d11" gracePeriod=30 Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.773086 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="e5550a25-04ef-4dde-afd4-627f1df97a90" containerName="rabbitmq" containerID="cri-o://3bfaa6d12b2af65b2c4bc9e67c77c455db443837bdebce53cc1736765094b03f" gracePeriod=604800 Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.816591 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c117ad8-9d37-4e85-b408-e2d77c8331df-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "7c117ad8-9d37-4e85-b408-e2d77c8331df" (UID: "7c117ad8-9d37-4e85-b408-e2d77c8331df"). InnerVolumeSpecName "metrics-certs-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.819110 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinderf448-account-delete-vb5h7"] Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.838320 4857 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7c117ad8-9d37-4e85-b408-e2d77c8331df-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.862479 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-68dfcc5468-bgz8k"] Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.862776 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-68dfcc5468-bgz8k" podUID="63f7c690-a408-4e1f-8959-b22badb1b9dc" containerName="barbican-api-log" containerID="cri-o://4b9f9dd6d4c0768cd26615fd0b66fda62c0a3074685c2e3db7b9ec59c30f07ed" gracePeriod=30 Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.863239 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-68dfcc5468-bgz8k" podUID="63f7c690-a408-4e1f-8959-b22badb1b9dc" containerName="barbican-api" containerID="cri-o://32f2e88243b10a53bc37602a5a649cb5b43c66d0e3d47ca1f87ead6c5ef19c53" gracePeriod=30 Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.888625 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-zhh8w" Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.890169 4857 generic.go:334] "Generic (PLEG): container finished" podID="0944133e-cee5-4927-8f5e-8f781b30d224" containerID="5f6e16bc3a6a3e255193ac8544c8911785ede7f13bf7ff1f4eb96c9e2ab1632c" exitCode=143 Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.890710 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-794cbbfc48-m96jr" event={"ID":"0944133e-cee5-4927-8f5e-8f781b30d224","Type":"ContainerDied","Data":"5f6e16bc3a6a3e255193ac8544c8911785ede7f13bf7ff1f4eb96c9e2ab1632c"} Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.909601 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-zhh8w" event={"ID":"de8730d5-79df-4483-a263-1dd72a7ee079","Type":"ContainerDied","Data":"401716f6caa800e1384b14d3166b9aefb675685799de23b75b75a4b68382fa14"} Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.909664 4857 scope.go:117] "RemoveContainer" containerID="7f8d74bb0a16cfc92269e5127a4271c686a559e0fa2c0a91afa634e529d6544e" Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.909802 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-zhh8w" Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.917457 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-7d59cc7587-wt4q5"] Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.918075 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-7d59cc7587-wt4q5" podUID="791bae3e-043c-4a91-8e8b-d1d574dcb008" containerName="barbican-worker-log" containerID="cri-o://184fc257124031568af407b028afb09e8021dd8c63efc35a95cbe06c5d922f41" gracePeriod=30 Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.918170 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-7d59cc7587-wt4q5" podUID="791bae3e-043c-4a91-8e8b-d1d574dcb008" containerName="barbican-worker" containerID="cri-o://4c9f26d142655cc105e4408ad7bf9835e75524af522502f18f09cbe79369a7e4" gracePeriod=30 Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.943638 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de8730d5-79df-4483-a263-1dd72a7ee079-combined-ca-bundle\") pod \"de8730d5-79df-4483-a263-1dd72a7ee079\" (UID: \"de8730d5-79df-4483-a263-1dd72a7ee079\") " Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.943675 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9c2wc\" (UniqueName: \"kubernetes.io/projected/de8730d5-79df-4483-a263-1dd72a7ee079-kube-api-access-9c2wc\") pod \"de8730d5-79df-4483-a263-1dd72a7ee079\" (UID: \"de8730d5-79df-4483-a263-1dd72a7ee079\") " Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.943691 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/de8730d5-79df-4483-a263-1dd72a7ee079-var-run\") pod \"de8730d5-79df-4483-a263-1dd72a7ee079\" (UID: \"de8730d5-79df-4483-a263-1dd72a7ee079\") " Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.943708 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/de8730d5-79df-4483-a263-1dd72a7ee079-ovn-controller-tls-certs\") pod \"de8730d5-79df-4483-a263-1dd72a7ee079\" (UID: \"de8730d5-79df-4483-a263-1dd72a7ee079\") " Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.943732 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/de8730d5-79df-4483-a263-1dd72a7ee079-scripts\") pod \"de8730d5-79df-4483-a263-1dd72a7ee079\" (UID: \"de8730d5-79df-4483-a263-1dd72a7ee079\") " Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.943745 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/de8730d5-79df-4483-a263-1dd72a7ee079-var-log-ovn\") pod \"de8730d5-79df-4483-a263-1dd72a7ee079\" (UID: \"de8730d5-79df-4483-a263-1dd72a7ee079\") " Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.943838 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/de8730d5-79df-4483-a263-1dd72a7ee079-var-run-ovn\") pod \"de8730d5-79df-4483-a263-1dd72a7ee079\" (UID: \"de8730d5-79df-4483-a263-1dd72a7ee079\") " Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.944090 4857 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/de8730d5-79df-4483-a263-1dd72a7ee079-var-run" (OuterVolumeSpecName: "var-run") pod "de8730d5-79df-4483-a263-1dd72a7ee079" (UID: "de8730d5-79df-4483-a263-1dd72a7ee079"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.944205 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/de8730d5-79df-4483-a263-1dd72a7ee079-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "de8730d5-79df-4483-a263-1dd72a7ee079" (UID: "de8730d5-79df-4483-a263-1dd72a7ee079"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.945566 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/de8730d5-79df-4483-a263-1dd72a7ee079-scripts" (OuterVolumeSpecName: "scripts") pod "de8730d5-79df-4483-a263-1dd72a7ee079" (UID: "de8730d5-79df-4483-a263-1dd72a7ee079"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.945625 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/de8730d5-79df-4483-a263-1dd72a7ee079-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "de8730d5-79df-4483-a263-1dd72a7ee079" (UID: "de8730d5-79df-4483-a263-1dd72a7ee079"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.945877 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-xwr28"] Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.954729 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-xwr28"] Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.962734 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.963046 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="c69ec619-0d17-4a49-8f97-6db48291122d" containerName="nova-api-log" containerID="cri-o://be1b1db83f35439731454aeddb4f9dfe722c000a3ea47cdfea8a67b86a4ff061" gracePeriod=30 Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.963646 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="c69ec619-0d17-4a49-8f97-6db48291122d" containerName="nova-api-api" containerID="cri-o://2deee318dc0256b8ef37ddeba48860fdac16b660b66a585af3a3827fae2b5301" gracePeriod=30 Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.965626 4857 generic.go:334] "Generic (PLEG): container finished" podID="21fe1399-7f40-43ec-bee8-868c937a6e19" containerID="a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17" exitCode=0 Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.965726 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-hgm54" event={"ID":"21fe1399-7f40-43ec-bee8-868c937a6e19","Type":"ContainerDied","Data":"a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17"} Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.970236 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.970455 4857 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="10f5dca0-ca0a-4e88-838f-14affb1dead5" containerName="nova-metadata-log" containerID="cri-o://9eb233258cdcc0845df438e157136a632bd3695d5e815406ce0875d7029891d2" gracePeriod=30 Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.970826 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="10f5dca0-ca0a-4e88-838f-14affb1dead5" containerName="nova-metadata-metadata" containerID="cri-o://bafd64668cff0693bb5967882f3c22be467009f95e4f118cff44737cfc28e0af" gracePeriod=30 Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.986981 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-96aa-account-create-update-2bzpb"] Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.987731 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-f67hs_7c117ad8-9d37-4e85-b408-e2d77c8331df/openstack-network-exporter/0.log" Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.987826 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-f67hs" event={"ID":"7c117ad8-9d37-4e85-b408-e2d77c8331df","Type":"ContainerDied","Data":"ed9e5d685bfe843d603e9dac303ba68b35babeb8ffd1730ef9c216597f3e63b2"} Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.987933 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-f67hs" Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.997059 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell196aa-account-delete-rnjfc"] Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.999074 4857 generic.go:334] "Generic (PLEG): container finished" podID="d973a56d-fc8e-4cef-a590-d21d32242dc4" containerID="2393368f1d7599e575bd2865623f799eecabedba72c2c1b25e2a5dd440954069" exitCode=143 Nov 28 13:55:03 crc kubenswrapper[4857]: I1128 13:55:03.999124 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"d973a56d-fc8e-4cef-a590-d21d32242dc4","Type":"ContainerDied","Data":"2393368f1d7599e575bd2865623f799eecabedba72c2c1b25e2a5dd440954069"} Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.009625 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de8730d5-79df-4483-a263-1dd72a7ee079-kube-api-access-9c2wc" (OuterVolumeSpecName: "kube-api-access-9c2wc") pod "de8730d5-79df-4483-a263-1dd72a7ee079" (UID: "de8730d5-79df-4483-a263-1dd72a7ee079"). InnerVolumeSpecName "kube-api-access-9c2wc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.019083 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="e5550a25-04ef-4dde-afd4-627f1df97a90" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.102:5671: connect: connection refused" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.020806 4857 generic.go:334] "Generic (PLEG): container finished" podID="4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd" containerID="9d3d6c09860350f192015fe259ed4a4581cfb3b28583b924e843bda2835e7fb6" exitCode=143 Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.021052 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd","Type":"ContainerDied","Data":"9d3d6c09860350f192015fe259ed4a4581cfb3b28583b924e843bda2835e7fb6"} Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.047478 4857 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/de8730d5-79df-4483-a263-1dd72a7ee079-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.047505 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9c2wc\" (UniqueName: \"kubernetes.io/projected/de8730d5-79df-4483-a263-1dd72a7ee079-kube-api-access-9c2wc\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.047516 4857 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/de8730d5-79df-4483-a263-1dd72a7ee079-var-run\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.047526 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/de8730d5-79df-4483-a263-1dd72a7ee079-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.047534 4857 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/de8730d5-79df-4483-a263-1dd72a7ee079-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.052385 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.052717 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="2bf149d2-9beb-4394-921a-a703473391aa" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://2cd656be1985a74adb1ed52d510cb94e3e9f9d8ec5011e4fd68bc155cf37553b" gracePeriod=30 Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.066659 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-96aa-account-create-update-2bzpb"] Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.079798 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.080466 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="e222fcd6-26e0-46af-82ab-7cf038a18195" containerName="nova-scheduler-scheduler" containerID="cri-o://9ac0d5961e71550335295fb51102d474972c116efc7d7bd4fb89a720a9055217" gracePeriod=30 Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.088930 4857 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-khxxx"] Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.093803 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de8730d5-79df-4483-a263-1dd72a7ee079-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "de8730d5-79df-4483-a263-1dd72a7ee079" (UID: "de8730d5-79df-4483-a263-1dd72a7ee079"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.096683 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-khxxx"] Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.104254 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.104476 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-conductor-0" podUID="61549c8e-2955-4350-9055-731ceb896fdc" containerName="nova-cell1-conductor-conductor" containerID="cri-o://ada4622992cb990d822672c1f29ee9bd8e30cbd244227c7ff8d566127a8be7c6" gracePeriod=30 Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.114379 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.114685 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d" containerName="nova-cell0-conductor-conductor" containerID="cri-o://ba6185dbaf33b25f5b661fea2dcf82e7a24370bf93cc9ba5e314ebb2abc985f8" gracePeriod=30 Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.122229 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-jvrc5"] Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.132865 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-jvrc5"] Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.140768 4857 generic.go:334] "Generic (PLEG): container finished" podID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerID="75a66b9748e2f49b7fe56ad9c99da91918be5cc9c7c5b50c82a8f29587c6dd41" exitCode=0 Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.140797 4857 generic.go:334] "Generic (PLEG): container finished" podID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerID="9636087b646b01f79b8ba6470a873b59e807cd17b6f1e033005a6e5655b75269" exitCode=0 Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.140804 4857 generic.go:334] "Generic (PLEG): container finished" podID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerID="f27c04a27d6f3fb5f92f4acda7c42185f0f649ff426b2f0c0fc82c87eb5c2df2" exitCode=0 Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.140811 4857 generic.go:334] "Generic (PLEG): container finished" podID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerID="1dba2da3e5dc861fc5c8a1daae2fd1574ec3c6ba40ffdb0cbfcb19d46be889c1" exitCode=0 Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.140817 4857 generic.go:334] "Generic (PLEG): container finished" podID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerID="797af21b1a27903599b036f4c694bee114318cb17d785c9ef036ff8854701e9f" exitCode=0 Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.140824 4857 generic.go:334] "Generic (PLEG): container finished" 
podID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerID="fca2aa8f676e96e45c93a697eeabe06bb4bf6351b5998989a5d94cd1c765da97" exitCode=0 Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.140831 4857 generic.go:334] "Generic (PLEG): container finished" podID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerID="2761311c0d0657f7895e8425cee039b5e9d1f6b44d147413193ce1a637e0d206" exitCode=0 Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.140837 4857 generic.go:334] "Generic (PLEG): container finished" podID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerID="043905c220a58f2cc3f72c7c5ff10d14437639fd54c68815c19eb821d3f8691b" exitCode=0 Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.140845 4857 generic.go:334] "Generic (PLEG): container finished" podID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerID="336ae6d3cd985dc92a4379227cc3a7df30cdbac847520f8327e5fce00a85e01c" exitCode=0 Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.140853 4857 generic.go:334] "Generic (PLEG): container finished" podID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerID="684d617680cd018b845bc46ea83aaf268880e5e96d90c1f4c74455668100a5fb" exitCode=0 Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.140860 4857 generic.go:334] "Generic (PLEG): container finished" podID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerID="7f8c7069308f0a6173f6d99219cd32e4aee978bcd30684463830f8831fa3dc54" exitCode=0 Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.140866 4857 generic.go:334] "Generic (PLEG): container finished" podID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerID="4e216ae629f3bf57169773aa0918d2098ca8722a57b52a0b59f920ee5fe40042" exitCode=0 Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.140872 4857 generic.go:334] "Generic (PLEG): container finished" podID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerID="997f645a4bf32bedcf75f896750b179b3b64c864f0aa44fb1505c0ce4a2004d3" exitCode=0 Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.140913 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7dacf187-3671-4114-a93e-e2296c8c20b2","Type":"ContainerDied","Data":"75a66b9748e2f49b7fe56ad9c99da91918be5cc9c7c5b50c82a8f29587c6dd41"} Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.141021 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7dacf187-3671-4114-a93e-e2296c8c20b2","Type":"ContainerDied","Data":"9636087b646b01f79b8ba6470a873b59e807cd17b6f1e033005a6e5655b75269"} Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.141036 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7dacf187-3671-4114-a93e-e2296c8c20b2","Type":"ContainerDied","Data":"f27c04a27d6f3fb5f92f4acda7c42185f0f649ff426b2f0c0fc82c87eb5c2df2"} Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.141047 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7dacf187-3671-4114-a93e-e2296c8c20b2","Type":"ContainerDied","Data":"1dba2da3e5dc861fc5c8a1daae2fd1574ec3c6ba40ffdb0cbfcb19d46be889c1"} Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.141057 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7dacf187-3671-4114-a93e-e2296c8c20b2","Type":"ContainerDied","Data":"797af21b1a27903599b036f4c694bee114318cb17d785c9ef036ff8854701e9f"} Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.141069 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/swift-storage-0" event={"ID":"7dacf187-3671-4114-a93e-e2296c8c20b2","Type":"ContainerDied","Data":"fca2aa8f676e96e45c93a697eeabe06bb4bf6351b5998989a5d94cd1c765da97"} Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.141080 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7dacf187-3671-4114-a93e-e2296c8c20b2","Type":"ContainerDied","Data":"2761311c0d0657f7895e8425cee039b5e9d1f6b44d147413193ce1a637e0d206"} Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.141092 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7dacf187-3671-4114-a93e-e2296c8c20b2","Type":"ContainerDied","Data":"043905c220a58f2cc3f72c7c5ff10d14437639fd54c68815c19eb821d3f8691b"} Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.141101 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7dacf187-3671-4114-a93e-e2296c8c20b2","Type":"ContainerDied","Data":"336ae6d3cd985dc92a4379227cc3a7df30cdbac847520f8327e5fce00a85e01c"} Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.141110 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7dacf187-3671-4114-a93e-e2296c8c20b2","Type":"ContainerDied","Data":"684d617680cd018b845bc46ea83aaf268880e5e96d90c1f4c74455668100a5fb"} Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.141118 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7dacf187-3671-4114-a93e-e2296c8c20b2","Type":"ContainerDied","Data":"7f8c7069308f0a6173f6d99219cd32e4aee978bcd30684463830f8831fa3dc54"} Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.141127 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7dacf187-3671-4114-a93e-e2296c8c20b2","Type":"ContainerDied","Data":"4e216ae629f3bf57169773aa0918d2098ca8722a57b52a0b59f920ee5fe40042"} Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.141135 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7dacf187-3671-4114-a93e-e2296c8c20b2","Type":"ContainerDied","Data":"997f645a4bf32bedcf75f896750b179b3b64c864f0aa44fb1505c0ce4a2004d3"} Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.143145 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-f67hs"] Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.149867 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/de8730d5-79df-4483-a263-1dd72a7ee079-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.149930 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_dceb7667-07bc-486b-b65f-c87427949ffd/ovsdbserver-nb/0.log" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.150009 4857 generic.go:334] "Generic (PLEG): container finished" podID="dceb7667-07bc-486b-b65f-c87427949ffd" containerID="97ea011d40feafd9e10daf1c4217debdca8ad06ea7b693c5eca7afa22e62ac1f" exitCode=2 Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.150024 4857 generic.go:334] "Generic (PLEG): container finished" podID="dceb7667-07bc-486b-b65f-c87427949ffd" containerID="a8600b616c8309585de43967afe2ae78b2ef2823ae669a2d23786e69e65b2a4a" exitCode=143 Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.150075 4857 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"dceb7667-07bc-486b-b65f-c87427949ffd","Type":"ContainerDied","Data":"97ea011d40feafd9e10daf1c4217debdca8ad06ea7b693c5eca7afa22e62ac1f"} Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.150101 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"dceb7667-07bc-486b-b65f-c87427949ffd","Type":"ContainerDied","Data":"a8600b616c8309585de43967afe2ae78b2ef2823ae669a2d23786e69e65b2a4a"} Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.152460 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-metrics-f67hs"] Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.153681 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_2dfd7b5e-9e1b-4f85-9933-2f3f55cee399/ovsdbserver-sb/0.log" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.156834 4857 generic.go:334] "Generic (PLEG): container finished" podID="2dfd7b5e-9e1b-4f85-9933-2f3f55cee399" containerID="bad96dcf4a4f03f679f428d14fce3c35afb403afb6b9ec23a4c23d134a347cf3" exitCode=143 Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.156861 4857 generic.go:334] "Generic (PLEG): container finished" podID="2dfd7b5e-9e1b-4f85-9933-2f3f55cee399" containerID="028f12410d5a79779844276523096407b815d65be988d8365264dc7330864432" exitCode=2 Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.156907 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399","Type":"ContainerDied","Data":"bad96dcf4a4f03f679f428d14fce3c35afb403afb6b9ec23a4c23d134a347cf3"} Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.156933 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399","Type":"ContainerDied","Data":"028f12410d5a79779844276523096407b815d65be988d8365264dc7330864432"} Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.158416 4857 generic.go:334] "Generic (PLEG): container finished" podID="c3d9d9f7-3d10-4677-aaef-60d731a33857" containerID="c29237331f4b473cdae2e6d65bf7f88564f2faca53b3e0624b2143a3d5d0c546" exitCode=137 Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.170713 4857 generic.go:334] "Generic (PLEG): container finished" podID="a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e" containerID="0e9047254aa31e8c764eaf4e0c00c00b8b889bb9a2e0ba225c71bf05a09cdad0" exitCode=143 Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.170801 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e","Type":"ContainerDied","Data":"0e9047254aa31e8c764eaf4e0c00c00b8b889bb9a2e0ba225c71bf05a09cdad0"} Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.189804 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinderf448-account-delete-vb5h7" event={"ID":"ba0bce06-3761-4bb4-8e35-305dc48b3277","Type":"ContainerStarted","Data":"a2f8f2846268271e23b1f3ecaa4f721389aacc2ff5cf3cd57d872c567d6e117f"} Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.210562 4857 generic.go:334] "Generic (PLEG): container finished" podID="bf608325-3713-4ff6-8e16-c7993618ef71" containerID="d13a0f999cf39e2cf71564829a093c201bf5675270b0ded0fe06d26418a0bf83" exitCode=0 Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.210604 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-7frjn" 
event={"ID":"bf608325-3713-4ff6-8e16-c7993618ef71","Type":"ContainerDied","Data":"d13a0f999cf39e2cf71564829a093c201bf5675270b0ded0fe06d26418a0bf83"} Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.214661 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="e5ec18e7-6719-46dd-b580-303f3da41869" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.103:5671: connect: connection refused" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.227442 4857 scope.go:117] "RemoveContainer" containerID="0e846cecd8668f3338b50a23dd0e5ca393a261a6642af341de9d050961ee27c0" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.251665 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9" path="/var/lib/kubelet/pods/6ddb489d-a52b-47d5-9fda-c2d7ef09c1d9/volumes" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.252243 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7c117ad8-9d37-4e85-b408-e2d77c8331df" path="/var/lib/kubelet/pods/7c117ad8-9d37-4e85-b408-e2d77c8331df/volumes" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.252718 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b1f6b069-f687-4108-b8b3-2457822d496a" path="/var/lib/kubelet/pods/b1f6b069-f687-4108-b8b3-2457822d496a/volumes" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.253715 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b492461f-69d0-4020-84d9-68adae5ebe0e" path="/var/lib/kubelet/pods/b492461f-69d0-4020-84d9-68adae5ebe0e/volumes" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.254299 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5b67972-d610-4d3c-96e3-b8a1593f196a" path="/var/lib/kubelet/pods/b5b67972-d610-4d3c-96e3-b8a1593f196a/volumes" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.254782 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c0cffe75-b02c-411f-8f5c-3eec6d36659d" path="/var/lib/kubelet/pods/c0cffe75-b02c-411f-8f5c-3eec6d36659d/volumes" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.255293 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d26ebe73-c4df-458d-a2f3-1da92d587632" path="/var/lib/kubelet/pods/d26ebe73-c4df-458d-a2f3-1da92d587632/volumes" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.256652 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d59bcbed-e787-486b-9efc-8c12f7e58960" path="/var/lib/kubelet/pods/d59bcbed-e787-486b-9efc-8c12f7e58960/volumes" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.258009 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d9199b78-2d93-4877-95b8-ed8457716a3f" path="/var/lib/kubelet/pods/d9199b78-2d93-4877-95b8-ed8457716a3f/volumes" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.258640 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="df3949de-0120-4289-8dfa-71e0ea70deaf" path="/var/lib/kubelet/pods/df3949de-0120-4289-8dfa-71e0ea70deaf/volumes" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.269366 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de8730d5-79df-4483-a263-1dd72a7ee079-ovn-controller-tls-certs" (OuterVolumeSpecName: "ovn-controller-tls-certs") pod "de8730d5-79df-4483-a263-1dd72a7ee079" (UID: "de8730d5-79df-4483-a263-1dd72a7ee079"). 
InnerVolumeSpecName "ovn-controller-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.290082 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-7frjn" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.315835 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-cell1-galera-0" podUID="75c7c292-3658-4264-b86b-2a825aeb9ad4" containerName="galera" containerID="cri-o://7dafe86e22a18fbebf5e7e7d5612cc7d57a0117fe9637462e2f483a803153f13" gracePeriod=30 Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.359122 4857 reconciler_common.go:293] "Volume detached for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/de8730d5-79df-4483-a263-1dd72a7ee079-ovn-controller-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.456892 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.460764 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bf608325-3713-4ff6-8e16-c7993618ef71-ovsdbserver-nb\") pod \"bf608325-3713-4ff6-8e16-c7993618ef71\" (UID: \"bf608325-3713-4ff6-8e16-c7993618ef71\") " Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.460810 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bf608325-3713-4ff6-8e16-c7993618ef71-dns-svc\") pod \"bf608325-3713-4ff6-8e16-c7993618ef71\" (UID: \"bf608325-3713-4ff6-8e16-c7993618ef71\") " Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.460873 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jsz48\" (UniqueName: \"kubernetes.io/projected/bf608325-3713-4ff6-8e16-c7993618ef71-kube-api-access-jsz48\") pod \"bf608325-3713-4ff6-8e16-c7993618ef71\" (UID: \"bf608325-3713-4ff6-8e16-c7993618ef71\") " Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.460965 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf608325-3713-4ff6-8e16-c7993618ef71-config\") pod \"bf608325-3713-4ff6-8e16-c7993618ef71\" (UID: \"bf608325-3713-4ff6-8e16-c7993618ef71\") " Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.461062 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bf608325-3713-4ff6-8e16-c7993618ef71-ovsdbserver-sb\") pod \"bf608325-3713-4ff6-8e16-c7993618ef71\" (UID: \"bf608325-3713-4ff6-8e16-c7993618ef71\") " Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.461135 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bf608325-3713-4ff6-8e16-c7993618ef71-dns-swift-storage-0\") pod \"bf608325-3713-4ff6-8e16-c7993618ef71\" (UID: \"bf608325-3713-4ff6-8e16-c7993618ef71\") " Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.471719 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf608325-3713-4ff6-8e16-c7993618ef71-kube-api-access-jsz48" (OuterVolumeSpecName: "kube-api-access-jsz48") pod "bf608325-3713-4ff6-8e16-c7993618ef71" (UID: 
"bf608325-3713-4ff6-8e16-c7993618ef71"). InnerVolumeSpecName "kube-api-access-jsz48". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.525713 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf608325-3713-4ff6-8e16-c7993618ef71-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "bf608325-3713-4ff6-8e16-c7993618ef71" (UID: "bf608325-3713-4ff6-8e16-c7993618ef71"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.541914 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf608325-3713-4ff6-8e16-c7993618ef71-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "bf608325-3713-4ff6-8e16-c7993618ef71" (UID: "bf608325-3713-4ff6-8e16-c7993618ef71"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.564994 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3d9d9f7-3d10-4677-aaef-60d731a33857-combined-ca-bundle\") pod \"c3d9d9f7-3d10-4677-aaef-60d731a33857\" (UID: \"c3d9d9f7-3d10-4677-aaef-60d731a33857\") " Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.565389 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/c3d9d9f7-3d10-4677-aaef-60d731a33857-openstack-config\") pod \"c3d9d9f7-3d10-4677-aaef-60d731a33857\" (UID: \"c3d9d9f7-3d10-4677-aaef-60d731a33857\") " Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.565423 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/c3d9d9f7-3d10-4677-aaef-60d731a33857-openstack-config-secret\") pod \"c3d9d9f7-3d10-4677-aaef-60d731a33857\" (UID: \"c3d9d9f7-3d10-4677-aaef-60d731a33857\") " Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.565503 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h8kqf\" (UniqueName: \"kubernetes.io/projected/c3d9d9f7-3d10-4677-aaef-60d731a33857-kube-api-access-h8kqf\") pod \"c3d9d9f7-3d10-4677-aaef-60d731a33857\" (UID: \"c3d9d9f7-3d10-4677-aaef-60d731a33857\") " Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.566454 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bf608325-3713-4ff6-8e16-c7993618ef71-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.566472 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bf608325-3713-4ff6-8e16-c7993618ef71-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.566482 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jsz48\" (UniqueName: \"kubernetes.io/projected/bf608325-3713-4ff6-8e16-c7993618ef71-kube-api-access-jsz48\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.583093 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c3d9d9f7-3d10-4677-aaef-60d731a33857-kube-api-access-h8kqf" (OuterVolumeSpecName: "kube-api-access-h8kqf") pod 
"c3d9d9f7-3d10-4677-aaef-60d731a33857" (UID: "c3d9d9f7-3d10-4677-aaef-60d731a33857"). InnerVolumeSpecName "kube-api-access-h8kqf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.584364 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf608325-3713-4ff6-8e16-c7993618ef71-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "bf608325-3713-4ff6-8e16-c7993618ef71" (UID: "bf608325-3713-4ff6-8e16-c7993618ef71"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.597160 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c3d9d9f7-3d10-4677-aaef-60d731a33857-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "c3d9d9f7-3d10-4677-aaef-60d731a33857" (UID: "c3d9d9f7-3d10-4677-aaef-60d731a33857"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.610081 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3d9d9f7-3d10-4677-aaef-60d731a33857-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c3d9d9f7-3d10-4677-aaef-60d731a33857" (UID: "c3d9d9f7-3d10-4677-aaef-60d731a33857"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.621745 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf608325-3713-4ff6-8e16-c7993618ef71-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "bf608325-3713-4ff6-8e16-c7993618ef71" (UID: "bf608325-3713-4ff6-8e16-c7993618ef71"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.645051 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3d9d9f7-3d10-4677-aaef-60d731a33857-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "c3d9d9f7-3d10-4677-aaef-60d731a33857" (UID: "c3d9d9f7-3d10-4677-aaef-60d731a33857"). InnerVolumeSpecName "openstack-config-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.667911 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bf608325-3713-4ff6-8e16-c7993618ef71-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.667940 4857 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/c3d9d9f7-3d10-4677-aaef-60d731a33857-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.667965 4857 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/c3d9d9f7-3d10-4677-aaef-60d731a33857-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.667981 4857 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bf608325-3713-4ff6-8e16-c7993618ef71-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.667991 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h8kqf\" (UniqueName: \"kubernetes.io/projected/c3d9d9f7-3d10-4677-aaef-60d731a33857-kube-api-access-h8kqf\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.668001 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3d9d9f7-3d10-4677-aaef-60d731a33857-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:04 crc kubenswrapper[4857]: E1128 13:55:04.668064 4857 configmap.go:193] Couldn't get configMap openstack/openstack-cell1-scripts: configmap "openstack-cell1-scripts" not found Nov 28 13:55:04 crc kubenswrapper[4857]: E1128 13:55:04.668109 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f1ee99c5-4515-45fd-ad45-cd7d96f85c10-operator-scripts podName:f1ee99c5-4515-45fd-ad45-cd7d96f85c10 nodeName:}" failed. No retries permitted until 2025-11-28 13:55:06.668095006 +0000 UTC m=+1556.792036443 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/f1ee99c5-4515-45fd-ad45-cd7d96f85c10-operator-scripts") pod "novacell196aa-account-delete-rnjfc" (UID: "f1ee99c5-4515-45fd-ad45-cd7d96f85c10") : configmap "openstack-cell1-scripts" not found Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.685427 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf608325-3713-4ff6-8e16-c7993618ef71-config" (OuterVolumeSpecName: "config") pod "bf608325-3713-4ff6-8e16-c7993618ef71" (UID: "bf608325-3713-4ff6-8e16-c7993618ef71"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.727660 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placementf611-account-delete-gckl9"] Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.741835 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance25bf-account-delete-78p7f"] Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.772244 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf608325-3713-4ff6-8e16-c7993618ef71-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.784145 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron0f31-account-delete-t4p7d"] Nov 28 13:55:04 crc kubenswrapper[4857]: E1128 13:55:04.809210 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ba6185dbaf33b25f5b661fea2dcf82e7a24370bf93cc9ba5e314ebb2abc985f8" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 13:55:04 crc kubenswrapper[4857]: E1128 13:55:04.824724 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ba6185dbaf33b25f5b661fea2dcf82e7a24370bf93cc9ba5e314ebb2abc985f8" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 13:55:04 crc kubenswrapper[4857]: E1128 13:55:04.839462 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ba6185dbaf33b25f5b661fea2dcf82e7a24370bf93cc9ba5e314ebb2abc985f8" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 13:55:04 crc kubenswrapper[4857]: E1128 13:55:04.839521 4857 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d" containerName="nova-cell0-conductor-conductor" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.918624 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-zhh8w"] Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.930349 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_2dfd7b5e-9e1b-4f85-9933-2f3f55cee399/ovsdbserver-sb/0.log" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.930922 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.931201 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-zhh8w"] Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.936620 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_dceb7667-07bc-486b-b65f-c87427949ffd/ovsdbserver-nb/0.log" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.936743 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 28 13:55:04 crc kubenswrapper[4857]: E1128 13:55:04.971708 4857 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2bf149d2_9beb_4394_921a_a703473391aa.slice/crio-conmon-2cd656be1985a74adb1ed52d510cb94e3e9f9d8ec5011e4fd68bc155cf37553b.scope\": RecentStats: unable to find data in memory cache]" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.975850 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-metrics-certs-tls-certs\") pod \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\" (UID: \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\") " Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.975893 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-ovsdbserver-sb-tls-certs\") pod \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\" (UID: \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\") " Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.975922 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/dceb7667-07bc-486b-b65f-c87427949ffd-metrics-certs-tls-certs\") pod \"dceb7667-07bc-486b-b65f-c87427949ffd\" (UID: \"dceb7667-07bc-486b-b65f-c87427949ffd\") " Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.975940 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-combined-ca-bundle\") pod \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\" (UID: \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\") " Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.975974 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4msqh\" (UniqueName: \"kubernetes.io/projected/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-kube-api-access-4msqh\") pod \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\" (UID: \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\") " Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.976002 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dceb7667-07bc-486b-b65f-c87427949ffd-scripts\") pod \"dceb7667-07bc-486b-b65f-c87427949ffd\" (UID: \"dceb7667-07bc-486b-b65f-c87427949ffd\") " Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.976019 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-nb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"dceb7667-07bc-486b-b65f-c87427949ffd\" (UID: \"dceb7667-07bc-486b-b65f-c87427949ffd\") " Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.976087 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/dceb7667-07bc-486b-b65f-c87427949ffd-ovsdb-rundir\") pod \"dceb7667-07bc-486b-b65f-c87427949ffd\" (UID: \"dceb7667-07bc-486b-b65f-c87427949ffd\") " Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.976112 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-config\") pod \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\" (UID: \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\") " Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.976127 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-sb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\" (UID: \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\") " Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.976182 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-scripts\") pod \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\" (UID: \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\") " Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.976205 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dceb7667-07bc-486b-b65f-c87427949ffd-config\") pod \"dceb7667-07bc-486b-b65f-c87427949ffd\" (UID: \"dceb7667-07bc-486b-b65f-c87427949ffd\") " Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.976224 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dceb7667-07bc-486b-b65f-c87427949ffd-combined-ca-bundle\") pod \"dceb7667-07bc-486b-b65f-c87427949ffd\" (UID: \"dceb7667-07bc-486b-b65f-c87427949ffd\") " Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.976263 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-ovsdb-rundir\") pod \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\" (UID: \"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399\") " Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.976277 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/dceb7667-07bc-486b-b65f-c87427949ffd-ovsdbserver-nb-tls-certs\") pod \"dceb7667-07bc-486b-b65f-c87427949ffd\" (UID: \"dceb7667-07bc-486b-b65f-c87427949ffd\") " Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.976298 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lc4bv\" (UniqueName: \"kubernetes.io/projected/dceb7667-07bc-486b-b65f-c87427949ffd-kube-api-access-lc4bv\") pod \"dceb7667-07bc-486b-b65f-c87427949ffd\" (UID: \"dceb7667-07bc-486b-b65f-c87427949ffd\") " Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.978637 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dceb7667-07bc-486b-b65f-c87427949ffd-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "dceb7667-07bc-486b-b65f-c87427949ffd" (UID: "dceb7667-07bc-486b-b65f-c87427949ffd"). InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.983855 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dceb7667-07bc-486b-b65f-c87427949ffd-config" (OuterVolumeSpecName: "config") pod "dceb7667-07bc-486b-b65f-c87427949ffd" (UID: "dceb7667-07bc-486b-b65f-c87427949ffd"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.987674 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "2dfd7b5e-9e1b-4f85-9933-2f3f55cee399" (UID: "2dfd7b5e-9e1b-4f85-9933-2f3f55cee399"). InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.988228 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-config" (OuterVolumeSpecName: "config") pod "2dfd7b5e-9e1b-4f85-9933-2f3f55cee399" (UID: "2dfd7b5e-9e1b-4f85-9933-2f3f55cee399"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.988355 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dceb7667-07bc-486b-b65f-c87427949ffd-scripts" (OuterVolumeSpecName: "scripts") pod "dceb7667-07bc-486b-b65f-c87427949ffd" (UID: "dceb7667-07bc-486b-b65f-c87427949ffd"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.998120 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dceb7667-07bc-486b-b65f-c87427949ffd-kube-api-access-lc4bv" (OuterVolumeSpecName: "kube-api-access-lc4bv") pod "dceb7667-07bc-486b-b65f-c87427949ffd" (UID: "dceb7667-07bc-486b-b65f-c87427949ffd"). InnerVolumeSpecName "kube-api-access-lc4bv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:04 crc kubenswrapper[4857]: I1128 13:55:04.998405 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-scripts" (OuterVolumeSpecName: "scripts") pod "2dfd7b5e-9e1b-4f85-9933-2f3f55cee399" (UID: "2dfd7b5e-9e1b-4f85-9933-2f3f55cee399"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.006355 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.039862 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-kube-api-access-4msqh" (OuterVolumeSpecName: "kube-api-access-4msqh") pod "2dfd7b5e-9e1b-4f85-9933-2f3f55cee399" (UID: "2dfd7b5e-9e1b-4f85-9933-2f3f55cee399"). InnerVolumeSpecName "kube-api-access-4msqh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.039873 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "ovndbcluster-nb-etc-ovn") pod "dceb7667-07bc-486b-b65f-c87427949ffd" (UID: "dceb7667-07bc-486b-b65f-c87427949ffd"). InnerVolumeSpecName "local-storage09-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.063447 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "ovndbcluster-sb-etc-ovn") pod "2dfd7b5e-9e1b-4f85-9933-2f3f55cee399" (UID: "2dfd7b5e-9e1b-4f85-9933-2f3f55cee399"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.078151 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3f3c4b68-eb9c-466a-accc-51a99bcdac06-etc-swift\") pod \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\" (UID: \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\") " Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.078291 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2m84z\" (UniqueName: \"kubernetes.io/projected/3f3c4b68-eb9c-466a-accc-51a99bcdac06-kube-api-access-2m84z\") pod \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\" (UID: \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\") " Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.078416 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f3c4b68-eb9c-466a-accc-51a99bcdac06-combined-ca-bundle\") pod \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\" (UID: \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\") " Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.078462 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f3c4b68-eb9c-466a-accc-51a99bcdac06-internal-tls-certs\") pod \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\" (UID: \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\") " Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.078483 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3f3c4b68-eb9c-466a-accc-51a99bcdac06-run-httpd\") pod \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\" (UID: \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\") " Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.078535 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3f3c4b68-eb9c-466a-accc-51a99bcdac06-log-httpd\") pod \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\" (UID: \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\") " Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.078585 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f3c4b68-eb9c-466a-accc-51a99bcdac06-public-tls-certs\") pod \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\" (UID: \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\") " Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.078622 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f3c4b68-eb9c-466a-accc-51a99bcdac06-config-data\") pod \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\" (UID: \"3f3c4b68-eb9c-466a-accc-51a99bcdac06\") " Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.079111 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-scripts\") on node \"crc\" DevicePath 
\"\"" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.079173 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dceb7667-07bc-486b-b65f-c87427949ffd-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.079187 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.079198 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lc4bv\" (UniqueName: \"kubernetes.io/projected/dceb7667-07bc-486b-b65f-c87427949ffd-kube-api-access-lc4bv\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.079209 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4msqh\" (UniqueName: \"kubernetes.io/projected/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-kube-api-access-4msqh\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.079217 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dceb7667-07bc-486b-b65f-c87427949ffd-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.079250 4857 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.079259 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/dceb7667-07bc-486b-b65f-c87427949ffd-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.079267 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.079281 4857 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.080400 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3f3c4b68-eb9c-466a-accc-51a99bcdac06-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "3f3c4b68-eb9c-466a-accc-51a99bcdac06" (UID: "3f3c4b68-eb9c-466a-accc-51a99bcdac06"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.083173 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3f3c4b68-eb9c-466a-accc-51a99bcdac06-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "3f3c4b68-eb9c-466a-accc-51a99bcdac06" (UID: "3f3c4b68-eb9c-466a-accc-51a99bcdac06"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.117631 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f3c4b68-eb9c-466a-accc-51a99bcdac06-kube-api-access-2m84z" (OuterVolumeSpecName: "kube-api-access-2m84z") pod "3f3c4b68-eb9c-466a-accc-51a99bcdac06" (UID: "3f3c4b68-eb9c-466a-accc-51a99bcdac06"). InnerVolumeSpecName "kube-api-access-2m84z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.123497 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f3c4b68-eb9c-466a-accc-51a99bcdac06-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "3f3c4b68-eb9c-466a-accc-51a99bcdac06" (UID: "3f3c4b68-eb9c-466a-accc-51a99bcdac06"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.166204 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-d85b4cc9d-lptk6" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.180909 4857 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3f3c4b68-eb9c-466a-accc-51a99bcdac06-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.181123 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2m84z\" (UniqueName: \"kubernetes.io/projected/3f3c4b68-eb9c-466a-accc-51a99bcdac06-kube-api-access-2m84z\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.181215 4857 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3f3c4b68-eb9c-466a-accc-51a99bcdac06-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.181271 4857 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3f3c4b68-eb9c-466a-accc-51a99bcdac06-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:05 crc kubenswrapper[4857]: E1128 13:55:05.181379 4857 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 28 13:55:05 crc kubenswrapper[4857]: E1128 13:55:05.184412 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/e5ec18e7-6719-46dd-b580-303f3da41869-config-data podName:e5ec18e7-6719-46dd-b580-303f3da41869 nodeName:}" failed. No retries permitted until 2025-11-28 13:55:09.184374264 +0000 UTC m=+1559.308315781 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/e5ec18e7-6719-46dd-b580-303f3da41869-config-data") pod "rabbitmq-cell1-server-0" (UID: "e5ec18e7-6719-46dd-b580-303f3da41869") : configmap "rabbitmq-cell1-config-data" not found Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.221366 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican8199-account-delete-z9b9h"] Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.225556 4857 generic.go:334] "Generic (PLEG): container finished" podID="960b2298-15f9-436b-93c9-04b0617c0c9b" containerID="47bbdb9e28bdbe71f7a851efff32886322b17e3b9bc0735fe15a53e277caf7fc" exitCode=0 Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.226101 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6b7846d5d5-ddbqf" event={"ID":"960b2298-15f9-436b-93c9-04b0617c0c9b","Type":"ContainerDied","Data":"47bbdb9e28bdbe71f7a851efff32886322b17e3b9bc0735fe15a53e277caf7fc"} Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.229271 4857 generic.go:334] "Generic (PLEG): container finished" podID="63f7c690-a408-4e1f-8959-b22badb1b9dc" containerID="4b9f9dd6d4c0768cd26615fd0b66fda62c0a3074685c2e3db7b9ec59c30f07ed" exitCode=143 Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.229363 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-68dfcc5468-bgz8k" event={"ID":"63f7c690-a408-4e1f-8959-b22badb1b9dc","Type":"ContainerDied","Data":"4b9f9dd6d4c0768cd26615fd0b66fda62c0a3074685c2e3db7b9ec59c30f07ed"} Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.234209 4857 generic.go:334] "Generic (PLEG): container finished" podID="2bf149d2-9beb-4394-921a-a703473391aa" containerID="2cd656be1985a74adb1ed52d510cb94e3e9f9d8ec5011e4fd68bc155cf37553b" exitCode=0 Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.234292 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"2bf149d2-9beb-4394-921a-a703473391aa","Type":"ContainerDied","Data":"2cd656be1985a74adb1ed52d510cb94e3e9f9d8ec5011e4fd68bc155cf37553b"} Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.237806 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placementf611-account-delete-gckl9" event={"ID":"29b25b8b-8f1c-4f60-b275-f924f1c0812a","Type":"ContainerStarted","Data":"3efe01fb1e0069f64a0116ea290822480a0c467db280b61136f0bd5525450f6b"} Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.244653 4857 generic.go:334] "Generic (PLEG): container finished" podID="3f3c4b68-eb9c-466a-accc-51a99bcdac06" containerID="cf34788b7144b8ed3875b49b5d2b5b6c3a9ecc3f59a04c12ee7eae6c0054ca0c" exitCode=0 Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.244681 4857 generic.go:334] "Generic (PLEG): container finished" podID="3f3c4b68-eb9c-466a-accc-51a99bcdac06" containerID="51f6521ceec6d8813ef4ec98da770b7f10abace9ce85b5db6afc3223f7041703" exitCode=0 Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.244726 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" event={"ID":"3f3c4b68-eb9c-466a-accc-51a99bcdac06","Type":"ContainerDied","Data":"cf34788b7144b8ed3875b49b5d2b5b6c3a9ecc3f59a04c12ee7eae6c0054ca0c"} Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.244755 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" 
event={"ID":"3f3c4b68-eb9c-466a-accc-51a99bcdac06","Type":"ContainerDied","Data":"51f6521ceec6d8813ef4ec98da770b7f10abace9ce85b5db6afc3223f7041703"} Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.244770 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" event={"ID":"3f3c4b68-eb9c-466a-accc-51a99bcdac06","Type":"ContainerDied","Data":"12e218fa5f6f136b5238b54192e01e3874d3fc29f382727bd19e226597b2d40c"} Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.244786 4857 scope.go:117] "RemoveContainer" containerID="cf34788b7144b8ed3875b49b5d2b5b6c3a9ecc3f59a04c12ee7eae6c0054ca0c" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.244916 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-7ddf6b88b7-6dfnw" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.248359 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance25bf-account-delete-78p7f" event={"ID":"464d5189-d9e5-4b18-b383-a7d75a28771b","Type":"ContainerStarted","Data":"07fd8b3c8c46e08717c23b656d566703c81602ce575ebd1eb20843ce520fc8ef"} Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.250254 4857 generic.go:334] "Generic (PLEG): container finished" podID="10f5dca0-ca0a-4e88-838f-14affb1dead5" containerID="9eb233258cdcc0845df438e157136a632bd3695d5e815406ce0875d7029891d2" exitCode=143 Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.250342 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"10f5dca0-ca0a-4e88-838f-14affb1dead5","Type":"ContainerDied","Data":"9eb233258cdcc0845df438e157136a632bd3695d5e815406ce0875d7029891d2"} Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.260658 4857 generic.go:334] "Generic (PLEG): container finished" podID="d38848c8-5fdb-462f-8471-a0b1d2211b82" containerID="d9962818d5fa228aba9f99a8cbf7802647856bad4f83e918d4d18ae1a9033d11" exitCode=0 Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.260860 4857 generic.go:334] "Generic (PLEG): container finished" podID="d38848c8-5fdb-462f-8471-a0b1d2211b82" containerID="56aaf63e257ad874a5ce8e7d3da112b7d05ea4d65d7e6e31d4807576b158a4bf" exitCode=143 Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.260772 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-d85b4cc9d-lptk6" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.260715 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-d85b4cc9d-lptk6" event={"ID":"d38848c8-5fdb-462f-8471-a0b1d2211b82","Type":"ContainerDied","Data":"d9962818d5fa228aba9f99a8cbf7802647856bad4f83e918d4d18ae1a9033d11"} Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.261438 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-d85b4cc9d-lptk6" event={"ID":"d38848c8-5fdb-462f-8471-a0b1d2211b82","Type":"ContainerDied","Data":"56aaf63e257ad874a5ce8e7d3da112b7d05ea4d65d7e6e31d4807576b158a4bf"} Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.261461 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-d85b4cc9d-lptk6" event={"ID":"d38848c8-5fdb-462f-8471-a0b1d2211b82","Type":"ContainerDied","Data":"f60d3971b99dc2a4bf805e7c987b0264b70e269e775dd4acfad310a452479169"} Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.274740 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_2dfd7b5e-9e1b-4f85-9933-2f3f55cee399/ovsdbserver-sb/0.log" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.275419 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.275403 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"2dfd7b5e-9e1b-4f85-9933-2f3f55cee399","Type":"ContainerDied","Data":"b27101efeb3ef6efeed65a8ad3adf3ce48550833cc4fee414436587568695007"} Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.277770 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cd5cbd7b9-7frjn" event={"ID":"bf608325-3713-4ff6-8e16-c7993618ef71","Type":"ContainerDied","Data":"138864600aa88c6a4bb8c8c1b4c0edfbf27f19e4a2a11296ba0620b40673bb94"} Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.277863 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-cd5cbd7b9-7frjn" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.279670 4857 scope.go:117] "RemoveContainer" containerID="51f6521ceec6d8813ef4ec98da770b7f10abace9ce85b5db6afc3223f7041703" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.282592 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ws6jj\" (UniqueName: \"kubernetes.io/projected/d38848c8-5fdb-462f-8471-a0b1d2211b82-kube-api-access-ws6jj\") pod \"d38848c8-5fdb-462f-8471-a0b1d2211b82\" (UID: \"d38848c8-5fdb-462f-8471-a0b1d2211b82\") " Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.282650 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d38848c8-5fdb-462f-8471-a0b1d2211b82-config-data-custom\") pod \"d38848c8-5fdb-462f-8471-a0b1d2211b82\" (UID: \"d38848c8-5fdb-462f-8471-a0b1d2211b82\") " Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.282700 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d38848c8-5fdb-462f-8471-a0b1d2211b82-config-data\") pod \"d38848c8-5fdb-462f-8471-a0b1d2211b82\" (UID: \"d38848c8-5fdb-462f-8471-a0b1d2211b82\") " Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.282741 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d38848c8-5fdb-462f-8471-a0b1d2211b82-logs\") pod \"d38848c8-5fdb-462f-8471-a0b1d2211b82\" (UID: \"d38848c8-5fdb-462f-8471-a0b1d2211b82\") " Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.282819 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d38848c8-5fdb-462f-8471-a0b1d2211b82-combined-ca-bundle\") pod \"d38848c8-5fdb-462f-8471-a0b1d2211b82\" (UID: \"d38848c8-5fdb-462f-8471-a0b1d2211b82\") " Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.284795 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d38848c8-5fdb-462f-8471-a0b1d2211b82-logs" (OuterVolumeSpecName: "logs") pod "d38848c8-5fdb-462f-8471-a0b1d2211b82" (UID: "d38848c8-5fdb-462f-8471-a0b1d2211b82"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.285000 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.291511 4857 generic.go:334] "Generic (PLEG): container finished" podID="9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c" containerID="949cd8e48f0fc8953dd188f71049a00ec864bfbb7da60444a68266e465fb949e" exitCode=0 Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.291569 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c","Type":"ContainerDied","Data":"949cd8e48f0fc8953dd188f71049a00ec864bfbb7da60444a68266e465fb949e"} Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.295886 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_dceb7667-07bc-486b-b65f-c87427949ffd/ovsdbserver-nb/0.log" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.296050 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.296878 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"dceb7667-07bc-486b-b65f-c87427949ffd","Type":"ContainerDied","Data":"d6ff6c51ddadb63010adbc67c900a1ae58dea0b41dfc83b6440ff9baa3003822"} Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.302112 4857 generic.go:334] "Generic (PLEG): container finished" podID="c69ec619-0d17-4a49-8f97-6db48291122d" containerID="be1b1db83f35439731454aeddb4f9dfe722c000a3ea47cdfea8a67b86a4ff061" exitCode=143 Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.302181 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c69ec619-0d17-4a49-8f97-6db48291122d","Type":"ContainerDied","Data":"be1b1db83f35439731454aeddb4f9dfe722c000a3ea47cdfea8a67b86a4ff061"} Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.306132 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron0f31-account-delete-t4p7d" event={"ID":"4c03f40d-958a-49a0-a2f7-54a1f175caf7","Type":"ContainerStarted","Data":"cf7f9a668cadd23d1dad5e5511d43a0e4333af0716d6bf6e05fadfb9dc1a3e91"} Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.311024 4857 generic.go:334] "Generic (PLEG): container finished" podID="791bae3e-043c-4a91-8e8b-d1d574dcb008" containerID="184fc257124031568af407b028afb09e8021dd8c63efc35a95cbe06c5d922f41" exitCode=143 Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.311118 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7d59cc7587-wt4q5" event={"ID":"791bae3e-043c-4a91-8e8b-d1d574dcb008","Type":"ContainerDied","Data":"184fc257124031568af407b028afb09e8021dd8c63efc35a95cbe06c5d922f41"} Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.313116 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d38848c8-5fdb-462f-8471-a0b1d2211b82-kube-api-access-ws6jj" (OuterVolumeSpecName: "kube-api-access-ws6jj") pod "d38848c8-5fdb-462f-8471-a0b1d2211b82" (UID: "d38848c8-5fdb-462f-8471-a0b1d2211b82"). InnerVolumeSpecName "kube-api-access-ws6jj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.324606 4857 scope.go:117] "RemoveContainer" containerID="cf34788b7144b8ed3875b49b5d2b5b6c3a9ecc3f59a04c12ee7eae6c0054ca0c" Nov 28 13:55:05 crc kubenswrapper[4857]: E1128 13:55:05.325493 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cf34788b7144b8ed3875b49b5d2b5b6c3a9ecc3f59a04c12ee7eae6c0054ca0c\": container with ID starting with cf34788b7144b8ed3875b49b5d2b5b6c3a9ecc3f59a04c12ee7eae6c0054ca0c not found: ID does not exist" containerID="cf34788b7144b8ed3875b49b5d2b5b6c3a9ecc3f59a04c12ee7eae6c0054ca0c" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.325531 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf34788b7144b8ed3875b49b5d2b5b6c3a9ecc3f59a04c12ee7eae6c0054ca0c"} err="failed to get container status \"cf34788b7144b8ed3875b49b5d2b5b6c3a9ecc3f59a04c12ee7eae6c0054ca0c\": rpc error: code = NotFound desc = could not find container \"cf34788b7144b8ed3875b49b5d2b5b6c3a9ecc3f59a04c12ee7eae6c0054ca0c\": container with ID starting with cf34788b7144b8ed3875b49b5d2b5b6c3a9ecc3f59a04c12ee7eae6c0054ca0c not found: ID does not exist" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.325579 4857 scope.go:117] "RemoveContainer" containerID="51f6521ceec6d8813ef4ec98da770b7f10abace9ce85b5db6afc3223f7041703" Nov 28 13:55:05 crc kubenswrapper[4857]: E1128 13:55:05.325880 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"51f6521ceec6d8813ef4ec98da770b7f10abace9ce85b5db6afc3223f7041703\": container with ID starting with 51f6521ceec6d8813ef4ec98da770b7f10abace9ce85b5db6afc3223f7041703 not found: ID does not exist" containerID="51f6521ceec6d8813ef4ec98da770b7f10abace9ce85b5db6afc3223f7041703" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.325926 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"51f6521ceec6d8813ef4ec98da770b7f10abace9ce85b5db6afc3223f7041703"} err="failed to get container status \"51f6521ceec6d8813ef4ec98da770b7f10abace9ce85b5db6afc3223f7041703\": rpc error: code = NotFound desc = could not find container \"51f6521ceec6d8813ef4ec98da770b7f10abace9ce85b5db6afc3223f7041703\": container with ID starting with 51f6521ceec6d8813ef4ec98da770b7f10abace9ce85b5db6afc3223f7041703 not found: ID does not exist" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.326033 4857 scope.go:117] "RemoveContainer" containerID="cf34788b7144b8ed3875b49b5d2b5b6c3a9ecc3f59a04c12ee7eae6c0054ca0c" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.326457 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf34788b7144b8ed3875b49b5d2b5b6c3a9ecc3f59a04c12ee7eae6c0054ca0c"} err="failed to get container status \"cf34788b7144b8ed3875b49b5d2b5b6c3a9ecc3f59a04c12ee7eae6c0054ca0c\": rpc error: code = NotFound desc = could not find container \"cf34788b7144b8ed3875b49b5d2b5b6c3a9ecc3f59a04c12ee7eae6c0054ca0c\": container with ID starting with cf34788b7144b8ed3875b49b5d2b5b6c3a9ecc3f59a04c12ee7eae6c0054ca0c not found: ID does not exist" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.327355 4857 scope.go:117] "RemoveContainer" containerID="51f6521ceec6d8813ef4ec98da770b7f10abace9ce85b5db6afc3223f7041703" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.328374 4857 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"51f6521ceec6d8813ef4ec98da770b7f10abace9ce85b5db6afc3223f7041703"} err="failed to get container status \"51f6521ceec6d8813ef4ec98da770b7f10abace9ce85b5db6afc3223f7041703\": rpc error: code = NotFound desc = could not find container \"51f6521ceec6d8813ef4ec98da770b7f10abace9ce85b5db6afc3223f7041703\": container with ID starting with 51f6521ceec6d8813ef4ec98da770b7f10abace9ce85b5db6afc3223f7041703 not found: ID does not exist" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.328403 4857 scope.go:117] "RemoveContainer" containerID="d9962818d5fa228aba9f99a8cbf7802647856bad4f83e918d4d18ae1a9033d11" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.337123 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d38848c8-5fdb-462f-8471-a0b1d2211b82-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "d38848c8-5fdb-462f-8471-a0b1d2211b82" (UID: "d38848c8-5fdb-462f-8471-a0b1d2211b82"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.376449 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-7frjn"] Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.385420 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ws6jj\" (UniqueName: \"kubernetes.io/projected/d38848c8-5fdb-462f-8471-a0b1d2211b82-kube-api-access-ws6jj\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.385457 4857 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d38848c8-5fdb-462f-8471-a0b1d2211b82-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.385472 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d38848c8-5fdb-462f-8471-a0b1d2211b82-logs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.386304 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-cd5cbd7b9-7frjn"] Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.388876 4857 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.397206 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dceb7667-07bc-486b-b65f-c87427949ffd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dceb7667-07bc-486b-b65f-c87427949ffd" (UID: "dceb7667-07bc-486b-b65f-c87427949ffd"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.433273 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novacell0837f-account-delete-fhf8k"] Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.433324 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novaapi379c-account-delete-7x584"] Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.433336 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-cpnwx"] Nov 28 13:55:05 crc kubenswrapper[4857]: E1128 13:55:05.433661 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dceb7667-07bc-486b-b65f-c87427949ffd" containerName="openstack-network-exporter" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.433672 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="dceb7667-07bc-486b-b65f-c87427949ffd" containerName="openstack-network-exporter" Nov 28 13:55:05 crc kubenswrapper[4857]: E1128 13:55:05.433684 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf608325-3713-4ff6-8e16-c7993618ef71" containerName="init" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.433691 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf608325-3713-4ff6-8e16-c7993618ef71" containerName="init" Nov 28 13:55:05 crc kubenswrapper[4857]: E1128 13:55:05.433703 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f3c4b68-eb9c-466a-accc-51a99bcdac06" containerName="proxy-server" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.433709 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f3c4b68-eb9c-466a-accc-51a99bcdac06" containerName="proxy-server" Nov 28 13:55:05 crc kubenswrapper[4857]: E1128 13:55:05.433736 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c117ad8-9d37-4e85-b408-e2d77c8331df" containerName="openstack-network-exporter" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.433742 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c117ad8-9d37-4e85-b408-e2d77c8331df" containerName="openstack-network-exporter" Nov 28 13:55:05 crc kubenswrapper[4857]: E1128 13:55:05.433755 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf608325-3713-4ff6-8e16-c7993618ef71" containerName="dnsmasq-dns" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.433760 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf608325-3713-4ff6-8e16-c7993618ef71" containerName="dnsmasq-dns" Nov 28 13:55:05 crc kubenswrapper[4857]: E1128 13:55:05.433772 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f3c4b68-eb9c-466a-accc-51a99bcdac06" containerName="proxy-httpd" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.433777 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f3c4b68-eb9c-466a-accc-51a99bcdac06" containerName="proxy-httpd" Nov 28 13:55:05 crc kubenswrapper[4857]: E1128 13:55:05.433787 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d38848c8-5fdb-462f-8471-a0b1d2211b82" containerName="barbican-keystone-listener-log" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.433792 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d38848c8-5fdb-462f-8471-a0b1d2211b82" containerName="barbican-keystone-listener-log" Nov 28 13:55:05 crc kubenswrapper[4857]: E1128 13:55:05.433805 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2dfd7b5e-9e1b-4f85-9933-2f3f55cee399" 
containerName="openstack-network-exporter" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.433811 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="2dfd7b5e-9e1b-4f85-9933-2f3f55cee399" containerName="openstack-network-exporter" Nov 28 13:55:05 crc kubenswrapper[4857]: E1128 13:55:05.433822 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d38848c8-5fdb-462f-8471-a0b1d2211b82" containerName="barbican-keystone-listener" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.433828 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d38848c8-5fdb-462f-8471-a0b1d2211b82" containerName="barbican-keystone-listener" Nov 28 13:55:05 crc kubenswrapper[4857]: E1128 13:55:05.433841 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dceb7667-07bc-486b-b65f-c87427949ffd" containerName="ovsdbserver-nb" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.433847 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="dceb7667-07bc-486b-b65f-c87427949ffd" containerName="ovsdbserver-nb" Nov 28 13:55:05 crc kubenswrapper[4857]: E1128 13:55:05.433857 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2dfd7b5e-9e1b-4f85-9933-2f3f55cee399" containerName="ovsdbserver-sb" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.433862 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="2dfd7b5e-9e1b-4f85-9933-2f3f55cee399" containerName="ovsdbserver-sb" Nov 28 13:55:05 crc kubenswrapper[4857]: E1128 13:55:05.433874 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de8730d5-79df-4483-a263-1dd72a7ee079" containerName="ovn-controller" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.433880 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="de8730d5-79df-4483-a263-1dd72a7ee079" containerName="ovn-controller" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.434064 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f3c4b68-eb9c-466a-accc-51a99bcdac06" containerName="proxy-httpd" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.434074 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="2dfd7b5e-9e1b-4f85-9933-2f3f55cee399" containerName="ovsdbserver-sb" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.434094 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="de8730d5-79df-4483-a263-1dd72a7ee079" containerName="ovn-controller" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.434101 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="dceb7667-07bc-486b-b65f-c87427949ffd" containerName="openstack-network-exporter" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.434107 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf608325-3713-4ff6-8e16-c7993618ef71" containerName="dnsmasq-dns" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.434117 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c117ad8-9d37-4e85-b408-e2d77c8331df" containerName="openstack-network-exporter" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.434140 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="dceb7667-07bc-486b-b65f-c87427949ffd" containerName="ovsdbserver-nb" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.434150 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d38848c8-5fdb-462f-8471-a0b1d2211b82" containerName="barbican-keystone-listener" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 
13:55:05.434163 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d38848c8-5fdb-462f-8471-a0b1d2211b82" containerName="barbican-keystone-listener-log" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.434175 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f3c4b68-eb9c-466a-accc-51a99bcdac06" containerName="proxy-server" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.434184 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="2dfd7b5e-9e1b-4f85-9933-2f3f55cee399" containerName="openstack-network-exporter" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.435395 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cpnwx" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.442286 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cpnwx"] Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.487607 4857 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.487936 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dceb7667-07bc-486b-b65f-c87427949ffd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.590483 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f9668e3-0293-4d8f-aa56-ad830134b0e4-catalog-content\") pod \"certified-operators-cpnwx\" (UID: \"7f9668e3-0293-4d8f-aa56-ad830134b0e4\") " pod="openshift-marketplace/certified-operators-cpnwx" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.590534 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8sgbz\" (UniqueName: \"kubernetes.io/projected/7f9668e3-0293-4d8f-aa56-ad830134b0e4-kube-api-access-8sgbz\") pod \"certified-operators-cpnwx\" (UID: \"7f9668e3-0293-4d8f-aa56-ad830134b0e4\") " pod="openshift-marketplace/certified-operators-cpnwx" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.590583 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7f9668e3-0293-4d8f-aa56-ad830134b0e4-utilities\") pod \"certified-operators-cpnwx\" (UID: \"7f9668e3-0293-4d8f-aa56-ad830134b0e4\") " pod="openshift-marketplace/certified-operators-cpnwx" Nov 28 13:55:05 crc kubenswrapper[4857]: E1128 13:55:05.590779 4857 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 28 13:55:05 crc kubenswrapper[4857]: E1128 13:55:05.590836 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/e5550a25-04ef-4dde-afd4-627f1df97a90-config-data podName:e5550a25-04ef-4dde-afd4-627f1df97a90 nodeName:}" failed. No retries permitted until 2025-11-28 13:55:09.590817619 +0000 UTC m=+1559.714759056 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/e5550a25-04ef-4dde-afd4-627f1df97a90-config-data") pod "rabbitmq-server-0" (UID: "e5550a25-04ef-4dde-afd4-627f1df97a90") : configmap "rabbitmq-config-data" not found Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.663697 4857 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.669299 4857 scope.go:117] "RemoveContainer" containerID="56aaf63e257ad874a5ce8e7d3da112b7d05ea4d65d7e6e31d4807576b158a4bf" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.678120 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell196aa-account-delete-rnjfc"] Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.689276 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f3c4b68-eb9c-466a-accc-51a99bcdac06-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "3f3c4b68-eb9c-466a-accc-51a99bcdac06" (UID: "3f3c4b68-eb9c-466a-accc-51a99bcdac06"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.692299 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8sgbz\" (UniqueName: \"kubernetes.io/projected/7f9668e3-0293-4d8f-aa56-ad830134b0e4-kube-api-access-8sgbz\") pod \"certified-operators-cpnwx\" (UID: \"7f9668e3-0293-4d8f-aa56-ad830134b0e4\") " pod="openshift-marketplace/certified-operators-cpnwx" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.692369 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7f9668e3-0293-4d8f-aa56-ad830134b0e4-utilities\") pod \"certified-operators-cpnwx\" (UID: \"7f9668e3-0293-4d8f-aa56-ad830134b0e4\") " pod="openshift-marketplace/certified-operators-cpnwx" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.692563 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f9668e3-0293-4d8f-aa56-ad830134b0e4-catalog-content\") pod \"certified-operators-cpnwx\" (UID: \"7f9668e3-0293-4d8f-aa56-ad830134b0e4\") " pod="openshift-marketplace/certified-operators-cpnwx" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.692650 4857 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.692665 4857 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f3c4b68-eb9c-466a-accc-51a99bcdac06-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.696707 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f9668e3-0293-4d8f-aa56-ad830134b0e4-catalog-content\") pod \"certified-operators-cpnwx\" (UID: \"7f9668e3-0293-4d8f-aa56-ad830134b0e4\") " pod="openshift-marketplace/certified-operators-cpnwx" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.697196 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/7f9668e3-0293-4d8f-aa56-ad830134b0e4-utilities\") pod \"certified-operators-cpnwx\" (UID: \"7f9668e3-0293-4d8f-aa56-ad830134b0e4\") " pod="openshift-marketplace/certified-operators-cpnwx" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.751576 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8sgbz\" (UniqueName: \"kubernetes.io/projected/7f9668e3-0293-4d8f-aa56-ad830134b0e4-kube-api-access-8sgbz\") pod \"certified-operators-cpnwx\" (UID: \"7f9668e3-0293-4d8f-aa56-ad830134b0e4\") " pod="openshift-marketplace/certified-operators-cpnwx" Nov 28 13:55:05 crc kubenswrapper[4857]: E1128 13:55:05.855117 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of ada4622992cb990d822672c1f29ee9bd8e30cbd244227c7ff8d566127a8be7c6 is running failed: container process not found" containerID="ada4622992cb990d822672c1f29ee9bd8e30cbd244227c7ff8d566127a8be7c6" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 13:55:05 crc kubenswrapper[4857]: E1128 13:55:05.856129 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of ada4622992cb990d822672c1f29ee9bd8e30cbd244227c7ff8d566127a8be7c6 is running failed: container process not found" containerID="ada4622992cb990d822672c1f29ee9bd8e30cbd244227c7ff8d566127a8be7c6" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 13:55:05 crc kubenswrapper[4857]: E1128 13:55:05.857213 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of ada4622992cb990d822672c1f29ee9bd8e30cbd244227c7ff8d566127a8be7c6 is running failed: container process not found" containerID="ada4622992cb990d822672c1f29ee9bd8e30cbd244227c7ff8d566127a8be7c6" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 13:55:05 crc kubenswrapper[4857]: E1128 13:55:05.857290 4857 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of ada4622992cb990d822672c1f29ee9bd8e30cbd244227c7ff8d566127a8be7c6 is running failed: container process not found" probeType="Readiness" pod="openstack/nova-cell1-conductor-0" podUID="61549c8e-2955-4350-9055-731ceb896fdc" containerName="nova-cell1-conductor-conductor" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.873574 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f3c4b68-eb9c-466a-accc-51a99bcdac06-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "3f3c4b68-eb9c-466a-accc-51a99bcdac06" (UID: "3f3c4b68-eb9c-466a-accc-51a99bcdac06"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.879054 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2dfd7b5e-9e1b-4f85-9933-2f3f55cee399" (UID: "2dfd7b5e-9e1b-4f85-9933-2f3f55cee399"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.902458 4857 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3f3c4b68-eb9c-466a-accc-51a99bcdac06-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.902831 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.972075 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "2dfd7b5e-9e1b-4f85-9933-2f3f55cee399" (UID: "2dfd7b5e-9e1b-4f85-9933-2f3f55cee399"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.976561 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d38848c8-5fdb-462f-8471-a0b1d2211b82-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d38848c8-5fdb-462f-8471-a0b1d2211b82" (UID: "d38848c8-5fdb-462f-8471-a0b1d2211b82"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:05 crc kubenswrapper[4857]: I1128 13:55:05.988451 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-ovsdbserver-sb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-sb-tls-certs") pod "2dfd7b5e-9e1b-4f85-9933-2f3f55cee399" (UID: "2dfd7b5e-9e1b-4f85-9933-2f3f55cee399"). InnerVolumeSpecName "ovsdbserver-sb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.004684 4857 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.004713 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399-ovsdbserver-sb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.004722 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d38848c8-5fdb-462f-8471-a0b1d2211b82-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.027449 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dceb7667-07bc-486b-b65f-c87427949ffd-ovsdbserver-nb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-nb-tls-certs") pod "dceb7667-07bc-486b-b65f-c87427949ffd" (UID: "dceb7667-07bc-486b-b65f-c87427949ffd"). InnerVolumeSpecName "ovsdbserver-nb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.047464 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f3c4b68-eb9c-466a-accc-51a99bcdac06-config-data" (OuterVolumeSpecName: "config-data") pod "3f3c4b68-eb9c-466a-accc-51a99bcdac06" (UID: "3f3c4b68-eb9c-466a-accc-51a99bcdac06"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.092096 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dceb7667-07bc-486b-b65f-c87427949ffd-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "dceb7667-07bc-486b-b65f-c87427949ffd" (UID: "dceb7667-07bc-486b-b65f-c87427949ffd"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.096450 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f3c4b68-eb9c-466a-accc-51a99bcdac06-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3f3c4b68-eb9c-466a-accc-51a99bcdac06" (UID: "3f3c4b68-eb9c-466a-accc-51a99bcdac06"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.103324 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d38848c8-5fdb-462f-8471-a0b1d2211b82-config-data" (OuterVolumeSpecName: "config-data") pod "d38848c8-5fdb-462f-8471-a0b1d2211b82" (UID: "d38848c8-5fdb-462f-8471-a0b1d2211b82"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.112033 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f3c4b68-eb9c-466a-accc-51a99bcdac06-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.112069 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/dceb7667-07bc-486b-b65f-c87427949ffd-ovsdbserver-nb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.112078 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d38848c8-5fdb-462f-8471-a0b1d2211b82-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.112086 4857 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/dceb7667-07bc-486b-b65f-c87427949ffd-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.112097 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f3c4b68-eb9c-466a-accc-51a99bcdac06-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.271063 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf608325-3713-4ff6-8e16-c7993618ef71" path="/var/lib/kubelet/pods/bf608325-3713-4ff6-8e16-c7993618ef71/volumes" Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.271722 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes 
dir" podUID="c3d9d9f7-3d10-4677-aaef-60d731a33857" path="/var/lib/kubelet/pods/c3d9d9f7-3d10-4677-aaef-60d731a33857/volumes" Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.277793 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de8730d5-79df-4483-a263-1dd72a7ee079" path="/var/lib/kubelet/pods/de8730d5-79df-4483-a263-1dd72a7ee079/volumes" Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.396591 4857 generic.go:334] "Generic (PLEG): container finished" podID="ba0bce06-3761-4bb4-8e35-305dc48b3277" containerID="81d6a8c7ead7745aa805448fe574ca2ff96413c2257b85ed565017d004466096" exitCode=0 Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.396703 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinderf448-account-delete-vb5h7" event={"ID":"ba0bce06-3761-4bb4-8e35-305dc48b3277","Type":"ContainerDied","Data":"81d6a8c7ead7745aa805448fe574ca2ff96413c2257b85ed565017d004466096"} Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.407462 4857 generic.go:334] "Generic (PLEG): container finished" podID="61549c8e-2955-4350-9055-731ceb896fdc" containerID="ada4622992cb990d822672c1f29ee9bd8e30cbd244227c7ff8d566127a8be7c6" exitCode=0 Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.407558 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"61549c8e-2955-4350-9055-731ceb896fdc","Type":"ContainerDied","Data":"ada4622992cb990d822672c1f29ee9bd8e30cbd244227c7ff8d566127a8be7c6"} Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.407584 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"61549c8e-2955-4350-9055-731ceb896fdc","Type":"ContainerDied","Data":"dc1f1c9f56c54cb442c097516f59bd8065ffb079f5fa0810142019e28c63167b"} Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.407596 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dc1f1c9f56c54cb442c097516f59bd8065ffb079f5fa0810142019e28c63167b" Nov 28 13:55:06 crc kubenswrapper[4857]: E1128 13:55:06.412140 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="cb2e8e2db5b421b3011ac3c2410e936a5d71fda6c184fdd57c1dd60cdbcfc09b" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.419079 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"2bf149d2-9beb-4394-921a-a703473391aa","Type":"ContainerDied","Data":"1e17fe176054cb303258a70c4f1160d67735a4a6c043e7790ba9bb040e333179"} Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.419123 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1e17fe176054cb303258a70c4f1160d67735a4a6c043e7790ba9bb040e333179" Nov 28 13:55:06 crc kubenswrapper[4857]: E1128 13:55:06.429035 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="cb2e8e2db5b421b3011ac3c2410e936a5d71fda6c184fdd57c1dd60cdbcfc09b" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.442743 4857 generic.go:334] "Generic (PLEG): container finished" podID="4c03f40d-958a-49a0-a2f7-54a1f175caf7" 
containerID="59688c0cb3757ef0ac6c752acc711bd544b32c0b43a9e22d4498d69defc2540d" exitCode=0 Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.442804 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron0f31-account-delete-t4p7d" event={"ID":"4c03f40d-958a-49a0-a2f7-54a1f175caf7","Type":"ContainerDied","Data":"59688c0cb3757ef0ac6c752acc711bd544b32c0b43a9e22d4498d69defc2540d"} Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.446763 4857 generic.go:334] "Generic (PLEG): container finished" podID="464d5189-d9e5-4b18-b383-a7d75a28771b" containerID="24e4d1d6de5d290d9a6d84ca43643deb0280b77042affc77061527b2eb9622bd" exitCode=0 Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.447392 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance25bf-account-delete-78p7f" event={"ID":"464d5189-d9e5-4b18-b383-a7d75a28771b","Type":"ContainerDied","Data":"24e4d1d6de5d290d9a6d84ca43643deb0280b77042affc77061527b2eb9622bd"} Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.470468 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapi379c-account-delete-7x584" event={"ID":"8288e005-1d07-4989-bc64-64b3ecd62993","Type":"ContainerStarted","Data":"9f8f48cfed230fc4f1a81c9763ba14618bf0a552a05748b0a96536a0290ba742"} Nov 28 13:55:06 crc kubenswrapper[4857]: E1128 13:55:06.470536 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="cb2e8e2db5b421b3011ac3c2410e936a5d71fda6c184fdd57c1dd60cdbcfc09b" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Nov 28 13:55:06 crc kubenswrapper[4857]: E1128 13:55:06.470571 4857 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-northd-0" podUID="e6597fde-8e34-4ccb-8784-1b7aa3680488" containerName="ovn-northd" Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.478354 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell196aa-account-delete-rnjfc" event={"ID":"f1ee99c5-4515-45fd-ad45-cd7d96f85c10","Type":"ContainerStarted","Data":"6665b97bc0f291b293104a38aadfda52e592701b4424089046b1ed83c3f616ae"} Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.488911 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican8199-account-delete-z9b9h" event={"ID":"2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd","Type":"ContainerStarted","Data":"108c5b4f3809bd209d4fa13ccf785df4865d3897aea55b48f09fd9c1415f820e"} Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.513428 4857 generic.go:334] "Generic (PLEG): container finished" podID="29b25b8b-8f1c-4f60-b275-f924f1c0812a" containerID="c3b59678ed12525f008d6ca809cb6ade0e8c7fdf6af8a86447b76b547d93a568" exitCode=0 Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.513517 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placementf611-account-delete-gckl9" event={"ID":"29b25b8b-8f1c-4f60-b275-f924f1c0812a","Type":"ContainerDied","Data":"c3b59678ed12525f008d6ca809cb6ade0e8c7fdf6af8a86447b76b547d93a568"} Nov 28 13:55:06 crc kubenswrapper[4857]: E1128 13:55:06.539372 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9ac0d5961e71550335295fb51102d474972c116efc7d7bd4fb89a720a9055217 is 
running failed: container process not found" containerID="9ac0d5961e71550335295fb51102d474972c116efc7d7bd4fb89a720a9055217" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 13:55:06 crc kubenswrapper[4857]: E1128 13:55:06.540132 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9ac0d5961e71550335295fb51102d474972c116efc7d7bd4fb89a720a9055217 is running failed: container process not found" containerID="9ac0d5961e71550335295fb51102d474972c116efc7d7bd4fb89a720a9055217" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 13:55:06 crc kubenswrapper[4857]: E1128 13:55:06.544100 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9ac0d5961e71550335295fb51102d474972c116efc7d7bd4fb89a720a9055217 is running failed: container process not found" containerID="9ac0d5961e71550335295fb51102d474972c116efc7d7bd4fb89a720a9055217" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 13:55:06 crc kubenswrapper[4857]: E1128 13:55:06.544182 4857 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 9ac0d5961e71550335295fb51102d474972c116efc7d7bd4fb89a720a9055217 is running failed: container process not found" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="e222fcd6-26e0-46af-82ab-7cf038a18195" containerName="nova-scheduler-scheduler" Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.544589 4857 generic.go:334] "Generic (PLEG): container finished" podID="e222fcd6-26e0-46af-82ab-7cf038a18195" containerID="9ac0d5961e71550335295fb51102d474972c116efc7d7bd4fb89a720a9055217" exitCode=0 Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.544693 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"e222fcd6-26e0-46af-82ab-7cf038a18195","Type":"ContainerDied","Data":"9ac0d5961e71550335295fb51102d474972c116efc7d7bd4fb89a720a9055217"} Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.544746 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"e222fcd6-26e0-46af-82ab-7cf038a18195","Type":"ContainerDied","Data":"2295f3de8bb771d3b0e5e5547e934c009e2d6c187d7df4f80a67889c12cea8ff"} Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.544757 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2295f3de8bb771d3b0e5e5547e934c009e2d6c187d7df4f80a67889c12cea8ff" Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.557131 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell0837f-account-delete-fhf8k" event={"ID":"e1db40e4-bf66-4d82-aa94-c54d44513220","Type":"ContainerStarted","Data":"199bdb20409593a25b9f958703eabbf5816d41bd1fe2394b910fbf1883dc3643"} Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.599260 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-external-api-0" podUID="4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd" containerName="glance-log" probeResult="failure" output="Get \"https://10.217.0.170:9292/healthcheck\": read tcp 10.217.0.2:44786->10.217.0.170:9292: read: connection reset by peer" Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.599657 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-external-api-0" podUID="4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd" 
containerName="glance-httpd" probeResult="failure" output="Get \"https://10.217.0.170:9292/healthcheck\": read tcp 10.217.0.2:44772->10.217.0.170:9292: read: connection reset by peer" Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.653146 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="d973a56d-fc8e-4cef-a590-d21d32242dc4" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.164:8776/healthcheck\": read tcp 10.217.0.2:44154->10.217.0.164:8776: read: connection reset by peer" Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.659932 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e" containerName="glance-httpd" probeResult="failure" output="Get \"https://10.217.0.176:9292/healthcheck\": read tcp 10.217.0.2:34804->10.217.0.176:9292: read: connection reset by peer" Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.660503 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e" containerName="glance-log" probeResult="failure" output="Get \"https://10.217.0.176:9292/healthcheck\": read tcp 10.217.0.2:34816->10.217.0.176:9292: read: connection reset by peer" Nov 28 13:55:06 crc kubenswrapper[4857]: E1128 13:55:06.770081 4857 configmap.go:193] Couldn't get configMap openstack/openstack-cell1-scripts: configmap "openstack-cell1-scripts" not found Nov 28 13:55:06 crc kubenswrapper[4857]: E1128 13:55:06.770143 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f1ee99c5-4515-45fd-ad45-cd7d96f85c10-operator-scripts podName:f1ee99c5-4515-45fd-ad45-cd7d96f85c10 nodeName:}" failed. No retries permitted until 2025-11-28 13:55:10.77012745 +0000 UTC m=+1560.894068887 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/f1ee99c5-4515-45fd-ad45-cd7d96f85c10-operator-scripts") pod "novacell196aa-account-delete-rnjfc" (UID: "f1ee99c5-4515-45fd-ad45-cd7d96f85c10") : configmap "openstack-cell1-scripts" not found Nov 28 13:55:06 crc kubenswrapper[4857]: E1128 13:55:06.850676 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7dafe86e22a18fbebf5e7e7d5612cc7d57a0117fe9637462e2f483a803153f13 is running failed: container process not found" containerID="7dafe86e22a18fbebf5e7e7d5612cc7d57a0117fe9637462e2f483a803153f13" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Nov 28 13:55:06 crc kubenswrapper[4857]: E1128 13:55:06.853103 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7dafe86e22a18fbebf5e7e7d5612cc7d57a0117fe9637462e2f483a803153f13 is running failed: container process not found" containerID="7dafe86e22a18fbebf5e7e7d5612cc7d57a0117fe9637462e2f483a803153f13" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Nov 28 13:55:06 crc kubenswrapper[4857]: E1128 13:55:06.853467 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7dafe86e22a18fbebf5e7e7d5612cc7d57a0117fe9637462e2f483a803153f13 is running failed: container process not found" containerID="7dafe86e22a18fbebf5e7e7d5612cc7d57a0117fe9637462e2f483a803153f13" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Nov 28 13:55:06 crc kubenswrapper[4857]: E1128 13:55:06.853542 4857 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 7dafe86e22a18fbebf5e7e7d5612cc7d57a0117fe9637462e2f483a803153f13 is running failed: container process not found" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="75c7c292-3658-4264-b86b-2a825aeb9ad4" containerName="galera" Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.920862 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.921196 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e1e13053-d5d0-4d38-8758-4ebf494ededb" containerName="ceilometer-central-agent" containerID="cri-o://c1dcaef15d4aab0862029c1c380665fb49a8e7b2e70709e10b934af5bb464135" gracePeriod=30 Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.921584 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e1e13053-d5d0-4d38-8758-4ebf494ededb" containerName="sg-core" containerID="cri-o://f633a7fccbea552027e16642eeadd7827f3b2abc1b69605dcaca7883382f640d" gracePeriod=30 Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.921627 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e1e13053-d5d0-4d38-8758-4ebf494ededb" containerName="ceilometer-notification-agent" containerID="cri-o://5f70629b403f96250df7835408fa39ed17b8aa7f4321d155cb4b5928ca498a2f" gracePeriod=30 Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.921583 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" 
podUID="e1e13053-d5d0-4d38-8758-4ebf494ededb" containerName="proxy-httpd" containerID="cri-o://b02b48f6a3bd9ecdaa5bd535ab743fbe80646abdb2b8e238d7f4f2f8e3dc0a5a" gracePeriod=30 Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.980807 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 13:55:06 crc kubenswrapper[4857]: I1128 13:55:06.981168 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="d64a95a8-7e15-40a3-b2f4-54c65c2cbf45" containerName="kube-state-metrics" containerID="cri-o://89b75be03709a9433e326422eced730b8165104d804c031618aebe74ce32bcdd" gracePeriod=30 Nov 28 13:55:07 crc kubenswrapper[4857]: E1128 13:55:07.064052 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17 is running failed: container process not found" containerID="a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.066514 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.067314 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/memcached-0" podUID="f1f87bb5-7cc1-4533-b145-d855e45205ca" containerName="memcached" containerID="cri-o://27e0b4afa9ae671a1b222c98eb790a7935045df19723929282aaa365b28ec8a1" gracePeriod=30 Nov 28 13:55:07 crc kubenswrapper[4857]: E1128 13:55:07.073924 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17 is running failed: container process not found" containerID="a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 13:55:07 crc kubenswrapper[4857]: E1128 13:55:07.074058 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b92ad2228277ac1c1d0e0ba05d4fc59c373322274d92f626373330dc03f6a207" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 13:55:07 crc kubenswrapper[4857]: E1128 13:55:07.084845 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17 is running failed: container process not found" containerID="a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 13:55:07 crc kubenswrapper[4857]: E1128 13:55:07.086794 4857 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-hgm54" podUID="21fe1399-7f40-43ec-bee8-868c937a6e19" containerName="ovsdb-server" Nov 28 13:55:07 crc kubenswrapper[4857]: E1128 13:55:07.111096 4857 log.go:32] "ExecSync cmd from 
runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b92ad2228277ac1c1d0e0ba05d4fc59c373322274d92f626373330dc03f6a207" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 13:55:07 crc kubenswrapper[4857]: E1128 13:55:07.153523 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b92ad2228277ac1c1d0e0ba05d4fc59c373322274d92f626373330dc03f6a207" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 13:55:07 crc kubenswrapper[4857]: E1128 13:55:07.153649 4857 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-hgm54" podUID="21fe1399-7f40-43ec-bee8-868c937a6e19" containerName="ovs-vswitchd" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.189960 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-mt7c6"] Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.221609 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-55l57"] Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.260614 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-55l57"] Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.279048 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-5b64c5866d-mkt8b"] Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.279561 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/keystone-5b64c5866d-mkt8b" podUID="5d4d3b72-fd05-4a47-925c-f17f77c46fc1" containerName="keystone-api" containerID="cri-o://05d50721ac2243fb0f0316bcd6f40e7732694575564655c777cddace1e0267e4" gracePeriod=30 Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.306984 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-mt7c6"] Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.316486 4857 scope.go:117] "RemoveContainer" containerID="d9962818d5fa228aba9f99a8cbf7802647856bad4f83e918d4d18ae1a9033d11" Nov 28 13:55:07 crc kubenswrapper[4857]: E1128 13:55:07.327360 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d9962818d5fa228aba9f99a8cbf7802647856bad4f83e918d4d18ae1a9033d11\": container with ID starting with d9962818d5fa228aba9f99a8cbf7802647856bad4f83e918d4d18ae1a9033d11 not found: ID does not exist" containerID="d9962818d5fa228aba9f99a8cbf7802647856bad4f83e918d4d18ae1a9033d11" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.328039 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9962818d5fa228aba9f99a8cbf7802647856bad4f83e918d4d18ae1a9033d11"} err="failed to get container status \"d9962818d5fa228aba9f99a8cbf7802647856bad4f83e918d4d18ae1a9033d11\": rpc error: code = NotFound desc = could not find container \"d9962818d5fa228aba9f99a8cbf7802647856bad4f83e918d4d18ae1a9033d11\": container with ID starting with d9962818d5fa228aba9f99a8cbf7802647856bad4f83e918d4d18ae1a9033d11 not found: ID does not exist" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.328203 4857 
scope.go:117] "RemoveContainer" containerID="56aaf63e257ad874a5ce8e7d3da112b7d05ea4d65d7e6e31d4807576b158a4bf" Nov 28 13:55:07 crc kubenswrapper[4857]: E1128 13:55:07.328882 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"56aaf63e257ad874a5ce8e7d3da112b7d05ea4d65d7e6e31d4807576b158a4bf\": container with ID starting with 56aaf63e257ad874a5ce8e7d3da112b7d05ea4d65d7e6e31d4807576b158a4bf not found: ID does not exist" containerID="56aaf63e257ad874a5ce8e7d3da112b7d05ea4d65d7e6e31d4807576b158a4bf" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.328903 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56aaf63e257ad874a5ce8e7d3da112b7d05ea4d65d7e6e31d4807576b158a4bf"} err="failed to get container status \"56aaf63e257ad874a5ce8e7d3da112b7d05ea4d65d7e6e31d4807576b158a4bf\": rpc error: code = NotFound desc = could not find container \"56aaf63e257ad874a5ce8e7d3da112b7d05ea4d65d7e6e31d4807576b158a4bf\": container with ID starting with 56aaf63e257ad874a5ce8e7d3da112b7d05ea4d65d7e6e31d4807576b158a4bf not found: ID does not exist" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.328916 4857 scope.go:117] "RemoveContainer" containerID="d9962818d5fa228aba9f99a8cbf7802647856bad4f83e918d4d18ae1a9033d11" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.342138 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"] Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.342510 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9962818d5fa228aba9f99a8cbf7802647856bad4f83e918d4d18ae1a9033d11"} err="failed to get container status \"d9962818d5fa228aba9f99a8cbf7802647856bad4f83e918d4d18ae1a9033d11\": rpc error: code = NotFound desc = could not find container \"d9962818d5fa228aba9f99a8cbf7802647856bad4f83e918d4d18ae1a9033d11\": container with ID starting with d9962818d5fa228aba9f99a8cbf7802647856bad4f83e918d4d18ae1a9033d11 not found: ID does not exist" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.342635 4857 scope.go:117] "RemoveContainer" containerID="56aaf63e257ad874a5ce8e7d3da112b7d05ea4d65d7e6e31d4807576b158a4bf" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.344181 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56aaf63e257ad874a5ce8e7d3da112b7d05ea4d65d7e6e31d4807576b158a4bf"} err="failed to get container status \"56aaf63e257ad874a5ce8e7d3da112b7d05ea4d65d7e6e31d4807576b158a4bf\": rpc error: code = NotFound desc = could not find container \"56aaf63e257ad874a5ce8e7d3da112b7d05ea4d65d7e6e31d4807576b158a4bf\": container with ID starting with 56aaf63e257ad874a5ce8e7d3da112b7d05ea4d65d7e6e31d4807576b158a4bf not found: ID does not exist" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.344311 4857 scope.go:117] "RemoveContainer" containerID="bad96dcf4a4f03f679f428d14fce3c35afb403afb6b9ec23a4c23d134a347cf3" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.349919 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.360153 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-thjsw"] Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.378335 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-thjsw"] Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.414210 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-c1b2-account-create-update-4qfg8"] Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.431725 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="10f5dca0-ca0a-4e88-838f-14affb1dead5" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.205:8775/\": read tcp 10.217.0.2:57954->10.217.0.205:8775: read: connection reset by peer" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.432061 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="10f5dca0-ca0a-4e88-838f-14affb1dead5" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.205:8775/\": read tcp 10.217.0.2:57938->10.217.0.205:8775: read: connection reset by peer" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.434416 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-c1b2-account-create-update-4qfg8"] Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.449728 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.470353 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-68dfcc5468-bgz8k" podUID="63f7c690-a408-4e1f-8959-b22badb1b9dc" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.160:9311/healthcheck\": dial tcp 10.217.0.160:9311: connect: connection refused" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.470482 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-68dfcc5468-bgz8k" podUID="63f7c690-a408-4e1f-8959-b22badb1b9dc" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.160:9311/healthcheck\": dial tcp 10.217.0.160:9311: connect: connection refused" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.499333 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bf149d2-9beb-4394-921a-a703473391aa-config-data\") pod \"2bf149d2-9beb-4394-921a-a703473391aa\" (UID: \"2bf149d2-9beb-4394-921a-a703473391aa\") " Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.499747 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/2bf149d2-9beb-4394-921a-a703473391aa-vencrypt-tls-certs\") pod \"2bf149d2-9beb-4394-921a-a703473391aa\" (UID: \"2bf149d2-9beb-4394-921a-a703473391aa\") " Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.499925 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bf149d2-9beb-4394-921a-a703473391aa-combined-ca-bundle\") pod \"2bf149d2-9beb-4394-921a-a703473391aa\" (UID: \"2bf149d2-9beb-4394-921a-a703473391aa\") " Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.500091 
4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hwb58\" (UniqueName: \"kubernetes.io/projected/2bf149d2-9beb-4394-921a-a703473391aa-kube-api-access-hwb58\") pod \"2bf149d2-9beb-4394-921a-a703473391aa\" (UID: \"2bf149d2-9beb-4394-921a-a703473391aa\") " Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.501152 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zcxwt\" (UniqueName: \"kubernetes.io/projected/e222fcd6-26e0-46af-82ab-7cf038a18195-kube-api-access-zcxwt\") pod \"e222fcd6-26e0-46af-82ab-7cf038a18195\" (UID: \"e222fcd6-26e0-46af-82ab-7cf038a18195\") " Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.501351 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e222fcd6-26e0-46af-82ab-7cf038a18195-combined-ca-bundle\") pod \"e222fcd6-26e0-46af-82ab-7cf038a18195\" (UID: \"e222fcd6-26e0-46af-82ab-7cf038a18195\") " Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.502070 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/2bf149d2-9beb-4394-921a-a703473391aa-nova-novncproxy-tls-certs\") pod \"2bf149d2-9beb-4394-921a-a703473391aa\" (UID: \"2bf149d2-9beb-4394-921a-a703473391aa\") " Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.502229 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e222fcd6-26e0-46af-82ab-7cf038a18195-config-data\") pod \"e222fcd6-26e0-46af-82ab-7cf038a18195\" (UID: \"e222fcd6-26e0-46af-82ab-7cf038a18195\") " Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.500883 4857 scope.go:117] "RemoveContainer" containerID="028f12410d5a79779844276523096407b815d65be988d8365264dc7330864432" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.511655 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e222fcd6-26e0-46af-82ab-7cf038a18195-kube-api-access-zcxwt" (OuterVolumeSpecName: "kube-api-access-zcxwt") pod "e222fcd6-26e0-46af-82ab-7cf038a18195" (UID: "e222fcd6-26e0-46af-82ab-7cf038a18195"). InnerVolumeSpecName "kube-api-access-zcxwt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.511807 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2bf149d2-9beb-4394-921a-a703473391aa-kube-api-access-hwb58" (OuterVolumeSpecName: "kube-api-access-hwb58") pod "2bf149d2-9beb-4394-921a-a703473391aa" (UID: "2bf149d2-9beb-4394-921a-a703473391aa"). InnerVolumeSpecName "kube-api-access-hwb58". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.553664 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2bf149d2-9beb-4394-921a-a703473391aa-config-data" (OuterVolumeSpecName: "config-data") pod "2bf149d2-9beb-4394-921a-a703473391aa" (UID: "2bf149d2-9beb-4394-921a-a703473391aa"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.570126 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2bf149d2-9beb-4394-921a-a703473391aa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2bf149d2-9beb-4394-921a-a703473391aa" (UID: "2bf149d2-9beb-4394-921a-a703473391aa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.586418 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e222fcd6-26e0-46af-82ab-7cf038a18195-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e222fcd6-26e0-46af-82ab-7cf038a18195" (UID: "e222fcd6-26e0-46af-82ab-7cf038a18195"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.590232 4857 generic.go:334] "Generic (PLEG): container finished" podID="d64a95a8-7e15-40a3-b2f4-54c65c2cbf45" containerID="89b75be03709a9433e326422eced730b8165104d804c031618aebe74ce32bcdd" exitCode=2 Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.590307 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"d64a95a8-7e15-40a3-b2f4-54c65c2cbf45","Type":"ContainerDied","Data":"89b75be03709a9433e326422eced730b8165104d804c031618aebe74ce32bcdd"} Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.598464 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cpnwx" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.604435 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bf149d2-9beb-4394-921a-a703473391aa-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.604468 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hwb58\" (UniqueName: \"kubernetes.io/projected/2bf149d2-9beb-4394-921a-a703473391aa-kube-api-access-hwb58\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.604482 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zcxwt\" (UniqueName: \"kubernetes.io/projected/e222fcd6-26e0-46af-82ab-7cf038a18195-kube-api-access-zcxwt\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.604494 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e222fcd6-26e0-46af-82ab-7cf038a18195-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.604504 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2bf149d2-9beb-4394-921a-a703473391aa-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.615291 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-cj65m"] Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.618606 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.618704 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2bf149d2-9beb-4394-921a-a703473391aa-vencrypt-tls-certs" (OuterVolumeSpecName: "vencrypt-tls-certs") pod "2bf149d2-9beb-4394-921a-a703473391aa" (UID: "2bf149d2-9beb-4394-921a-a703473391aa"). InnerVolumeSpecName "vencrypt-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.621697 4857 generic.go:334] "Generic (PLEG): container finished" podID="0944133e-cee5-4927-8f5e-8f781b30d224" containerID="10bea3eb3ec5b5f1dc962843d2fb1b65bf5e100082e1824518a74a4bdbf6d742" exitCode=0 Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.621817 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-794cbbfc48-m96jr" event={"ID":"0944133e-cee5-4927-8f5e-8f781b30d224","Type":"ContainerDied","Data":"10bea3eb3ec5b5f1dc962843d2fb1b65bf5e100082e1824518a74a4bdbf6d742"} Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.638252 4857 scope.go:117] "RemoveContainer" containerID="d13a0f999cf39e2cf71564829a093c201bf5675270b0ded0fe06d26418a0bf83" Nov 28 13:55:07 crc kubenswrapper[4857]: E1128 13:55:07.640109 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"028f12410d5a79779844276523096407b815d65be988d8365264dc7330864432\": container with ID starting with 028f12410d5a79779844276523096407b815d65be988d8365264dc7330864432 not found: ID does not exist" containerID="028f12410d5a79779844276523096407b815d65be988d8365264dc7330864432" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.642396 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-cj65m"] Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.649623 4857 generic.go:334] "Generic (PLEG): container finished" podID="10f5dca0-ca0a-4e88-838f-14affb1dead5" containerID="bafd64668cff0693bb5967882f3c22be467009f95e4f118cff44737cfc28e0af" exitCode=0 Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.649711 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"10f5dca0-ca0a-4e88-838f-14affb1dead5","Type":"ContainerDied","Data":"bafd64668cff0693bb5967882f3c22be467009f95e4f118cff44737cfc28e0af"} Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.669692 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e222fcd6-26e0-46af-82ab-7cf038a18195-config-data" (OuterVolumeSpecName: "config-data") pod "e222fcd6-26e0-46af-82ab-7cf038a18195" (UID: "e222fcd6-26e0-46af-82ab-7cf038a18195"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.671669 4857 generic.go:334] "Generic (PLEG): container finished" podID="75c7c292-3658-4264-b86b-2a825aeb9ad4" containerID="7dafe86e22a18fbebf5e7e7d5612cc7d57a0117fe9637462e2f483a803153f13" exitCode=0 Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.671737 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"75c7c292-3658-4264-b86b-2a825aeb9ad4","Type":"ContainerDied","Data":"7dafe86e22a18fbebf5e7e7d5612cc7d57a0117fe9637462e2f483a803153f13"} Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.671769 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"75c7c292-3658-4264-b86b-2a825aeb9ad4","Type":"ContainerDied","Data":"28dba4911a2e12d05712cbcb19c532e932a4a8560b01e9701722b9a72b4a8715"} Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.671779 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="28dba4911a2e12d05712cbcb19c532e932a4a8560b01e9701722b9a72b4a8715" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.674750 4857 generic.go:334] "Generic (PLEG): container finished" podID="d973a56d-fc8e-4cef-a590-d21d32242dc4" containerID="64a76382fda6f51faa4f808e7cb3cf14ee31449fa967229b15df56ca85a66806" exitCode=0 Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.674839 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"d973a56d-fc8e-4cef-a590-d21d32242dc4","Type":"ContainerDied","Data":"64a76382fda6f51faa4f808e7cb3cf14ee31449fa967229b15df56ca85a66806"} Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.678371 4857 generic.go:334] "Generic (PLEG): container finished" podID="4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd" containerID="7eaa667398c3e224c563e132d00cba92457ba8acc83d7c4ef18995674225d29a" exitCode=0 Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.678431 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd","Type":"ContainerDied","Data":"7eaa667398c3e224c563e132d00cba92457ba8acc83d7c4ef18995674225d29a"} Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.678457 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd","Type":"ContainerDied","Data":"b9b1ae6b7705e6c16c24f2a3693230e80524ec07683afc7a556022f8669e5aaf"} Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.678467 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b9b1ae6b7705e6c16c24f2a3693230e80524ec07683afc7a556022f8669e5aaf" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.687535 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.689361 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell0837f-account-delete-fhf8k"] Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.689786 4857 generic.go:334] "Generic (PLEG): container finished" podID="e1e13053-d5d0-4d38-8758-4ebf494ededb" containerID="b02b48f6a3bd9ecdaa5bd535ab743fbe80646abdb2b8e238d7f4f2f8e3dc0a5a" exitCode=0 Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.690544 4857 generic.go:334] "Generic (PLEG): container finished" podID="e1e13053-d5d0-4d38-8758-4ebf494ededb" containerID="f633a7fccbea552027e16642eeadd7827f3b2abc1b69605dcaca7883382f640d" exitCode=2 Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.690210 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e1e13053-d5d0-4d38-8758-4ebf494ededb","Type":"ContainerDied","Data":"b02b48f6a3bd9ecdaa5bd535ab743fbe80646abdb2b8e238d7f4f2f8e3dc0a5a"} Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.690771 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e1e13053-d5d0-4d38-8758-4ebf494ededb","Type":"ContainerDied","Data":"f633a7fccbea552027e16642eeadd7827f3b2abc1b69605dcaca7883382f640d"} Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.708156 4857 generic.go:334] "Generic (PLEG): container finished" podID="a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e" containerID="d99386d5fb6210356b4912e9ae342dc21823ae6279091264ad6b9328047690e7" exitCode=0 Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.708383 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e","Type":"ContainerDied","Data":"d99386d5fb6210356b4912e9ae342dc21823ae6279091264ad6b9328047690e7"} Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.710335 4857 reconciler_common.go:293] "Volume detached for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/2bf149d2-9beb-4394-921a-a703473391aa-vencrypt-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.710376 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e222fcd6-26e0-46af-82ab-7cf038a18195-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.714782 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.724606 4857 generic.go:334] "Generic (PLEG): container finished" podID="63f7c690-a408-4e1f-8959-b22badb1b9dc" containerID="32f2e88243b10a53bc37602a5a649cb5b43c66d0e3d47ca1f87ead6c5ef19c53" exitCode=0 Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.724838 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-68dfcc5468-bgz8k" event={"ID":"63f7c690-a408-4e1f-8959-b22badb1b9dc","Type":"ContainerDied","Data":"32f2e88243b10a53bc37602a5a649cb5b43c66d0e3d47ca1f87ead6c5ef19c53"} Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.725226 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.727295 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.741267 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-837f-account-create-update-x4tph"] Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.762362 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-galera-0" podUID="f6c9b673-669e-464a-b012-8b39314e1990" containerName="galera" containerID="cri-o://3d5a1e70902743d5830edbb49ef840b594da301bdcf85c3dfb36991d40ad00b1" gracePeriod=30 Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.786034 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-837f-account-create-update-x4tph"] Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.810752 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vr55k\" (UniqueName: \"kubernetes.io/projected/61549c8e-2955-4350-9055-731ceb896fdc-kube-api-access-vr55k\") pod \"61549c8e-2955-4350-9055-731ceb896fdc\" (UID: \"61549c8e-2955-4350-9055-731ceb896fdc\") " Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.810802 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/75c7c292-3658-4264-b86b-2a825aeb9ad4-config-data-generated\") pod \"75c7c292-3658-4264-b86b-2a825aeb9ad4\" (UID: \"75c7c292-3658-4264-b86b-2a825aeb9ad4\") " Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.810839 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61549c8e-2955-4350-9055-731ceb896fdc-config-data\") pod \"61549c8e-2955-4350-9055-731ceb896fdc\" (UID: \"61549c8e-2955-4350-9055-731ceb896fdc\") " Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.810855 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t7zlx\" (UniqueName: \"kubernetes.io/projected/75c7c292-3658-4264-b86b-2a825aeb9ad4-kube-api-access-t7zlx\") pod \"75c7c292-3658-4264-b86b-2a825aeb9ad4\" (UID: \"75c7c292-3658-4264-b86b-2a825aeb9ad4\") " Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.810882 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61549c8e-2955-4350-9055-731ceb896fdc-combined-ca-bundle\") pod \"61549c8e-2955-4350-9055-731ceb896fdc\" (UID: \"61549c8e-2955-4350-9055-731ceb896fdc\") " Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.810903 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-logs\") pod \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\" (UID: \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\") " Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.810925 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-public-tls-certs\") pod \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\" (UID: \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\") " Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.811528 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/75c7c292-3658-4264-b86b-2a825aeb9ad4-config-data-default" (OuterVolumeSpecName: "config-data-default") pod 
"75c7c292-3658-4264-b86b-2a825aeb9ad4" (UID: "75c7c292-3658-4264-b86b-2a825aeb9ad4"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.810956 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/75c7c292-3658-4264-b86b-2a825aeb9ad4-config-data-default\") pod \"75c7c292-3658-4264-b86b-2a825aeb9ad4\" (UID: \"75c7c292-3658-4264-b86b-2a825aeb9ad4\") " Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.814057 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-scripts\") pod \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\" (UID: \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\") " Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.814084 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/75c7c292-3658-4264-b86b-2a825aeb9ad4-operator-scripts\") pod \"75c7c292-3658-4264-b86b-2a825aeb9ad4\" (UID: \"75c7c292-3658-4264-b86b-2a825aeb9ad4\") " Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.814116 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/75c7c292-3658-4264-b86b-2a825aeb9ad4-galera-tls-certs\") pod \"75c7c292-3658-4264-b86b-2a825aeb9ad4\" (UID: \"75c7c292-3658-4264-b86b-2a825aeb9ad4\") " Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.814220 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75c7c292-3658-4264-b86b-2a825aeb9ad4-combined-ca-bundle\") pod \"75c7c292-3658-4264-b86b-2a825aeb9ad4\" (UID: \"75c7c292-3658-4264-b86b-2a825aeb9ad4\") " Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.814247 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/75c7c292-3658-4264-b86b-2a825aeb9ad4-kolla-config\") pod \"75c7c292-3658-4264-b86b-2a825aeb9ad4\" (UID: \"75c7c292-3658-4264-b86b-2a825aeb9ad4\") " Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.814275 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-httpd-run\") pod \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\" (UID: \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\") " Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.814310 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-combined-ca-bundle\") pod \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\" (UID: \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\") " Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.814327 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"75c7c292-3658-4264-b86b-2a825aeb9ad4\" (UID: \"75c7c292-3658-4264-b86b-2a825aeb9ad4\") " Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.814351 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") 
pod \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\" (UID: \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\") " Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.814407 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vktvk\" (UniqueName: \"kubernetes.io/projected/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-kube-api-access-vktvk\") pod \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\" (UID: \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\") " Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.814443 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-config-data\") pod \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\" (UID: \"4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd\") " Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.816072 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/75c7c292-3658-4264-b86b-2a825aeb9ad4-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "75c7c292-3658-4264-b86b-2a825aeb9ad4" (UID: "75c7c292-3658-4264-b86b-2a825aeb9ad4"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.817241 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/75c7c292-3658-4264-b86b-2a825aeb9ad4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "75c7c292-3658-4264-b86b-2a825aeb9ad4" (UID: "75c7c292-3658-4264-b86b-2a825aeb9ad4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.818429 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/75c7c292-3658-4264-b86b-2a825aeb9ad4-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "75c7c292-3658-4264-b86b-2a825aeb9ad4" (UID: "75c7c292-3658-4264-b86b-2a825aeb9ad4"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.819284 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-logs" (OuterVolumeSpecName: "logs") pod "4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd" (UID: "4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.819696 4857 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/75c7c292-3658-4264-b86b-2a825aeb9ad4-config-data-default\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.823157 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd" (UID: "4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.827802 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-7ddf6b88b7-6dfnw"] Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.858067 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-proxy-7ddf6b88b7-6dfnw"] Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.858180 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-scripts" (OuterVolumeSpecName: "scripts") pod "4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd" (UID: "4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.874304 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/75c7c292-3658-4264-b86b-2a825aeb9ad4-kube-api-access-t7zlx" (OuterVolumeSpecName: "kube-api-access-t7zlx") pod "75c7c292-3658-4264-b86b-2a825aeb9ad4" (UID: "75c7c292-3658-4264-b86b-2a825aeb9ad4"). InnerVolumeSpecName "kube-api-access-t7zlx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.876137 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "glance") pod "4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd" (UID: "4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.876294 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/61549c8e-2955-4350-9055-731ceb896fdc-kube-api-access-vr55k" (OuterVolumeSpecName: "kube-api-access-vr55k") pod "61549c8e-2955-4350-9055-731ceb896fdc" (UID: "61549c8e-2955-4350-9055-731ceb896fdc"). InnerVolumeSpecName "kube-api-access-vr55k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.879569 4857 scope.go:117] "RemoveContainer" containerID="565059e035a1a68340f03f186060685bb0562fc858f4fe8a856d1066b91f7e02" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.913416 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-kube-api-access-vktvk" (OuterVolumeSpecName: "kube-api-access-vktvk") pod "4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd" (UID: "4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd"). InnerVolumeSpecName "kube-api-access-vktvk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.922258 4857 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/75c7c292-3658-4264-b86b-2a825aeb9ad4-kolla-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.922282 4857 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.922309 4857 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.922322 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vktvk\" (UniqueName: \"kubernetes.io/projected/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-kube-api-access-vktvk\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.922343 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vr55k\" (UniqueName: \"kubernetes.io/projected/61549c8e-2955-4350-9055-731ceb896fdc-kube-api-access-vr55k\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.922355 4857 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/75c7c292-3658-4264-b86b-2a825aeb9ad4-config-data-generated\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.922367 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t7zlx\" (UniqueName: \"kubernetes.io/projected/75c7c292-3658-4264-b86b-2a825aeb9ad4-kube-api-access-t7zlx\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.922380 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-logs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.922392 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.922402 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/75c7c292-3658-4264-b86b-2a825aeb9ad4-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.930654 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-d85b4cc9d-lptk6"] Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.934288 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-keystone-listener-d85b4cc9d-lptk6"] Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.957000 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-w6xq8"] Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.971189 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-w6xq8"] Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.978591 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/nova-api-379c-account-create-update-b9q87"] Nov 28 13:55:07 crc kubenswrapper[4857]: I1128 13:55:07.992518 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "mysql-db") pod "75c7c292-3658-4264-b86b-2a825aeb9ad4" (UID: "75c7c292-3658-4264-b86b-2a825aeb9ad4"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.004862 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-379c-account-create-update-b9q87"] Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.027421 4857 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.034555 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novaapi379c-account-delete-7x584"] Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.099692 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61549c8e-2955-4350-9055-731ceb896fdc-config-data" (OuterVolumeSpecName: "config-data") pod "61549c8e-2955-4350-9055-731ceb896fdc" (UID: "61549c8e-2955-4350-9055-731ceb896fdc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.130489 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/61549c8e-2955-4350-9055-731ceb896fdc-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.181804 4857 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.189872 4857 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.196605 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75c7c292-3658-4264-b86b-2a825aeb9ad4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "75c7c292-3658-4264-b86b-2a825aeb9ad4" (UID: "75c7c292-3658-4264-b86b-2a825aeb9ad4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.201851 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2bf149d2-9beb-4394-921a-a703473391aa-nova-novncproxy-tls-certs" (OuterVolumeSpecName: "nova-novncproxy-tls-certs") pod "2bf149d2-9beb-4394-921a-a703473391aa" (UID: "2bf149d2-9beb-4394-921a-a703473391aa"). InnerVolumeSpecName "nova-novncproxy-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.203422 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd" (UID: "4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.217268 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61549c8e-2955-4350-9055-731ceb896fdc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "61549c8e-2955-4350-9055-731ceb896fdc" (UID: "61549c8e-2955-4350-9055-731ceb896fdc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.228082 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd" (UID: "4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.237062 4857 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.237105 4857 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.237118 4857 reconciler_common.go:293] "Volume detached for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/2bf149d2-9beb-4394-921a-a703473391aa-nova-novncproxy-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.237129 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/61549c8e-2955-4350-9055-731ceb896fdc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.237138 4857 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.237146 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75c7c292-3658-4264-b86b-2a825aeb9ad4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.237156 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.242399 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="28ddb94c-f564-4ac2-b665-02f5c1b7d96d" path="/var/lib/kubelet/pods/28ddb94c-f564-4ac2-b665-02f5c1b7d96d/volumes" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.243180 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f3c4b68-eb9c-466a-accc-51a99bcdac06" path="/var/lib/kubelet/pods/3f3c4b68-eb9c-466a-accc-51a99bcdac06/volumes" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.246454 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="62737fad-a99b-41e3-9333-f3dd199563a7" 
path="/var/lib/kubelet/pods/62737fad-a99b-41e3-9333-f3dd199563a7/volumes" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.247145 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6747722c-3764-4c1f-a2fb-5e604ccf27da" path="/var/lib/kubelet/pods/6747722c-3764-4c1f-a2fb-5e604ccf27da/volumes" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.247814 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c52ccccd-b22d-48e7-a20f-a612751942e5" path="/var/lib/kubelet/pods/c52ccccd-b22d-48e7-a20f-a612751942e5/volumes" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.249114 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c908dccd-c878-4dab-8186-632eb7750cff" path="/var/lib/kubelet/pods/c908dccd-c878-4dab-8186-632eb7750cff/volumes" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.250799 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d0502ed7-3dd0-48b6-ba25-44dae8b21aef" path="/var/lib/kubelet/pods/d0502ed7-3dd0-48b6-ba25-44dae8b21aef/volumes" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.251690 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d38848c8-5fdb-462f-8471-a0b1d2211b82" path="/var/lib/kubelet/pods/d38848c8-5fdb-462f-8471-a0b1d2211b82/volumes" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.252570 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f95fe79f-e849-42ef-bb76-1fe84548c3ae" path="/var/lib/kubelet/pods/f95fe79f-e849-42ef-bb76-1fe84548c3ae/volumes" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.253927 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ffd7fd7a-f9cd-4d8a-a8e9-70959c5d2eba" path="/var/lib/kubelet/pods/ffd7fd7a-f9cd-4d8a-a8e9-70959c5d2eba/volumes" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.281203 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-config-data" (OuterVolumeSpecName: "config-data") pod "4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd" (UID: "4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.305248 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75c7c292-3658-4264-b86b-2a825aeb9ad4-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "75c7c292-3658-4264-b86b-2a825aeb9ad4" (UID: "75c7c292-3658-4264-b86b-2a825aeb9ad4"). InnerVolumeSpecName "galera-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.338776 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.339028 4857 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/75c7c292-3658-4264-b86b-2a825aeb9ad4-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.498746 4857 scope.go:117] "RemoveContainer" containerID="c29237331f4b473cdae2e6d65bf7f88564f2faca53b3e0624b2143a3d5d0c546" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.500617 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.518359 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-794cbbfc48-m96jr" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.537405 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.537501 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.545927 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4kgtl\" (UniqueName: \"kubernetes.io/projected/0944133e-cee5-4927-8f5e-8f781b30d224-kube-api-access-4kgtl\") pod \"0944133e-cee5-4927-8f5e-8f781b30d224\" (UID: \"0944133e-cee5-4927-8f5e-8f781b30d224\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.546004 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-internal-tls-certs\") pod \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\" (UID: \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.546164 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0944133e-cee5-4927-8f5e-8f781b30d224-config-data\") pod \"0944133e-cee5-4927-8f5e-8f781b30d224\" (UID: \"0944133e-cee5-4927-8f5e-8f781b30d224\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.546204 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0944133e-cee5-4927-8f5e-8f781b30d224-scripts\") pod \"0944133e-cee5-4927-8f5e-8f781b30d224\" (UID: \"0944133e-cee5-4927-8f5e-8f781b30d224\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.546261 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-httpd-run\") pod \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\" (UID: \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.546335 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\" (UID: \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.546379 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-logs\") pod \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\" (UID: \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.546464 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0944133e-cee5-4927-8f5e-8f781b30d224-logs\") pod \"0944133e-cee5-4927-8f5e-8f781b30d224\" (UID: \"0944133e-cee5-4927-8f5e-8f781b30d224\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.546878 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/0944133e-cee5-4927-8f5e-8f781b30d224-internal-tls-certs\") pod \"0944133e-cee5-4927-8f5e-8f781b30d224\" (UID: \"0944133e-cee5-4927-8f5e-8f781b30d224\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.546979 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-config-data\") pod \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\" (UID: \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.547021 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-combined-ca-bundle\") pod \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\" (UID: \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.547066 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0944133e-cee5-4927-8f5e-8f781b30d224-public-tls-certs\") pod \"0944133e-cee5-4927-8f5e-8f781b30d224\" (UID: \"0944133e-cee5-4927-8f5e-8f781b30d224\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.547095 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-scripts\") pod \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\" (UID: \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.547119 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fvswz\" (UniqueName: \"kubernetes.io/projected/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-kube-api-access-fvswz\") pod \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\" (UID: \"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.547163 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0944133e-cee5-4927-8f5e-8f781b30d224-combined-ca-bundle\") pod \"0944133e-cee5-4927-8f5e-8f781b30d224\" (UID: \"0944133e-cee5-4927-8f5e-8f781b30d224\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.549772 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-logs" (OuterVolumeSpecName: "logs") pod "a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e" (UID: "a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.579192 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e" (UID: "a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.580134 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0944133e-cee5-4927-8f5e-8f781b30d224-logs" (OuterVolumeSpecName: "logs") pod "0944133e-cee5-4927-8f5e-8f781b30d224" (UID: "0944133e-cee5-4927-8f5e-8f781b30d224"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.580134 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-kube-api-access-fvswz" (OuterVolumeSpecName: "kube-api-access-fvswz") pod "a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e" (UID: "a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e"). InnerVolumeSpecName "kube-api-access-fvswz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.580149 4857 scope.go:117] "RemoveContainer" containerID="97ea011d40feafd9e10daf1c4217debdca8ad06ea7b693c5eca7afa22e62ac1f" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.583122 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0944133e-cee5-4927-8f5e-8f781b30d224-scripts" (OuterVolumeSpecName: "scripts") pod "0944133e-cee5-4927-8f5e-8f781b30d224" (UID: "0944133e-cee5-4927-8f5e-8f781b30d224"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.583618 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.587135 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-scripts" (OuterVolumeSpecName: "scripts") pod "a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e" (UID: "a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.592145 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0944133e-cee5-4927-8f5e-8f781b30d224-kube-api-access-4kgtl" (OuterVolumeSpecName: "kube-api-access-4kgtl") pod "0944133e-cee5-4927-8f5e-8f781b30d224" (UID: "0944133e-cee5-4927-8f5e-8f781b30d224"). InnerVolumeSpecName "kube-api-access-4kgtl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.597252 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.606654 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.609508 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.617913 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.634488 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron0f31-account-delete-t4p7d" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.651629 4857 scope.go:117] "RemoveContainer" containerID="a8600b616c8309585de43967afe2ae78b2ef2823ae669a2d23786e69e65b2a4a" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.653605 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.653655 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fvswz\" (UniqueName: \"kubernetes.io/projected/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-kube-api-access-fvswz\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.653676 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4kgtl\" (UniqueName: \"kubernetes.io/projected/0944133e-cee5-4927-8f5e-8f781b30d224-kube-api-access-4kgtl\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.653689 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0944133e-cee5-4927-8f5e-8f781b30d224-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.653701 4857 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.653717 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-logs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.653728 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0944133e-cee5-4927-8f5e-8f781b30d224-logs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.665578 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "glance") pod "a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e" (UID: "a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.689142 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e" (UID: "a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.690253 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placementf611-account-delete-gckl9" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.715868 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinderf448-account-delete-vb5h7" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.723567 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.726071 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance25bf-account-delete-78p7f" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.735534 4857 generic.go:334] "Generic (PLEG): container finished" podID="c69ec619-0d17-4a49-8f97-6db48291122d" containerID="2deee318dc0256b8ef37ddeba48860fdac16b660b66a585af3a3827fae2b5301" exitCode=0 Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.735591 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c69ec619-0d17-4a49-8f97-6db48291122d","Type":"ContainerDied","Data":"2deee318dc0256b8ef37ddeba48860fdac16b660b66a585af3a3827fae2b5301"} Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.735617 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c69ec619-0d17-4a49-8f97-6db48291122d","Type":"ContainerDied","Data":"e61e758e98622c51a24780646978ed2c6c2596c72fab8d38d595a32c54cb9376"} Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.735634 4857 scope.go:117] "RemoveContainer" containerID="2deee318dc0256b8ef37ddeba48860fdac16b660b66a585af3a3827fae2b5301" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.735829 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.739479 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0944133e-cee5-4927-8f5e-8f781b30d224-config-data" (OuterVolumeSpecName: "config-data") pod "0944133e-cee5-4927-8f5e-8f781b30d224" (UID: "0944133e-cee5-4927-8f5e-8f781b30d224"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.740045 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e" (UID: "a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.750116 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-config-data" (OuterVolumeSpecName: "config-data") pod "a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e" (UID: "a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.755600 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c69ec619-0d17-4a49-8f97-6db48291122d-internal-tls-certs\") pod \"c69ec619-0d17-4a49-8f97-6db48291122d\" (UID: \"c69ec619-0d17-4a49-8f97-6db48291122d\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.755639 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/10f5dca0-ca0a-4e88-838f-14affb1dead5-logs\") pod \"10f5dca0-ca0a-4e88-838f-14affb1dead5\" (UID: \"10f5dca0-ca0a-4e88-838f-14affb1dead5\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.755667 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gnvv4\" (UniqueName: \"kubernetes.io/projected/ba0bce06-3761-4bb4-8e35-305dc48b3277-kube-api-access-gnvv4\") pod \"ba0bce06-3761-4bb4-8e35-305dc48b3277\" (UID: \"ba0bce06-3761-4bb4-8e35-305dc48b3277\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.755686 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d64a95a8-7e15-40a3-b2f4-54c65c2cbf45-combined-ca-bundle\") pod \"d64a95a8-7e15-40a3-b2f4-54c65c2cbf45\" (UID: \"d64a95a8-7e15-40a3-b2f4-54c65c2cbf45\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.755713 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d973a56d-fc8e-4cef-a590-d21d32242dc4-public-tls-certs\") pod \"d973a56d-fc8e-4cef-a590-d21d32242dc4\" (UID: \"d973a56d-fc8e-4cef-a590-d21d32242dc4\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.755728 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8278s\" (UniqueName: \"kubernetes.io/projected/c69ec619-0d17-4a49-8f97-6db48291122d-kube-api-access-8278s\") pod \"c69ec619-0d17-4a49-8f97-6db48291122d\" (UID: \"c69ec619-0d17-4a49-8f97-6db48291122d\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.755747 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5kz9m\" (UniqueName: \"kubernetes.io/projected/10f5dca0-ca0a-4e88-838f-14affb1dead5-kube-api-access-5kz9m\") pod \"10f5dca0-ca0a-4e88-838f-14affb1dead5\" (UID: \"10f5dca0-ca0a-4e88-838f-14affb1dead5\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.755762 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10f5dca0-ca0a-4e88-838f-14affb1dead5-config-data\") pod \"10f5dca0-ca0a-4e88-838f-14affb1dead5\" (UID: \"10f5dca0-ca0a-4e88-838f-14affb1dead5\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.755778 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d973a56d-fc8e-4cef-a590-d21d32242dc4-config-data-custom\") pod \"d973a56d-fc8e-4cef-a590-d21d32242dc4\" (UID: \"d973a56d-fc8e-4cef-a590-d21d32242dc4\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.755798 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c69ec619-0d17-4a49-8f97-6db48291122d-logs\") pod \"c69ec619-0d17-4a49-8f97-6db48291122d\" 
(UID: \"c69ec619-0d17-4a49-8f97-6db48291122d\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.755813 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/10f5dca0-ca0a-4e88-838f-14affb1dead5-nova-metadata-tls-certs\") pod \"10f5dca0-ca0a-4e88-838f-14affb1dead5\" (UID: \"10f5dca0-ca0a-4e88-838f-14affb1dead5\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.755833 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d973a56d-fc8e-4cef-a590-d21d32242dc4-config-data\") pod \"d973a56d-fc8e-4cef-a590-d21d32242dc4\" (UID: \"d973a56d-fc8e-4cef-a590-d21d32242dc4\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.755852 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4rqxg\" (UniqueName: \"kubernetes.io/projected/464d5189-d9e5-4b18-b383-a7d75a28771b-kube-api-access-4rqxg\") pod \"464d5189-d9e5-4b18-b383-a7d75a28771b\" (UID: \"464d5189-d9e5-4b18-b383-a7d75a28771b\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.755869 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5kcl9\" (UniqueName: \"kubernetes.io/projected/4c03f40d-958a-49a0-a2f7-54a1f175caf7-kube-api-access-5kcl9\") pod \"4c03f40d-958a-49a0-a2f7-54a1f175caf7\" (UID: \"4c03f40d-958a-49a0-a2f7-54a1f175caf7\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.755888 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/d64a95a8-7e15-40a3-b2f4-54c65c2cbf45-kube-state-metrics-tls-config\") pod \"d64a95a8-7e15-40a3-b2f4-54c65c2cbf45\" (UID: \"d64a95a8-7e15-40a3-b2f4-54c65c2cbf45\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.755910 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d973a56d-fc8e-4cef-a590-d21d32242dc4-etc-machine-id\") pod \"d973a56d-fc8e-4cef-a590-d21d32242dc4\" (UID: \"d973a56d-fc8e-4cef-a590-d21d32242dc4\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.755929 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d973a56d-fc8e-4cef-a590-d21d32242dc4-scripts\") pod \"d973a56d-fc8e-4cef-a590-d21d32242dc4\" (UID: \"d973a56d-fc8e-4cef-a590-d21d32242dc4\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.760278 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d973a56d-fc8e-4cef-a590-d21d32242dc4-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "d973a56d-fc8e-4cef-a590-d21d32242dc4" (UID: "d973a56d-fc8e-4cef-a590-d21d32242dc4"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.760402 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c69ec619-0d17-4a49-8f97-6db48291122d-logs" (OuterVolumeSpecName: "logs") pod "c69ec619-0d17-4a49-8f97-6db48291122d" (UID: "c69ec619-0d17-4a49-8f97-6db48291122d"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.760503 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/10f5dca0-ca0a-4e88-838f-14affb1dead5-logs" (OuterVolumeSpecName: "logs") pod "10f5dca0-ca0a-4e88-838f-14affb1dead5" (UID: "10f5dca0-ca0a-4e88-838f-14affb1dead5"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.760790 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d973a56d-fc8e-4cef-a590-d21d32242dc4-internal-tls-certs\") pod \"d973a56d-fc8e-4cef-a590-d21d32242dc4\" (UID: \"d973a56d-fc8e-4cef-a590-d21d32242dc4\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.760830 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4c03f40d-958a-49a0-a2f7-54a1f175caf7-operator-scripts\") pod \"4c03f40d-958a-49a0-a2f7-54a1f175caf7\" (UID: \"4c03f40d-958a-49a0-a2f7-54a1f175caf7\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.760883 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d973a56d-fc8e-4cef-a590-d21d32242dc4-logs\") pod \"d973a56d-fc8e-4cef-a590-d21d32242dc4\" (UID: \"d973a56d-fc8e-4cef-a590-d21d32242dc4\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.760910 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/464d5189-d9e5-4b18-b383-a7d75a28771b-operator-scripts\") pod \"464d5189-d9e5-4b18-b383-a7d75a28771b\" (UID: \"464d5189-d9e5-4b18-b383-a7d75a28771b\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.760936 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c69ec619-0d17-4a49-8f97-6db48291122d-combined-ca-bundle\") pod \"c69ec619-0d17-4a49-8f97-6db48291122d\" (UID: \"c69ec619-0d17-4a49-8f97-6db48291122d\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.760993 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10f5dca0-ca0a-4e88-838f-14affb1dead5-combined-ca-bundle\") pod \"10f5dca0-ca0a-4e88-838f-14affb1dead5\" (UID: \"10f5dca0-ca0a-4e88-838f-14affb1dead5\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.761028 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c69ec619-0d17-4a49-8f97-6db48291122d-config-data\") pod \"c69ec619-0d17-4a49-8f97-6db48291122d\" (UID: \"c69ec619-0d17-4a49-8f97-6db48291122d\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.761093 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ba0bce06-3761-4bb4-8e35-305dc48b3277-operator-scripts\") pod \"ba0bce06-3761-4bb4-8e35-305dc48b3277\" (UID: \"ba0bce06-3761-4bb4-8e35-305dc48b3277\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.761132 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lbqm7\" (UniqueName: 
\"kubernetes.io/projected/d973a56d-fc8e-4cef-a590-d21d32242dc4-kube-api-access-lbqm7\") pod \"d973a56d-fc8e-4cef-a590-d21d32242dc4\" (UID: \"d973a56d-fc8e-4cef-a590-d21d32242dc4\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.761157 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d973a56d-fc8e-4cef-a590-d21d32242dc4-combined-ca-bundle\") pod \"d973a56d-fc8e-4cef-a590-d21d32242dc4\" (UID: \"d973a56d-fc8e-4cef-a590-d21d32242dc4\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.761191 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c69ec619-0d17-4a49-8f97-6db48291122d-public-tls-certs\") pod \"c69ec619-0d17-4a49-8f97-6db48291122d\" (UID: \"c69ec619-0d17-4a49-8f97-6db48291122d\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.761215 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-79cqm\" (UniqueName: \"kubernetes.io/projected/d64a95a8-7e15-40a3-b2f4-54c65c2cbf45-kube-api-access-79cqm\") pod \"d64a95a8-7e15-40a3-b2f4-54c65c2cbf45\" (UID: \"d64a95a8-7e15-40a3-b2f4-54c65c2cbf45\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.761253 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sq78f\" (UniqueName: \"kubernetes.io/projected/29b25b8b-8f1c-4f60-b275-f924f1c0812a-kube-api-access-sq78f\") pod \"29b25b8b-8f1c-4f60-b275-f924f1c0812a\" (UID: \"29b25b8b-8f1c-4f60-b275-f924f1c0812a\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.761281 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/d64a95a8-7e15-40a3-b2f4-54c65c2cbf45-kube-state-metrics-tls-certs\") pod \"d64a95a8-7e15-40a3-b2f4-54c65c2cbf45\" (UID: \"d64a95a8-7e15-40a3-b2f4-54c65c2cbf45\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.761305 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/29b25b8b-8f1c-4f60-b275-f924f1c0812a-operator-scripts\") pod \"29b25b8b-8f1c-4f60-b275-f924f1c0812a\" (UID: \"29b25b8b-8f1c-4f60-b275-f924f1c0812a\") " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.761904 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0944133e-cee5-4927-8f5e-8f781b30d224-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.761927 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c69ec619-0d17-4a49-8f97-6db48291122d-logs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.768496 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/464d5189-d9e5-4b18-b383-a7d75a28771b-kube-api-access-4rqxg" (OuterVolumeSpecName: "kube-api-access-4rqxg") pod "464d5189-d9e5-4b18-b383-a7d75a28771b" (UID: "464d5189-d9e5-4b18-b383-a7d75a28771b"). InnerVolumeSpecName "kube-api-access-4rqxg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.768720 4857 scope.go:117] "RemoveContainer" containerID="be1b1db83f35439731454aeddb4f9dfe722c000a3ea47cdfea8a67b86a4ff061" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.768929 4857 generic.go:334] "Generic (PLEG): container finished" podID="e1e13053-d5d0-4d38-8758-4ebf494ededb" containerID="c1dcaef15d4aab0862029c1c380665fb49a8e7b2e70709e10b934af5bb464135" exitCode=0 Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.769035 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e1e13053-d5d0-4d38-8758-4ebf494ededb","Type":"ContainerDied","Data":"c1dcaef15d4aab0862029c1c380665fb49a8e7b2e70709e10b934af5bb464135"} Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.769609 4857 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.771030 4857 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d973a56d-fc8e-4cef-a590-d21d32242dc4-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.771142 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.770514 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d973a56d-fc8e-4cef-a590-d21d32242dc4-logs" (OuterVolumeSpecName: "logs") pod "d973a56d-fc8e-4cef-a590-d21d32242dc4" (UID: "d973a56d-fc8e-4cef-a590-d21d32242dc4"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.776576 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ba0bce06-3761-4bb4-8e35-305dc48b3277-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ba0bce06-3761-4bb4-8e35-305dc48b3277" (UID: "ba0bce06-3761-4bb4-8e35-305dc48b3277"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.778318 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c03f40d-958a-49a0-a2f7-54a1f175caf7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4c03f40d-958a-49a0-a2f7-54a1f175caf7" (UID: "4c03f40d-958a-49a0-a2f7-54a1f175caf7"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.779152 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29b25b8b-8f1c-4f60-b275-f924f1c0812a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "29b25b8b-8f1c-4f60-b275-f924f1c0812a" (UID: "29b25b8b-8f1c-4f60-b275-f924f1c0812a"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.779783 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/464d5189-d9e5-4b18-b383-a7d75a28771b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "464d5189-d9e5-4b18-b383-a7d75a28771b" (UID: "464d5189-d9e5-4b18-b383-a7d75a28771b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.786842 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c69ec619-0d17-4a49-8f97-6db48291122d-kube-api-access-8278s" (OuterVolumeSpecName: "kube-api-access-8278s") pod "c69ec619-0d17-4a49-8f97-6db48291122d" (UID: "c69ec619-0d17-4a49-8f97-6db48291122d"). InnerVolumeSpecName "kube-api-access-8278s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.776245 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.792019 4857 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.792031 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/10f5dca0-ca0a-4e88-838f-14affb1dead5-logs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.809001 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba0bce06-3761-4bb4-8e35-305dc48b3277-kube-api-access-gnvv4" (OuterVolumeSpecName: "kube-api-access-gnvv4") pod "ba0bce06-3761-4bb4-8e35-305dc48b3277" (UID: "ba0bce06-3761-4bb4-8e35-305dc48b3277"). InnerVolumeSpecName "kube-api-access-gnvv4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.811396 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.811766 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"d64a95a8-7e15-40a3-b2f4-54c65c2cbf45","Type":"ContainerDied","Data":"1fdca73510f3a2dc5f166a8adda9ecf23dd2406cb1bad0e449a32ebe10b5db5e"} Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.814887 4857 generic.go:334] "Generic (PLEG): container finished" podID="f1ee99c5-4515-45fd-ad45-cd7d96f85c10" containerID="cfbb9c4939e99585e8309aa60ce1b27ebc930232a5842d8afc27834efe50659f" exitCode=1 Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.815116 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell196aa-account-delete-rnjfc" event={"ID":"f1ee99c5-4515-45fd-ad45-cd7d96f85c10","Type":"ContainerDied","Data":"cfbb9c4939e99585e8309aa60ce1b27ebc930232a5842d8afc27834efe50659f"} Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.817724 4857 generic.go:334] "Generic (PLEG): container finished" podID="f1f87bb5-7cc1-4533-b145-d855e45205ca" containerID="27e0b4afa9ae671a1b222c98eb790a7935045df19723929282aaa365b28ec8a1" exitCode=0 Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.817778 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"f1f87bb5-7cc1-4533-b145-d855e45205ca","Type":"ContainerDied","Data":"27e0b4afa9ae671a1b222c98eb790a7935045df19723929282aaa365b28ec8a1"} Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.822640 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d64a95a8-7e15-40a3-b2f4-54c65c2cbf45-kube-api-access-79cqm" (OuterVolumeSpecName: "kube-api-access-79cqm") pod "d64a95a8-7e15-40a3-b2f4-54c65c2cbf45" (UID: "d64a95a8-7e15-40a3-b2f4-54c65c2cbf45"). InnerVolumeSpecName "kube-api-access-79cqm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.822814 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d973a56d-fc8e-4cef-a590-d21d32242dc4-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "d973a56d-fc8e-4cef-a590-d21d32242dc4" (UID: "d973a56d-fc8e-4cef-a590-d21d32242dc4"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.822884 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29b25b8b-8f1c-4f60-b275-f924f1c0812a-kube-api-access-sq78f" (OuterVolumeSpecName: "kube-api-access-sq78f") pod "29b25b8b-8f1c-4f60-b275-f924f1c0812a" (UID: "29b25b8b-8f1c-4f60-b275-f924f1c0812a"). InnerVolumeSpecName "kube-api-access-sq78f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.824360 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-794cbbfc48-m96jr" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.824529 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10f5dca0-ca0a-4e88-838f-14affb1dead5-kube-api-access-5kz9m" (OuterVolumeSpecName: "kube-api-access-5kz9m") pod "10f5dca0-ca0a-4e88-838f-14affb1dead5" (UID: "10f5dca0-ca0a-4e88-838f-14affb1dead5"). InnerVolumeSpecName "kube-api-access-5kz9m". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.824574 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-794cbbfc48-m96jr" event={"ID":"0944133e-cee5-4927-8f5e-8f781b30d224","Type":"ContainerDied","Data":"c0821707a927754dd25c44eaa6ce2b49c5a796281c360b30199708467b898da3"} Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.825304 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d973a56d-fc8e-4cef-a590-d21d32242dc4-scripts" (OuterVolumeSpecName: "scripts") pod "d973a56d-fc8e-4cef-a590-d21d32242dc4" (UID: "d973a56d-fc8e-4cef-a590-d21d32242dc4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.839859 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d973a56d-fc8e-4cef-a590-d21d32242dc4-kube-api-access-lbqm7" (OuterVolumeSpecName: "kube-api-access-lbqm7") pod "d973a56d-fc8e-4cef-a590-d21d32242dc4" (UID: "d973a56d-fc8e-4cef-a590-d21d32242dc4"). InnerVolumeSpecName "kube-api-access-lbqm7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.845107 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell0837f-account-delete-fhf8k" event={"ID":"e1db40e4-bf66-4d82-aa94-c54d44513220","Type":"ContainerStarted","Data":"62f4e36825688f44319c209c848cc0648dd26d49c8a8e49ee229742999c6a64c"} Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.845276 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/novacell0837f-account-delete-fhf8k" podUID="e1db40e4-bf66-4d82-aa94-c54d44513220" containerName="mariadb-account-delete" containerID="cri-o://62f4e36825688f44319c209c848cc0648dd26d49c8a8e49ee229742999c6a64c" gracePeriod=30 Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.857628 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c03f40d-958a-49a0-a2f7-54a1f175caf7-kube-api-access-5kcl9" (OuterVolumeSpecName: "kube-api-access-5kcl9") pod "4c03f40d-958a-49a0-a2f7-54a1f175caf7" (UID: "4c03f40d-958a-49a0-a2f7-54a1f175caf7"). InnerVolumeSpecName "kube-api-access-5kcl9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.857753 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance25bf-account-delete-78p7f" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.859108 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance25bf-account-delete-78p7f" event={"ID":"464d5189-d9e5-4b18-b383-a7d75a28771b","Type":"ContainerDied","Data":"07fd8b3c8c46e08717c23b656d566703c81602ce575ebd1eb20843ce520fc8ef"} Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.859162 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="07fd8b3c8c46e08717c23b656d566703c81602ce575ebd1eb20843ce520fc8ef" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.894147 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5kz9m\" (UniqueName: \"kubernetes.io/projected/10f5dca0-ca0a-4e88-838f-14affb1dead5-kube-api-access-5kz9m\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.894176 4857 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d973a56d-fc8e-4cef-a590-d21d32242dc4-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.894186 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4rqxg\" (UniqueName: \"kubernetes.io/projected/464d5189-d9e5-4b18-b383-a7d75a28771b-kube-api-access-4rqxg\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.894197 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5kcl9\" (UniqueName: \"kubernetes.io/projected/4c03f40d-958a-49a0-a2f7-54a1f175caf7-kube-api-access-5kcl9\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.894204 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d973a56d-fc8e-4cef-a590-d21d32242dc4-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.894213 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4c03f40d-958a-49a0-a2f7-54a1f175caf7-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.894222 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d973a56d-fc8e-4cef-a590-d21d32242dc4-logs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.894230 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/464d5189-d9e5-4b18-b383-a7d75a28771b-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.894237 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ba0bce06-3761-4bb4-8e35-305dc48b3277-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.894246 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lbqm7\" (UniqueName: \"kubernetes.io/projected/d973a56d-fc8e-4cef-a590-d21d32242dc4-kube-api-access-lbqm7\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.894254 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-79cqm\" (UniqueName: 
\"kubernetes.io/projected/d64a95a8-7e15-40a3-b2f4-54c65c2cbf45-kube-api-access-79cqm\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.894264 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sq78f\" (UniqueName: \"kubernetes.io/projected/29b25b8b-8f1c-4f60-b275-f924f1c0812a-kube-api-access-sq78f\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.894273 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/29b25b8b-8f1c-4f60-b275-f924f1c0812a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.894281 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gnvv4\" (UniqueName: \"kubernetes.io/projected/ba0bce06-3761-4bb4-8e35-305dc48b3277-kube-api-access-gnvv4\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.894289 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8278s\" (UniqueName: \"kubernetes.io/projected/c69ec619-0d17-4a49-8f97-6db48291122d-kube-api-access-8278s\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.895123 4857 generic.go:334] "Generic (PLEG): container finished" podID="9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c" containerID="e34bc2b2baed17dc94ca9dc3ed8f2bf2da5b6e0bf67630dbb959a6581e276f47" exitCode=0 Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.895188 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c","Type":"ContainerDied","Data":"e34bc2b2baed17dc94ca9dc3ed8f2bf2da5b6e0bf67630dbb959a6581e276f47"} Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.921316 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e","Type":"ContainerDied","Data":"8e94c359ab01e9b810ccced94896ac9ef45f1b3a60a3963ce6a7e7bb3a2ad954"} Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.921425 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.953300 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"d973a56d-fc8e-4cef-a590-d21d32242dc4","Type":"ContainerDied","Data":"a5e7d1f145a9ce55b38a7c1ad7a592dcbaca7df0911162ddeba3a09a4c960457"} Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.953590 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.955383 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican8199-account-delete-z9b9h" event={"ID":"2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd","Type":"ContainerStarted","Data":"0c6047c8f93bc6be4f26062491195b4ab4317552f00ce7119fad6fe44c135c13"} Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.955873 4857 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." 
pod="openstack/barbican8199-account-delete-z9b9h" secret="" err="secret \"galera-openstack-dockercfg-g4gh7\" not found" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.958375 4857 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.959219 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placementf611-account-delete-gckl9" event={"ID":"29b25b8b-8f1c-4f60-b275-f924f1c0812a","Type":"ContainerDied","Data":"3efe01fb1e0069f64a0116ea290822480a0c467db280b61136f0bd5525450f6b"} Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.959243 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3efe01fb1e0069f64a0116ea290822480a0c467db280b61136f0bd5525450f6b" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.959289 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placementf611-account-delete-gckl9" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.988069 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"10f5dca0-ca0a-4e88-838f-14affb1dead5","Type":"ContainerDied","Data":"90d2b27188d8be8064e748515e9e66bc806f8c6570931142c0dff042ab3adebb"} Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.988087 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.991266 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapi379c-account-delete-7x584" event={"ID":"8288e005-1d07-4989-bc64-64b3ecd62993","Type":"ContainerStarted","Data":"29ebf0561572ef46ce57cfc73df73475b6e96599b050827b8ce6ce81e4b1ad95"} Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.996180 4857 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.997170 4857 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/novaapi379c-account-delete-7x584" secret="" err="secret \"galera-openstack-dockercfg-g4gh7\" not found" Nov 28 13:55:08 crc kubenswrapper[4857]: E1128 13:55:08.997780 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:55:08 crc kubenswrapper[4857]: E1128 13:55:08.997992 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd-operator-scripts podName:2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd nodeName:}" failed. No retries permitted until 2025-11-28 13:55:09.497964476 +0000 UTC m=+1559.621905913 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd-operator-scripts") pod "barbican8199-account-delete-z9b9h" (UID: "2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd") : configmap "openstack-scripts" not found Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.998343 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinderf448-account-delete-vb5h7" Nov 28 13:55:08 crc kubenswrapper[4857]: I1128 13:55:08.998342 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinderf448-account-delete-vb5h7" event={"ID":"ba0bce06-3761-4bb4-8e35-305dc48b3277","Type":"ContainerDied","Data":"a2f8f2846268271e23b1f3ecaa4f721389aacc2ff5cf3cd57d872c567d6e117f"} Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.000585 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a2f8f2846268271e23b1f3ecaa4f721389aacc2ff5cf3cd57d872c567d6e117f" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.004263 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.006369 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron0f31-account-delete-t4p7d" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.006457 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron0f31-account-delete-t4p7d" event={"ID":"4c03f40d-958a-49a0-a2f7-54a1f175caf7","Type":"ContainerDied","Data":"cf7f9a668cadd23d1dad5e5511d43a0e4333af0716d6bf6e05fadfb9dc1a3e91"} Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.006510 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cf7f9a668cadd23d1dad5e5511d43a0e4333af0716d6bf6e05fadfb9dc1a3e91" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.006608 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.007003 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.020116 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/novacell0837f-account-delete-fhf8k" podStartSLOduration=7.020094369 podStartE2EDuration="7.020094369s" podCreationTimestamp="2025-11-28 13:55:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:55:08.865164849 +0000 UTC m=+1558.989106316" watchObservedRunningTime="2025-11-28 13:55:09.020094369 +0000 UTC m=+1559.144035796" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.036887 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican8199-account-delete-z9b9h" podStartSLOduration=8.036866371 podStartE2EDuration="8.036866371s" podCreationTimestamp="2025-11-28 13:55:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:55:08.993078288 +0000 UTC m=+1559.117019725" watchObservedRunningTime="2025-11-28 13:55:09.036866371 +0000 UTC m=+1559.160807808" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.065415 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d64a95a8-7e15-40a3-b2f4-54c65c2cbf45-kube-state-metrics-tls-config" (OuterVolumeSpecName: "kube-state-metrics-tls-config") pod "d64a95a8-7e15-40a3-b2f4-54c65c2cbf45" (UID: "d64a95a8-7e15-40a3-b2f4-54c65c2cbf45"). InnerVolumeSpecName "kube-state-metrics-tls-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.085246 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/novaapi379c-account-delete-7x584" podStartSLOduration=7.085224175 podStartE2EDuration="7.085224175s" podCreationTimestamp="2025-11-28 13:55:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 13:55:09.018787425 +0000 UTC m=+1559.142728882" watchObservedRunningTime="2025-11-28 13:55:09.085224175 +0000 UTC m=+1559.209165612" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.091200 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10f5dca0-ca0a-4e88-838f-14affb1dead5-config-data" (OuterVolumeSpecName: "config-data") pod "10f5dca0-ca0a-4e88-838f-14affb1dead5" (UID: "10f5dca0-ca0a-4e88-838f-14affb1dead5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.094166 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c69ec619-0d17-4a49-8f97-6db48291122d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c69ec619-0d17-4a49-8f97-6db48291122d" (UID: "c69ec619-0d17-4a49-8f97-6db48291122d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.098112 4857 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/d64a95a8-7e15-40a3-b2f4-54c65c2cbf45-kube-state-metrics-tls-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.098473 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c69ec619-0d17-4a49-8f97-6db48291122d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:09 crc kubenswrapper[4857]: E1128 13:55:09.099139 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:55:09 crc kubenswrapper[4857]: E1128 13:55:09.099289 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/8288e005-1d07-4989-bc64-64b3ecd62993-operator-scripts podName:8288e005-1d07-4989-bc64-64b3ecd62993 nodeName:}" failed. No retries permitted until 2025-11-28 13:55:09.599186282 +0000 UTC m=+1559.723127719 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/8288e005-1d07-4989-bc64-64b3ecd62993-operator-scripts") pod "novaapi379c-account-delete-7x584" (UID: "8288e005-1d07-4989-bc64-64b3ecd62993") : configmap "openstack-scripts" not found Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.099366 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10f5dca0-ca0a-4e88-838f-14affb1dead5-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.126491 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d64a95a8-7e15-40a3-b2f4-54c65c2cbf45-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d64a95a8-7e15-40a3-b2f4-54c65c2cbf45" (UID: "d64a95a8-7e15-40a3-b2f4-54c65c2cbf45"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.134400 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d973a56d-fc8e-4cef-a590-d21d32242dc4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d973a56d-fc8e-4cef-a590-d21d32242dc4" (UID: "d973a56d-fc8e-4cef-a590-d21d32242dc4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.135252 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0944133e-cee5-4927-8f5e-8f781b30d224-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0944133e-cee5-4927-8f5e-8f781b30d224" (UID: "0944133e-cee5-4927-8f5e-8f781b30d224"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.154210 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c69ec619-0d17-4a49-8f97-6db48291122d-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "c69ec619-0d17-4a49-8f97-6db48291122d" (UID: "c69ec619-0d17-4a49-8f97-6db48291122d"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.155151 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c69ec619-0d17-4a49-8f97-6db48291122d-config-data" (OuterVolumeSpecName: "config-data") pod "c69ec619-0d17-4a49-8f97-6db48291122d" (UID: "c69ec619-0d17-4a49-8f97-6db48291122d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.162588 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10f5dca0-ca0a-4e88-838f-14affb1dead5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "10f5dca0-ca0a-4e88-838f-14affb1dead5" (UID: "10f5dca0-ca0a-4e88-838f-14affb1dead5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.177638 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c69ec619-0d17-4a49-8f97-6db48291122d-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "c69ec619-0d17-4a49-8f97-6db48291122d" (UID: "c69ec619-0d17-4a49-8f97-6db48291122d"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.180048 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d64a95a8-7e15-40a3-b2f4-54c65c2cbf45-kube-state-metrics-tls-certs" (OuterVolumeSpecName: "kube-state-metrics-tls-certs") pod "d64a95a8-7e15-40a3-b2f4-54c65c2cbf45" (UID: "d64a95a8-7e15-40a3-b2f4-54c65c2cbf45"). InnerVolumeSpecName "kube-state-metrics-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.181258 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10f5dca0-ca0a-4e88-838f-14affb1dead5-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "10f5dca0-ca0a-4e88-838f-14affb1dead5" (UID: "10f5dca0-ca0a-4e88-838f-14affb1dead5"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.183280 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0944133e-cee5-4927-8f5e-8f781b30d224-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "0944133e-cee5-4927-8f5e-8f781b30d224" (UID: "0944133e-cee5-4927-8f5e-8f781b30d224"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.189441 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d973a56d-fc8e-4cef-a590-d21d32242dc4-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "d973a56d-fc8e-4cef-a590-d21d32242dc4" (UID: "d973a56d-fc8e-4cef-a590-d21d32242dc4"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.189534 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d973a56d-fc8e-4cef-a590-d21d32242dc4-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "d973a56d-fc8e-4cef-a590-d21d32242dc4" (UID: "d973a56d-fc8e-4cef-a590-d21d32242dc4"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.193435 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d973a56d-fc8e-4cef-a590-d21d32242dc4-config-data" (OuterVolumeSpecName: "config-data") pod "d973a56d-fc8e-4cef-a590-d21d32242dc4" (UID: "d973a56d-fc8e-4cef-a590-d21d32242dc4"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.200933 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d64a95a8-7e15-40a3-b2f4-54c65c2cbf45-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.200974 4857 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d973a56d-fc8e-4cef-a590-d21d32242dc4-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.200984 4857 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/10f5dca0-ca0a-4e88-838f-14affb1dead5-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.200997 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d973a56d-fc8e-4cef-a590-d21d32242dc4-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.201006 4857 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d973a56d-fc8e-4cef-a590-d21d32242dc4-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.201014 4857 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0944133e-cee5-4927-8f5e-8f781b30d224-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.201024 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10f5dca0-ca0a-4e88-838f-14affb1dead5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.201032 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c69ec619-0d17-4a49-8f97-6db48291122d-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.201040 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0944133e-cee5-4927-8f5e-8f781b30d224-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.201048 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d973a56d-fc8e-4cef-a590-d21d32242dc4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.201056 4857 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c69ec619-0d17-4a49-8f97-6db48291122d-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.201066 4857 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/d64a95a8-7e15-40a3-b2f4-54c65c2cbf45-kube-state-metrics-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.201075 4857 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/c69ec619-0d17-4a49-8f97-6db48291122d-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:09 crc kubenswrapper[4857]: E1128 13:55:09.201131 4857 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 28 13:55:09 crc kubenswrapper[4857]: E1128 13:55:09.201175 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/e5ec18e7-6719-46dd-b580-303f3da41869-config-data podName:e5ec18e7-6719-46dd-b580-303f3da41869 nodeName:}" failed. No retries permitted until 2025-11-28 13:55:17.201159668 +0000 UTC m=+1567.325101105 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/e5ec18e7-6719-46dd-b580-303f3da41869-config-data") pod "rabbitmq-cell1-server-0" (UID: "e5ec18e7-6719-46dd-b580-303f3da41869") : configmap "rabbitmq-cell1-config-data" not found Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.217149 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0944133e-cee5-4927-8f5e-8f781b30d224-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "0944133e-cee5-4927-8f5e-8f781b30d224" (UID: "0944133e-cee5-4927-8f5e-8f781b30d224"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.264815 4857 scope.go:117] "RemoveContainer" containerID="2deee318dc0256b8ef37ddeba48860fdac16b660b66a585af3a3827fae2b5301" Nov 28 13:55:09 crc kubenswrapper[4857]: E1128 13:55:09.265498 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2deee318dc0256b8ef37ddeba48860fdac16b660b66a585af3a3827fae2b5301\": container with ID starting with 2deee318dc0256b8ef37ddeba48860fdac16b660b66a585af3a3827fae2b5301 not found: ID does not exist" containerID="2deee318dc0256b8ef37ddeba48860fdac16b660b66a585af3a3827fae2b5301" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.265540 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2deee318dc0256b8ef37ddeba48860fdac16b660b66a585af3a3827fae2b5301"} err="failed to get container status \"2deee318dc0256b8ef37ddeba48860fdac16b660b66a585af3a3827fae2b5301\": rpc error: code = NotFound desc = could not find container \"2deee318dc0256b8ef37ddeba48860fdac16b660b66a585af3a3827fae2b5301\": container with ID starting with 2deee318dc0256b8ef37ddeba48860fdac16b660b66a585af3a3827fae2b5301 not found: ID does not exist" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.265563 4857 scope.go:117] "RemoveContainer" containerID="be1b1db83f35439731454aeddb4f9dfe722c000a3ea47cdfea8a67b86a4ff061" Nov 28 13:55:09 crc kubenswrapper[4857]: E1128 13:55:09.266131 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be1b1db83f35439731454aeddb4f9dfe722c000a3ea47cdfea8a67b86a4ff061\": container with ID starting with be1b1db83f35439731454aeddb4f9dfe722c000a3ea47cdfea8a67b86a4ff061 not found: ID does not exist" containerID="be1b1db83f35439731454aeddb4f9dfe722c000a3ea47cdfea8a67b86a4ff061" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.266151 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be1b1db83f35439731454aeddb4f9dfe722c000a3ea47cdfea8a67b86a4ff061"} err="failed to get container status 
\"be1b1db83f35439731454aeddb4f9dfe722c000a3ea47cdfea8a67b86a4ff061\": rpc error: code = NotFound desc = could not find container \"be1b1db83f35439731454aeddb4f9dfe722c000a3ea47cdfea8a67b86a4ff061\": container with ID starting with be1b1db83f35439731454aeddb4f9dfe722c000a3ea47cdfea8a67b86a4ff061 not found: ID does not exist" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.266165 4857 scope.go:117] "RemoveContainer" containerID="89b75be03709a9433e326422eced730b8165104d804c031618aebe74ce32bcdd" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.284591 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-68dfcc5468-bgz8k" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.290373 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.298761 4857 scope.go:117] "RemoveContainer" containerID="10bea3eb3ec5b5f1dc962843d2fb1b65bf5e100082e1824518a74a4bdbf6d742" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.301805 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c-config-data\") pod \"9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c\" (UID: \"9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c\") " Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.301877 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/63f7c690-a408-4e1f-8959-b22badb1b9dc-config-data-custom\") pod \"63f7c690-a408-4e1f-8959-b22badb1b9dc\" (UID: \"63f7c690-a408-4e1f-8959-b22badb1b9dc\") " Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.301900 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m9zrs\" (UniqueName: \"kubernetes.io/projected/63f7c690-a408-4e1f-8959-b22badb1b9dc-kube-api-access-m9zrs\") pod \"63f7c690-a408-4e1f-8959-b22badb1b9dc\" (UID: \"63f7c690-a408-4e1f-8959-b22badb1b9dc\") " Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.301928 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/63f7c690-a408-4e1f-8959-b22badb1b9dc-internal-tls-certs\") pod \"63f7c690-a408-4e1f-8959-b22badb1b9dc\" (UID: \"63f7c690-a408-4e1f-8959-b22badb1b9dc\") " Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.301961 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c-etc-machine-id\") pod \"9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c\" (UID: \"9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c\") " Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.301994 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/63f7c690-a408-4e1f-8959-b22badb1b9dc-logs\") pod \"63f7c690-a408-4e1f-8959-b22badb1b9dc\" (UID: \"63f7c690-a408-4e1f-8959-b22badb1b9dc\") " Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.302012 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fpvdq\" (UniqueName: \"kubernetes.io/projected/9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c-kube-api-access-fpvdq\") pod \"9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c\" (UID: \"9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c\") " Nov 28 
13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.302111 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c-scripts\") pod \"9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c\" (UID: \"9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c\") " Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.302153 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63f7c690-a408-4e1f-8959-b22badb1b9dc-config-data\") pod \"63f7c690-a408-4e1f-8959-b22badb1b9dc\" (UID: \"63f7c690-a408-4e1f-8959-b22badb1b9dc\") " Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.302182 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c-config-data-custom\") pod \"9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c\" (UID: \"9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c\") " Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.302211 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/63f7c690-a408-4e1f-8959-b22badb1b9dc-public-tls-certs\") pod \"63f7c690-a408-4e1f-8959-b22badb1b9dc\" (UID: \"63f7c690-a408-4e1f-8959-b22badb1b9dc\") " Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.302240 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63f7c690-a408-4e1f-8959-b22badb1b9dc-combined-ca-bundle\") pod \"63f7c690-a408-4e1f-8959-b22badb1b9dc\" (UID: \"63f7c690-a408-4e1f-8959-b22badb1b9dc\") " Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.302283 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c-combined-ca-bundle\") pod \"9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c\" (UID: \"9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c\") " Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.302640 4857 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0944133e-cee5-4927-8f5e-8f781b30d224-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.306043 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c" (UID: "9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.306591 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/63f7c690-a408-4e1f-8959-b22badb1b9dc-logs" (OuterVolumeSpecName: "logs") pod "63f7c690-a408-4e1f-8959-b22badb1b9dc" (UID: "63f7c690-a408-4e1f-8959-b22badb1b9dc"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.317503 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c" (UID: "9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.322466 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63f7c690-a408-4e1f-8959-b22badb1b9dc-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "63f7c690-a408-4e1f-8959-b22badb1b9dc" (UID: "63f7c690-a408-4e1f-8959-b22badb1b9dc"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.325258 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c-kube-api-access-fpvdq" (OuterVolumeSpecName: "kube-api-access-fpvdq") pod "9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c" (UID: "9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c"). InnerVolumeSpecName "kube-api-access-fpvdq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.329269 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63f7c690-a408-4e1f-8959-b22badb1b9dc-kube-api-access-m9zrs" (OuterVolumeSpecName: "kube-api-access-m9zrs") pod "63f7c690-a408-4e1f-8959-b22badb1b9dc" (UID: "63f7c690-a408-4e1f-8959-b22badb1b9dc"). InnerVolumeSpecName "kube-api-access-m9zrs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.329284 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c-scripts" (OuterVolumeSpecName: "scripts") pod "9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c" (UID: "9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.335027 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.370732 4857 scope.go:117] "RemoveContainer" containerID="5f6e16bc3a6a3e255193ac8544c8911785ede7f13bf7ff1f4eb96c9e2ab1632c" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.371195 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63f7c690-a408-4e1f-8959-b22badb1b9dc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "63f7c690-a408-4e1f-8959-b22badb1b9dc" (UID: "63f7c690-a408-4e1f-8959-b22badb1b9dc"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.381875 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.390887 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63f7c690-a408-4e1f-8959-b22badb1b9dc-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "63f7c690-a408-4e1f-8959-b22badb1b9dc" (UID: "63f7c690-a408-4e1f-8959-b22badb1b9dc"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.393841 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.402529 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.403464 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63f7c690-a408-4e1f-8959-b22badb1b9dc-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "63f7c690-a408-4e1f-8959-b22badb1b9dc" (UID: "63f7c690-a408-4e1f-8959-b22badb1b9dc"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.405106 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f1f87bb5-7cc1-4533-b145-d855e45205ca-kolla-config\") pod \"f1f87bb5-7cc1-4533-b145-d855e45205ca\" (UID: \"f1f87bb5-7cc1-4533-b145-d855e45205ca\") " Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.405377 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f1f87bb5-7cc1-4533-b145-d855e45205ca-config-data\") pod \"f1f87bb5-7cc1-4533-b145-d855e45205ca\" (UID: \"f1f87bb5-7cc1-4533-b145-d855e45205ca\") " Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.405582 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-97dgx\" (UniqueName: \"kubernetes.io/projected/f1f87bb5-7cc1-4533-b145-d855e45205ca-kube-api-access-97dgx\") pod \"f1f87bb5-7cc1-4533-b145-d855e45205ca\" (UID: \"f1f87bb5-7cc1-4533-b145-d855e45205ca\") " Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.405687 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/63f7c690-a408-4e1f-8959-b22badb1b9dc-public-tls-certs\") pod \"63f7c690-a408-4e1f-8959-b22badb1b9dc\" (UID: \"63f7c690-a408-4e1f-8959-b22badb1b9dc\") " Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.405847 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1f87bb5-7cc1-4533-b145-d855e45205ca-memcached-tls-certs\") pod \"f1f87bb5-7cc1-4533-b145-d855e45205ca\" (UID: \"f1f87bb5-7cc1-4533-b145-d855e45205ca\") " Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.405938 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f1f87bb5-7cc1-4533-b145-d855e45205ca-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "f1f87bb5-7cc1-4533-b145-d855e45205ca" (UID: "f1f87bb5-7cc1-4533-b145-d855e45205ca"). 
InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: W1128 13:55:09.406424 4857 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/63f7c690-a408-4e1f-8959-b22badb1b9dc/volumes/kubernetes.io~secret/public-tls-certs Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.406451 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63f7c690-a408-4e1f-8959-b22badb1b9dc-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "63f7c690-a408-4e1f-8959-b22badb1b9dc" (UID: "63f7c690-a408-4e1f-8959-b22badb1b9dc"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.407264 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f1f87bb5-7cc1-4533-b145-d855e45205ca-config-data" (OuterVolumeSpecName: "config-data") pod "f1f87bb5-7cc1-4533-b145-d855e45205ca" (UID: "f1f87bb5-7cc1-4533-b145-d855e45205ca"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.408790 4857 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.408905 4857 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f1f87bb5-7cc1-4533-b145-d855e45205ca-kolla-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.409057 4857 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/63f7c690-a408-4e1f-8959-b22badb1b9dc-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.409204 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/63f7c690-a408-4e1f-8959-b22badb1b9dc-logs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.409283 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fpvdq\" (UniqueName: \"kubernetes.io/projected/9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c-kube-api-access-fpvdq\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.409370 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f1f87bb5-7cc1-4533-b145-d855e45205ca-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.409450 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.409523 4857 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.409616 4857 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/63f7c690-a408-4e1f-8959-b22badb1b9dc-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.410053 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63f7c690-a408-4e1f-8959-b22badb1b9dc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.411475 4857 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/63f7c690-a408-4e1f-8959-b22badb1b9dc-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.411578 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m9zrs\" (UniqueName: \"kubernetes.io/projected/63f7c690-a408-4e1f-8959-b22badb1b9dc-kube-api-access-m9zrs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.416345 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.426523 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f1f87bb5-7cc1-4533-b145-d855e45205ca-kube-api-access-97dgx" (OuterVolumeSpecName: "kube-api-access-97dgx") pod "f1f87bb5-7cc1-4533-b145-d855e45205ca" (UID: "f1f87bb5-7cc1-4533-b145-d855e45205ca"). InnerVolumeSpecName "kube-api-access-97dgx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.431684 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.436647 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c" (UID: "9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.436795 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63f7c690-a408-4e1f-8959-b22badb1b9dc-config-data" (OuterVolumeSpecName: "config-data") pod "63f7c690-a408-4e1f-8959-b22badb1b9dc" (UID: "63f7c690-a408-4e1f-8959-b22badb1b9dc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.444241 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.459057 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c-config-data" (OuterVolumeSpecName: "config-data") pod "9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c" (UID: "9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.465226 4857 scope.go:117] "RemoveContainer" containerID="d99386d5fb6210356b4912e9ae342dc21823ae6279091264ad6b9328047690e7" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.465519 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.480487 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.498738 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.508838 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.514276 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1f87bb5-7cc1-4533-b145-d855e45205ca-combined-ca-bundle\") pod \"f1f87bb5-7cc1-4533-b145-d855e45205ca\" (UID: \"f1f87bb5-7cc1-4533-b145-d855e45205ca\") " Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.515504 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-97dgx\" (UniqueName: \"kubernetes.io/projected/f1f87bb5-7cc1-4533-b145-d855e45205ca-kube-api-access-97dgx\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.515530 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.515557 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.515572 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63f7c690-a408-4e1f-8959-b22badb1b9dc-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:09 crc kubenswrapper[4857]: E1128 13:55:09.515655 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:55:09 crc kubenswrapper[4857]: E1128 13:55:09.515714 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd-operator-scripts podName:2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd nodeName:}" failed. No retries permitted until 2025-11-28 13:55:10.515696092 +0000 UTC m=+1560.639637529 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd-operator-scripts") pod "barbican8199-account-delete-z9b9h" (UID: "2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd") : configmap "openstack-scripts" not found Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.521216 4857 scope.go:117] "RemoveContainer" containerID="0e9047254aa31e8c764eaf4e0c00c00b8b889bb9a2e0ba225c71bf05a09cdad0" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.536551 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1f87bb5-7cc1-4533-b145-d855e45205ca-memcached-tls-certs" (OuterVolumeSpecName: "memcached-tls-certs") pod "f1f87bb5-7cc1-4533-b145-d855e45205ca" (UID: "f1f87bb5-7cc1-4533-b145-d855e45205ca"). InnerVolumeSpecName "memcached-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.546706 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novacell196aa-account-delete-rnjfc" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.547400 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-794cbbfc48-m96jr"] Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.557363 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1f87bb5-7cc1-4533-b145-d855e45205ca-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f1f87bb5-7cc1-4533-b145-d855e45205ca" (UID: "f1f87bb5-7cc1-4533-b145-d855e45205ca"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.558205 4857 scope.go:117] "RemoveContainer" containerID="64a76382fda6f51faa4f808e7cb3cf14ee31449fa967229b15df56ca85a66806" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.573259 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-794cbbfc48-m96jr"] Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.592593 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.599868 4857 scope.go:117] "RemoveContainer" containerID="2393368f1d7599e575bd2865623f799eecabedba72c2c1b25e2a5dd440954069" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.601793 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.620508 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2pq68\" (UniqueName: \"kubernetes.io/projected/f1ee99c5-4515-45fd-ad45-cd7d96f85c10-kube-api-access-2pq68\") pod \"f1ee99c5-4515-45fd-ad45-cd7d96f85c10\" (UID: \"f1ee99c5-4515-45fd-ad45-cd7d96f85c10\") " Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.620678 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f1ee99c5-4515-45fd-ad45-cd7d96f85c10-operator-scripts\") pod \"f1ee99c5-4515-45fd-ad45-cd7d96f85c10\" (UID: \"f1ee99c5-4515-45fd-ad45-cd7d96f85c10\") " Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.621316 4857 reconciler_common.go:293] "Volume detached for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1f87bb5-7cc1-4533-b145-d855e45205ca-memcached-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 
13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.621348 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1f87bb5-7cc1-4533-b145-d855e45205ca-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:09 crc kubenswrapper[4857]: E1128 13:55:09.621424 4857 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 28 13:55:09 crc kubenswrapper[4857]: E1128 13:55:09.621484 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/e5550a25-04ef-4dde-afd4-627f1df97a90-config-data podName:e5550a25-04ef-4dde-afd4-627f1df97a90 nodeName:}" failed. No retries permitted until 2025-11-28 13:55:17.621464518 +0000 UTC m=+1567.745405955 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/e5550a25-04ef-4dde-afd4-627f1df97a90-config-data") pod "rabbitmq-server-0" (UID: "e5550a25-04ef-4dde-afd4-627f1df97a90") : configmap "rabbitmq-config-data" not found Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.623016 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f1ee99c5-4515-45fd-ad45-cd7d96f85c10-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f1ee99c5-4515-45fd-ad45-cd7d96f85c10" (UID: "f1ee99c5-4515-45fd-ad45-cd7d96f85c10"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: E1128 13:55:09.623177 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:55:09 crc kubenswrapper[4857]: E1128 13:55:09.623767 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/8288e005-1d07-4989-bc64-64b3ecd62993-operator-scripts podName:8288e005-1d07-4989-bc64-64b3ecd62993 nodeName:}" failed. No retries permitted until 2025-11-28 13:55:10.623243165 +0000 UTC m=+1560.747184602 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/8288e005-1d07-4989-bc64-64b3ecd62993-operator-scripts") pod "novaapi379c-account-delete-7x584" (UID: "8288e005-1d07-4989-bc64-64b3ecd62993") : configmap "openstack-scripts" not found Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.623801 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.628030 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f1ee99c5-4515-45fd-ad45-cd7d96f85c10-kube-api-access-2pq68" (OuterVolumeSpecName: "kube-api-access-2pq68") pod "f1ee99c5-4515-45fd-ad45-cd7d96f85c10" (UID: "f1ee99c5-4515-45fd-ad45-cd7d96f85c10"). InnerVolumeSpecName "kube-api-access-2pq68". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.645255 4857 scope.go:117] "RemoveContainer" containerID="bafd64668cff0693bb5967882f3c22be467009f95e4f118cff44737cfc28e0af" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.648840 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.672994 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.677355 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.702921 4857 scope.go:117] "RemoveContainer" containerID="9eb233258cdcc0845df438e157136a632bd3695d5e815406ce0875d7029891d2" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.730568 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2pq68\" (UniqueName: \"kubernetes.io/projected/f1ee99c5-4515-45fd-ad45-cd7d96f85c10-kube-api-access-2pq68\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.730619 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f1ee99c5-4515-45fd-ad45-cd7d96f85c10-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:09 crc kubenswrapper[4857]: E1128 13:55:09.764416 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ba6185dbaf33b25f5b661fea2dcf82e7a24370bf93cc9ba5e314ebb2abc985f8" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 13:55:09 crc kubenswrapper[4857]: E1128 13:55:09.766072 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ba6185dbaf33b25f5b661fea2dcf82e7a24370bf93cc9ba5e314ebb2abc985f8" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 13:55:09 crc kubenswrapper[4857]: E1128 13:55:09.767965 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ba6185dbaf33b25f5b661fea2dcf82e7a24370bf93cc9ba5e314ebb2abc985f8" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 28 13:55:09 crc kubenswrapper[4857]: E1128 13:55:09.768005 4857 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d" containerName="nova-cell0-conductor-conductor" Nov 28 13:55:09 crc kubenswrapper[4857]: I1128 13:55:09.841971 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-cpnwx"] Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.017505 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-68dfcc5468-bgz8k" event={"ID":"63f7c690-a408-4e1f-8959-b22badb1b9dc","Type":"ContainerDied","Data":"e64b362c95aa7cd19c2a82ea8df71ad08ba359967fcd92b776ad971e0f91060e"} Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.017562 4857 scope.go:117] 
"RemoveContainer" containerID="32f2e88243b10a53bc37602a5a649cb5b43c66d0e3d47ca1f87ead6c5ef19c53" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.017690 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-68dfcc5468-bgz8k" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.027640 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"f1f87bb5-7cc1-4533-b145-d855e45205ca","Type":"ContainerDied","Data":"f676462d7229d8985e4b6ca0f9266e20580a12cf20d943bd71b4de086fff09c2"} Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.027743 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.030620 4857 generic.go:334] "Generic (PLEG): container finished" podID="e5ec18e7-6719-46dd-b580-303f3da41869" containerID="af0d6704e41a2d48d923ac3295bc509705c2c44269b7920b44c7cf73327f3eda" exitCode=0 Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.030710 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"e5ec18e7-6719-46dd-b580-303f3da41869","Type":"ContainerDied","Data":"af0d6704e41a2d48d923ac3295bc509705c2c44269b7920b44c7cf73327f3eda"} Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.034307 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cpnwx" event={"ID":"7f9668e3-0293-4d8f-aa56-ad830134b0e4","Type":"ContainerStarted","Data":"dcb8094d70ba370f0684fdfa21268925212eb09ae0f3a0480d7b6dc1fcb8cd59"} Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.038635 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell196aa-account-delete-rnjfc" event={"ID":"f1ee99c5-4515-45fd-ad45-cd7d96f85c10","Type":"ContainerDied","Data":"6665b97bc0f291b293104a38aadfda52e592701b4424089046b1ed83c3f616ae"} Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.038710 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novacell196aa-account-delete-rnjfc" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.048463 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c","Type":"ContainerDied","Data":"e67a29c57992976bef66f65980c56527ff2dccd86514f6a38ee24b1cf6b1a217"} Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.048478 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.048545 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/novaapi379c-account-delete-7x584" podUID="8288e005-1d07-4989-bc64-64b3ecd62993" containerName="mariadb-account-delete" containerID="cri-o://29ebf0561572ef46ce57cfc73df73475b6e96599b050827b8ce6ce81e4b1ad95" gracePeriod=30 Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.049685 4857 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." 
pod="openstack/barbican8199-account-delete-z9b9h" secret="" err="secret \"galera-openstack-dockercfg-g4gh7\" not found" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.070433 4857 scope.go:117] "RemoveContainer" containerID="4b9f9dd6d4c0768cd26615fd0b66fda62c0a3074685c2e3db7b9ec59c30f07ed" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.081496 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-68dfcc5468-bgz8k"] Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.109866 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-68dfcc5468-bgz8k"] Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.126110 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell196aa-account-delete-rnjfc"] Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.152460 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/novacell196aa-account-delete-rnjfc"] Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.156415 4857 scope.go:117] "RemoveContainer" containerID="27e0b4afa9ae671a1b222c98eb790a7935045df19723929282aaa365b28ec8a1" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.172378 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.192634 4857 scope.go:117] "RemoveContainer" containerID="cfbb9c4939e99585e8309aa60ce1b27ebc930232a5842d8afc27834efe50659f" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.201017 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.225101 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.272121 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0944133e-cee5-4927-8f5e-8f781b30d224" path="/var/lib/kubelet/pods/0944133e-cee5-4927-8f5e-8f781b30d224/volumes" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.275166 4857 scope.go:117] "RemoveContainer" containerID="949cd8e48f0fc8953dd188f71049a00ec864bfbb7da60444a68266e465fb949e" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.280200 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10f5dca0-ca0a-4e88-838f-14affb1dead5" path="/var/lib/kubelet/pods/10f5dca0-ca0a-4e88-838f-14affb1dead5/volumes" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.281172 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2bf149d2-9beb-4394-921a-a703473391aa" path="/var/lib/kubelet/pods/2bf149d2-9beb-4394-921a-a703473391aa/volumes" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.282885 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd" path="/var/lib/kubelet/pods/4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd/volumes" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.284001 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="61549c8e-2955-4350-9055-731ceb896fdc" path="/var/lib/kubelet/pods/61549c8e-2955-4350-9055-731ceb896fdc/volumes" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.284515 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="63f7c690-a408-4e1f-8959-b22badb1b9dc" path="/var/lib/kubelet/pods/63f7c690-a408-4e1f-8959-b22badb1b9dc/volumes" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.285896 4857 kubelet_volumes.go:163] 
"Cleaned up orphaned pod volumes dir" podUID="75c7c292-3658-4264-b86b-2a825aeb9ad4" path="/var/lib/kubelet/pods/75c7c292-3658-4264-b86b-2a825aeb9ad4/volumes" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.286718 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c" path="/var/lib/kubelet/pods/9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c/volumes" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.287795 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e" path="/var/lib/kubelet/pods/a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e/volumes" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.289069 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c69ec619-0d17-4a49-8f97-6db48291122d" path="/var/lib/kubelet/pods/c69ec619-0d17-4a49-8f97-6db48291122d/volumes" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.304931 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d64a95a8-7e15-40a3-b2f4-54c65c2cbf45" path="/var/lib/kubelet/pods/d64a95a8-7e15-40a3-b2f4-54c65c2cbf45/volumes" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.306668 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d973a56d-fc8e-4cef-a590-d21d32242dc4" path="/var/lib/kubelet/pods/d973a56d-fc8e-4cef-a590-d21d32242dc4/volumes" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.307747 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e222fcd6-26e0-46af-82ab-7cf038a18195" path="/var/lib/kubelet/pods/e222fcd6-26e0-46af-82ab-7cf038a18195/volumes" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.308425 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f1ee99c5-4515-45fd-ad45-cd7d96f85c10" path="/var/lib/kubelet/pods/f1ee99c5-4515-45fd-ad45-cd7d96f85c10/volumes" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.309075 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/memcached-0"] Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.323702 4857 scope.go:117] "RemoveContainer" containerID="e34bc2b2baed17dc94ca9dc3ed8f2bf2da5b6e0bf67630dbb959a6581e276f47" Nov 28 13:55:10 crc kubenswrapper[4857]: E1128 13:55:10.561427 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:55:10 crc kubenswrapper[4857]: E1128 13:55:10.561501 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd-operator-scripts podName:2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd nodeName:}" failed. No retries permitted until 2025-11-28 13:55:12.561480647 +0000 UTC m=+1562.685422084 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd-operator-scripts") pod "barbican8199-account-delete-z9b9h" (UID: "2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd") : configmap "openstack-scripts" not found Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.617689 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.662839 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e5ec18e7-6719-46dd-b580-303f3da41869-rabbitmq-plugins\") pod \"e5ec18e7-6719-46dd-b580-303f3da41869\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.662909 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e5ec18e7-6719-46dd-b580-303f3da41869-rabbitmq-tls\") pod \"e5ec18e7-6719-46dd-b580-303f3da41869\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.662961 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7zlqp\" (UniqueName: \"kubernetes.io/projected/e5ec18e7-6719-46dd-b580-303f3da41869-kube-api-access-7zlqp\") pod \"e5ec18e7-6719-46dd-b580-303f3da41869\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.662995 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e5ec18e7-6719-46dd-b580-303f3da41869-pod-info\") pod \"e5ec18e7-6719-46dd-b580-303f3da41869\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.663030 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e5ec18e7-6719-46dd-b580-303f3da41869-server-conf\") pod \"e5ec18e7-6719-46dd-b580-303f3da41869\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.663080 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"e5ec18e7-6719-46dd-b580-303f3da41869\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.663121 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e5ec18e7-6719-46dd-b580-303f3da41869-erlang-cookie-secret\") pod \"e5ec18e7-6719-46dd-b580-303f3da41869\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.663175 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e5ec18e7-6719-46dd-b580-303f3da41869-rabbitmq-confd\") pod \"e5ec18e7-6719-46dd-b580-303f3da41869\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.663216 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e5ec18e7-6719-46dd-b580-303f3da41869-config-data\") pod \"e5ec18e7-6719-46dd-b580-303f3da41869\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.663278 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e5ec18e7-6719-46dd-b580-303f3da41869-plugins-conf\") pod \"e5ec18e7-6719-46dd-b580-303f3da41869\" (UID: 
\"e5ec18e7-6719-46dd-b580-303f3da41869\") " Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.663348 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e5ec18e7-6719-46dd-b580-303f3da41869-rabbitmq-erlang-cookie\") pod \"e5ec18e7-6719-46dd-b580-303f3da41869\" (UID: \"e5ec18e7-6719-46dd-b580-303f3da41869\") " Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.663418 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e5ec18e7-6719-46dd-b580-303f3da41869-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "e5ec18e7-6719-46dd-b580-303f3da41869" (UID: "e5ec18e7-6719-46dd-b580-303f3da41869"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.663838 4857 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e5ec18e7-6719-46dd-b580-303f3da41869-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:10 crc kubenswrapper[4857]: E1128 13:55:10.663909 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:55:10 crc kubenswrapper[4857]: E1128 13:55:10.664124 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/8288e005-1d07-4989-bc64-64b3ecd62993-operator-scripts podName:8288e005-1d07-4989-bc64-64b3ecd62993 nodeName:}" failed. No retries permitted until 2025-11-28 13:55:12.664102429 +0000 UTC m=+1562.788043876 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/8288e005-1d07-4989-bc64-64b3ecd62993-operator-scripts") pod "novaapi379c-account-delete-7x584" (UID: "8288e005-1d07-4989-bc64-64b3ecd62993") : configmap "openstack-scripts" not found Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.669614 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e5ec18e7-6719-46dd-b580-303f3da41869-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "e5ec18e7-6719-46dd-b580-303f3da41869" (UID: "e5ec18e7-6719-46dd-b580-303f3da41869"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.670642 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e5ec18e7-6719-46dd-b580-303f3da41869-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "e5ec18e7-6719-46dd-b580-303f3da41869" (UID: "e5ec18e7-6719-46dd-b580-303f3da41869"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.673139 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5ec18e7-6719-46dd-b580-303f3da41869-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "e5ec18e7-6719-46dd-b580-303f3da41869" (UID: "e5ec18e7-6719-46dd-b580-303f3da41869"). InnerVolumeSpecName "erlang-cookie-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.676523 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/e5ec18e7-6719-46dd-b580-303f3da41869-pod-info" (OuterVolumeSpecName: "pod-info") pod "e5ec18e7-6719-46dd-b580-303f3da41869" (UID: "e5ec18e7-6719-46dd-b580-303f3da41869"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.676778 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5ec18e7-6719-46dd-b580-303f3da41869-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "e5ec18e7-6719-46dd-b580-303f3da41869" (UID: "e5ec18e7-6719-46dd-b580-303f3da41869"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.677889 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "persistence") pod "e5ec18e7-6719-46dd-b580-303f3da41869" (UID: "e5ec18e7-6719-46dd-b580-303f3da41869"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.689556 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e5ec18e7-6719-46dd-b580-303f3da41869-config-data" (OuterVolumeSpecName: "config-data") pod "e5ec18e7-6719-46dd-b580-303f3da41869" (UID: "e5ec18e7-6719-46dd-b580-303f3da41869"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.699162 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5ec18e7-6719-46dd-b580-303f3da41869-kube-api-access-7zlqp" (OuterVolumeSpecName: "kube-api-access-7zlqp") pod "e5ec18e7-6719-46dd-b580-303f3da41869" (UID: "e5ec18e7-6719-46dd-b580-303f3da41869"). InnerVolumeSpecName "kube-api-access-7zlqp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.723551 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e5ec18e7-6719-46dd-b580-303f3da41869-server-conf" (OuterVolumeSpecName: "server-conf") pod "e5ec18e7-6719-46dd-b580-303f3da41869" (UID: "e5ec18e7-6719-46dd-b580-303f3da41869"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.765318 4857 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e5ec18e7-6719-46dd-b580-303f3da41869-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.765351 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7zlqp\" (UniqueName: \"kubernetes.io/projected/e5ec18e7-6719-46dd-b580-303f3da41869-kube-api-access-7zlqp\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.765361 4857 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e5ec18e7-6719-46dd-b580-303f3da41869-pod-info\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.765368 4857 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e5ec18e7-6719-46dd-b580-303f3da41869-server-conf\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.765391 4857 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.765401 4857 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e5ec18e7-6719-46dd-b580-303f3da41869-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.765409 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e5ec18e7-6719-46dd-b580-303f3da41869-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.765417 4857 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e5ec18e7-6719-46dd-b580-303f3da41869-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.765425 4857 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e5ec18e7-6719-46dd-b580-303f3da41869-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.801314 4857 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.852499 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5ec18e7-6719-46dd-b580-303f3da41869-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "e5ec18e7-6719-46dd-b580-303f3da41869" (UID: "e5ec18e7-6719-46dd-b580-303f3da41869"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.866564 4857 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:10 crc kubenswrapper[4857]: I1128 13:55:10.866607 4857 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e5ec18e7-6719-46dd-b580-303f3da41869-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.072274 4857 generic.go:334] "Generic (PLEG): container finished" podID="f6c9b673-669e-464a-b012-8b39314e1990" containerID="3d5a1e70902743d5830edbb49ef840b594da301bdcf85c3dfb36991d40ad00b1" exitCode=0 Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.072339 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"f6c9b673-669e-464a-b012-8b39314e1990","Type":"ContainerDied","Data":"3d5a1e70902743d5830edbb49ef840b594da301bdcf85c3dfb36991d40ad00b1"} Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.074013 4857 generic.go:334] "Generic (PLEG): container finished" podID="7f9668e3-0293-4d8f-aa56-ad830134b0e4" containerID="85fa48bc2daeb1abb5696dff5b9fc7598558f901326e8bd7d8d3e9deaeb06f59" exitCode=0 Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.074057 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cpnwx" event={"ID":"7f9668e3-0293-4d8f-aa56-ad830134b0e4","Type":"ContainerDied","Data":"85fa48bc2daeb1abb5696dff5b9fc7598558f901326e8bd7d8d3e9deaeb06f59"} Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.075663 4857 generic.go:334] "Generic (PLEG): container finished" podID="5d4d3b72-fd05-4a47-925c-f17f77c46fc1" containerID="05d50721ac2243fb0f0316bcd6f40e7732694575564655c777cddace1e0267e4" exitCode=0 Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.075724 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5b64c5866d-mkt8b" event={"ID":"5d4d3b72-fd05-4a47-925c-f17f77c46fc1","Type":"ContainerDied","Data":"05d50721ac2243fb0f0316bcd6f40e7732694575564655c777cddace1e0267e4"} Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.079039 4857 generic.go:334] "Generic (PLEG): container finished" podID="e5550a25-04ef-4dde-afd4-627f1df97a90" containerID="3bfaa6d12b2af65b2c4bc9e67c77c455db443837bdebce53cc1736765094b03f" exitCode=0 Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.079145 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"e5550a25-04ef-4dde-afd4-627f1df97a90","Type":"ContainerDied","Data":"3bfaa6d12b2af65b2c4bc9e67c77c455db443837bdebce53cc1736765094b03f"} Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.079176 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"e5550a25-04ef-4dde-afd4-627f1df97a90","Type":"ContainerDied","Data":"aab682b0c44e6ca44a0cd72b01cb044855f1e65aa098404b93df5b556456d257"} Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.079187 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aab682b0c44e6ca44a0cd72b01cb044855f1e65aa098404b93df5b556456d257" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.086642 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" 
event={"ID":"e5ec18e7-6719-46dd-b580-303f3da41869","Type":"ContainerDied","Data":"da71e64fb1ef0e05958001998905152cf192cd8cbccea76087ae208f9a1da63e"} Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.086688 4857 scope.go:117] "RemoveContainer" containerID="af0d6704e41a2d48d923ac3295bc509705c2c44269b7920b44c7cf73327f3eda" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.086855 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.100159 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_e6597fde-8e34-4ccb-8784-1b7aa3680488/ovn-northd/0.log" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.100426 4857 generic.go:334] "Generic (PLEG): container finished" podID="e6597fde-8e34-4ccb-8784-1b7aa3680488" containerID="cb2e8e2db5b421b3011ac3c2410e936a5d71fda6c184fdd57c1dd60cdbcfc09b" exitCode=139 Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.100483 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"e6597fde-8e34-4ccb-8784-1b7aa3680488","Type":"ContainerDied","Data":"cb2e8e2db5b421b3011ac3c2410e936a5d71fda6c184fdd57c1dd60cdbcfc09b"} Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.113977 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.131559 4857 scope.go:117] "RemoveContainer" containerID="2ae1f1de2ee889c449e3ff3c827e646d41938be57fdf8267c513587b8dbd2ecb" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.176047 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e5550a25-04ef-4dde-afd4-627f1df97a90-erlang-cookie-secret\") pod \"e5550a25-04ef-4dde-afd4-627f1df97a90\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.176179 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e5550a25-04ef-4dde-afd4-627f1df97a90-rabbitmq-tls\") pod \"e5550a25-04ef-4dde-afd4-627f1df97a90\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.176211 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e5550a25-04ef-4dde-afd4-627f1df97a90-plugins-conf\") pod \"e5550a25-04ef-4dde-afd4-627f1df97a90\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.176224 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e5550a25-04ef-4dde-afd4-627f1df97a90-server-conf\") pod \"e5550a25-04ef-4dde-afd4-627f1df97a90\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.176261 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e5550a25-04ef-4dde-afd4-627f1df97a90-rabbitmq-confd\") pod \"e5550a25-04ef-4dde-afd4-627f1df97a90\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.176279 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e5550a25-04ef-4dde-afd4-627f1df97a90-rabbitmq-plugins\") pod \"e5550a25-04ef-4dde-afd4-627f1df97a90\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.176302 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e5550a25-04ef-4dde-afd4-627f1df97a90-rabbitmq-erlang-cookie\") pod \"e5550a25-04ef-4dde-afd4-627f1df97a90\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.176380 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e5550a25-04ef-4dde-afd4-627f1df97a90-pod-info\") pod \"e5550a25-04ef-4dde-afd4-627f1df97a90\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.176418 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"e5550a25-04ef-4dde-afd4-627f1df97a90\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.176461 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jg4g7\" (UniqueName: \"kubernetes.io/projected/e5550a25-04ef-4dde-afd4-627f1df97a90-kube-api-access-jg4g7\") pod \"e5550a25-04ef-4dde-afd4-627f1df97a90\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.176513 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e5550a25-04ef-4dde-afd4-627f1df97a90-config-data\") pod \"e5550a25-04ef-4dde-afd4-627f1df97a90\" (UID: \"e5550a25-04ef-4dde-afd4-627f1df97a90\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.179619 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e5550a25-04ef-4dde-afd4-627f1df97a90-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "e5550a25-04ef-4dde-afd4-627f1df97a90" (UID: "e5550a25-04ef-4dde-afd4-627f1df97a90"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.180249 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e5550a25-04ef-4dde-afd4-627f1df97a90-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "e5550a25-04ef-4dde-afd4-627f1df97a90" (UID: "e5550a25-04ef-4dde-afd4-627f1df97a90"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.180334 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.180674 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e5550a25-04ef-4dde-afd4-627f1df97a90-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "e5550a25-04ef-4dde-afd4-627f1df97a90" (UID: "e5550a25-04ef-4dde-afd4-627f1df97a90"). InnerVolumeSpecName "rabbitmq-erlang-cookie". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.190296 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.203286 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e5550a25-04ef-4dde-afd4-627f1df97a90-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "e5550a25-04ef-4dde-afd4-627f1df97a90" (UID: "e5550a25-04ef-4dde-afd4-627f1df97a90"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.203327 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "persistence") pod "e5550a25-04ef-4dde-afd4-627f1df97a90" (UID: "e5550a25-04ef-4dde-afd4-627f1df97a90"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.203395 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5550a25-04ef-4dde-afd4-627f1df97a90-kube-api-access-jg4g7" (OuterVolumeSpecName: "kube-api-access-jg4g7") pod "e5550a25-04ef-4dde-afd4-627f1df97a90" (UID: "e5550a25-04ef-4dde-afd4-627f1df97a90"). InnerVolumeSpecName "kube-api-access-jg4g7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.204612 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5550a25-04ef-4dde-afd4-627f1df97a90-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "e5550a25-04ef-4dde-afd4-627f1df97a90" (UID: "e5550a25-04ef-4dde-afd4-627f1df97a90"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.208130 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e5550a25-04ef-4dde-afd4-627f1df97a90-config-data" (OuterVolumeSpecName: "config-data") pod "e5550a25-04ef-4dde-afd4-627f1df97a90" (UID: "e5550a25-04ef-4dde-afd4-627f1df97a90"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.214301 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/e5550a25-04ef-4dde-afd4-627f1df97a90-pod-info" (OuterVolumeSpecName: "pod-info") pod "e5550a25-04ef-4dde-afd4-627f1df97a90" (UID: "e5550a25-04ef-4dde-afd4-627f1df97a90"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.239017 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_e6597fde-8e34-4ccb-8784-1b7aa3680488/ovn-northd/0.log" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.239104 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.270479 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e5550a25-04ef-4dde-afd4-627f1df97a90-server-conf" (OuterVolumeSpecName: "server-conf") pod "e5550a25-04ef-4dde-afd4-627f1df97a90" (UID: "e5550a25-04ef-4dde-afd4-627f1df97a90"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.274925 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-8ngsm"] Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.279136 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e6597fde-8e34-4ccb-8784-1b7aa3680488-ovn-rundir\") pod \"e6597fde-8e34-4ccb-8784-1b7aa3680488\" (UID: \"e6597fde-8e34-4ccb-8784-1b7aa3680488\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.279230 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e6597fde-8e34-4ccb-8784-1b7aa3680488-scripts\") pod \"e6597fde-8e34-4ccb-8784-1b7aa3680488\" (UID: \"e6597fde-8e34-4ccb-8784-1b7aa3680488\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.279288 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6597fde-8e34-4ccb-8784-1b7aa3680488-metrics-certs-tls-certs\") pod \"e6597fde-8e34-4ccb-8784-1b7aa3680488\" (UID: \"e6597fde-8e34-4ccb-8784-1b7aa3680488\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.279366 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6597fde-8e34-4ccb-8784-1b7aa3680488-combined-ca-bundle\") pod \"e6597fde-8e34-4ccb-8784-1b7aa3680488\" (UID: \"e6597fde-8e34-4ccb-8784-1b7aa3680488\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.279414 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jm7jj\" (UniqueName: \"kubernetes.io/projected/e6597fde-8e34-4ccb-8784-1b7aa3680488-kube-api-access-jm7jj\") pod \"e6597fde-8e34-4ccb-8784-1b7aa3680488\" (UID: \"e6597fde-8e34-4ccb-8784-1b7aa3680488\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.279458 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6597fde-8e34-4ccb-8784-1b7aa3680488-ovn-northd-tls-certs\") pod \"e6597fde-8e34-4ccb-8784-1b7aa3680488\" (UID: \"e6597fde-8e34-4ccb-8784-1b7aa3680488\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.279486 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e6597fde-8e34-4ccb-8784-1b7aa3680488-config\") pod \"e6597fde-8e34-4ccb-8784-1b7aa3680488\" (UID: \"e6597fde-8e34-4ccb-8784-1b7aa3680488\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.279566 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e6597fde-8e34-4ccb-8784-1b7aa3680488-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "e6597fde-8e34-4ccb-8784-1b7aa3680488" (UID: "e6597fde-8e34-4ccb-8784-1b7aa3680488"). InnerVolumeSpecName "ovn-rundir". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.280080 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e6597fde-8e34-4ccb-8784-1b7aa3680488-config" (OuterVolumeSpecName: "config") pod "e6597fde-8e34-4ccb-8784-1b7aa3680488" (UID: "e6597fde-8e34-4ccb-8784-1b7aa3680488"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.280099 4857 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e5550a25-04ef-4dde-afd4-627f1df97a90-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.280113 4857 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e5550a25-04ef-4dde-afd4-627f1df97a90-pod-info\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.280134 4857 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.280144 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jg4g7\" (UniqueName: \"kubernetes.io/projected/e5550a25-04ef-4dde-afd4-627f1df97a90-kube-api-access-jg4g7\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.280153 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e5550a25-04ef-4dde-afd4-627f1df97a90-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.280162 4857 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e5550a25-04ef-4dde-afd4-627f1df97a90-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.280170 4857 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/e6597fde-8e34-4ccb-8784-1b7aa3680488-ovn-rundir\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.280179 4857 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e5550a25-04ef-4dde-afd4-627f1df97a90-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.280187 4857 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e5550a25-04ef-4dde-afd4-627f1df97a90-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.280194 4857 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e5550a25-04ef-4dde-afd4-627f1df97a90-server-conf\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.280202 4857 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e5550a25-04ef-4dde-afd4-627f1df97a90-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.280467 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/configmap/e6597fde-8e34-4ccb-8784-1b7aa3680488-scripts" (OuterVolumeSpecName: "scripts") pod "e6597fde-8e34-4ccb-8784-1b7aa3680488" (UID: "e6597fde-8e34-4ccb-8784-1b7aa3680488"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.285112 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron0f31-account-delete-t4p7d"] Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.285795 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e6597fde-8e34-4ccb-8784-1b7aa3680488-kube-api-access-jm7jj" (OuterVolumeSpecName: "kube-api-access-jm7jj") pod "e6597fde-8e34-4ccb-8784-1b7aa3680488" (UID: "e6597fde-8e34-4ccb-8784-1b7aa3680488"). InnerVolumeSpecName "kube-api-access-jm7jj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.300138 4857 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.303940 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-8ngsm"] Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.308270 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.308323 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.313815 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron0f31-account-delete-t4p7d"] Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.318199 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6597fde-8e34-4ccb-8784-1b7aa3680488-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e6597fde-8e34-4ccb-8784-1b7aa3680488" (UID: "e6597fde-8e34-4ccb-8784-1b7aa3680488"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.329460 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-5b64c5866d-mkt8b" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.363316 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-0f31-account-create-update-zmqth"] Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.374529 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-0f31-account-create-update-zmqth"] Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.380610 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-fernet-keys\") pod \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\" (UID: \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.380657 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-public-tls-certs\") pod \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\" (UID: \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.380775 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-combined-ca-bundle\") pod \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\" (UID: \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.380798 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-config-data\") pod \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\" (UID: \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.380831 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5kftc\" (UniqueName: \"kubernetes.io/projected/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-kube-api-access-5kftc\") pod \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\" (UID: \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.380852 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-scripts\") pod \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\" (UID: \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.380874 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-credential-keys\") pod \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\" (UID: \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.380924 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-internal-tls-certs\") pod \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\" (UID: \"5d4d3b72-fd05-4a47-925c-f17f77c46fc1\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.381297 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e6597fde-8e34-4ccb-8784-1b7aa3680488-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 
13:55:11.381315 4857 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.381324 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e6597fde-8e34-4ccb-8784-1b7aa3680488-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.381336 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jm7jj\" (UniqueName: \"kubernetes.io/projected/e6597fde-8e34-4ccb-8784-1b7aa3680488-kube-api-access-jm7jj\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.381344 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e6597fde-8e34-4ccb-8784-1b7aa3680488-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.386383 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "5d4d3b72-fd05-4a47-925c-f17f77c46fc1" (UID: "5d4d3b72-fd05-4a47-925c-f17f77c46fc1"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.388318 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5550a25-04ef-4dde-afd4-627f1df97a90-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "e5550a25-04ef-4dde-afd4-627f1df97a90" (UID: "e5550a25-04ef-4dde-afd4-627f1df97a90"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.388641 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "5d4d3b72-fd05-4a47-925c-f17f77c46fc1" (UID: "5d4d3b72-fd05-4a47-925c-f17f77c46fc1"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.391690 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-kube-api-access-5kftc" (OuterVolumeSpecName: "kube-api-access-5kftc") pod "5d4d3b72-fd05-4a47-925c-f17f77c46fc1" (UID: "5d4d3b72-fd05-4a47-925c-f17f77c46fc1"). InnerVolumeSpecName "kube-api-access-5kftc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.400539 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-6dhdz"] Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.404242 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-scripts" (OuterVolumeSpecName: "scripts") pod "5d4d3b72-fd05-4a47-925c-f17f77c46fc1" (UID: "5d4d3b72-fd05-4a47-925c-f17f77c46fc1"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.404394 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6597fde-8e34-4ccb-8784-1b7aa3680488-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "e6597fde-8e34-4ccb-8784-1b7aa3680488" (UID: "e6597fde-8e34-4ccb-8784-1b7aa3680488"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.406365 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e6597fde-8e34-4ccb-8784-1b7aa3680488-ovn-northd-tls-certs" (OuterVolumeSpecName: "ovn-northd-tls-certs") pod "e6597fde-8e34-4ccb-8784-1b7aa3680488" (UID: "e6597fde-8e34-4ccb-8784-1b7aa3680488"). InnerVolumeSpecName "ovn-northd-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.413688 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5d4d3b72-fd05-4a47-925c-f17f77c46fc1" (UID: "5d4d3b72-fd05-4a47-925c-f17f77c46fc1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.416097 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-6dhdz"] Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.422009 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-25bf-account-create-update-2pcfz"] Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.429570 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance25bf-account-delete-78p7f"] Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.435053 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance25bf-account-delete-78p7f"] Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.435360 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-config-data" (OuterVolumeSpecName: "config-data") pod "5d4d3b72-fd05-4a47-925c-f17f77c46fc1" (UID: "5d4d3b72-fd05-4a47-925c-f17f77c46fc1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.447927 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-25bf-account-create-update-2pcfz"] Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.451970 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "5d4d3b72-fd05-4a47-925c-f17f77c46fc1" (UID: "5d4d3b72-fd05-4a47-925c-f17f77c46fc1"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.502660 4857 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.502692 4857 reconciler_common.go:293] "Volume detached for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6597fde-8e34-4ccb-8784-1b7aa3680488-ovn-northd-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.502702 4857 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.502716 4857 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e5550a25-04ef-4dde-afd4-627f1df97a90-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.502726 4857 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/e6597fde-8e34-4ccb-8784-1b7aa3680488-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.502736 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.502745 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.502759 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5kftc\" (UniqueName: \"kubernetes.io/projected/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-kube-api-access-5kftc\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.502771 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.502782 4857 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.503800 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "5d4d3b72-fd05-4a47-925c-f17f77c46fc1" (UID: "5d4d3b72-fd05-4a47-925c-f17f77c46fc1"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.521375 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-csvmd"] Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.527585 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-csvmd"] Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.544606 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinderf448-account-delete-vb5h7"] Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.552705 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinderf448-account-delete-vb5h7"] Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.564103 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-f448-account-create-update-976jr"] Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.566453 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.574227 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-f448-account-create-update-976jr"] Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.603813 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/f6c9b673-669e-464a-b012-8b39314e1990-config-data-generated\") pod \"f6c9b673-669e-464a-b012-8b39314e1990\" (UID: \"f6c9b673-669e-464a-b012-8b39314e1990\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.603863 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/f6c9b673-669e-464a-b012-8b39314e1990-config-data-default\") pod \"f6c9b673-669e-464a-b012-8b39314e1990\" (UID: \"f6c9b673-669e-464a-b012-8b39314e1990\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.603887 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cmch9\" (UniqueName: \"kubernetes.io/projected/f6c9b673-669e-464a-b012-8b39314e1990-kube-api-access-cmch9\") pod \"f6c9b673-669e-464a-b012-8b39314e1990\" (UID: \"f6c9b673-669e-464a-b012-8b39314e1990\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.603912 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f6c9b673-669e-464a-b012-8b39314e1990-operator-scripts\") pod \"f6c9b673-669e-464a-b012-8b39314e1990\" (UID: \"f6c9b673-669e-464a-b012-8b39314e1990\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.603938 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f6c9b673-669e-464a-b012-8b39314e1990-kolla-config\") pod \"f6c9b673-669e-464a-b012-8b39314e1990\" (UID: \"f6c9b673-669e-464a-b012-8b39314e1990\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.604101 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6c9b673-669e-464a-b012-8b39314e1990-galera-tls-certs\") pod \"f6c9b673-669e-464a-b012-8b39314e1990\" (UID: \"f6c9b673-669e-464a-b012-8b39314e1990\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.604136 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6c9b673-669e-464a-b012-8b39314e1990-combined-ca-bundle\") pod \"f6c9b673-669e-464a-b012-8b39314e1990\" (UID: \"f6c9b673-669e-464a-b012-8b39314e1990\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.604159 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"f6c9b673-669e-464a-b012-8b39314e1990\" (UID: \"f6c9b673-669e-464a-b012-8b39314e1990\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.604211 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f6c9b673-669e-464a-b012-8b39314e1990-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "f6c9b673-669e-464a-b012-8b39314e1990" (UID: "f6c9b673-669e-464a-b012-8b39314e1990"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.604444 4857 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d4d3b72-fd05-4a47-925c-f17f77c46fc1-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.604464 4857 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/f6c9b673-669e-464a-b012-8b39314e1990-config-data-generated\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.604604 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f6c9b673-669e-464a-b012-8b39314e1990-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "f6c9b673-669e-464a-b012-8b39314e1990" (UID: "f6c9b673-669e-464a-b012-8b39314e1990"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.604684 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f6c9b673-669e-464a-b012-8b39314e1990-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "f6c9b673-669e-464a-b012-8b39314e1990" (UID: "f6c9b673-669e-464a-b012-8b39314e1990"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.606823 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f6c9b673-669e-464a-b012-8b39314e1990-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f6c9b673-669e-464a-b012-8b39314e1990" (UID: "f6c9b673-669e-464a-b012-8b39314e1990"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.609630 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f6c9b673-669e-464a-b012-8b39314e1990-kube-api-access-cmch9" (OuterVolumeSpecName: "kube-api-access-cmch9") pod "f6c9b673-669e-464a-b012-8b39314e1990" (UID: "f6c9b673-669e-464a-b012-8b39314e1990"). InnerVolumeSpecName "kube-api-access-cmch9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.616985 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "mysql-db") pod "f6c9b673-669e-464a-b012-8b39314e1990" (UID: "f6c9b673-669e-464a-b012-8b39314e1990"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.636221 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6c9b673-669e-464a-b012-8b39314e1990-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f6c9b673-669e-464a-b012-8b39314e1990" (UID: "f6c9b673-669e-464a-b012-8b39314e1990"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.672642 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6c9b673-669e-464a-b012-8b39314e1990-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "f6c9b673-669e-464a-b012-8b39314e1990" (UID: "f6c9b673-669e-464a-b012-8b39314e1990"). InnerVolumeSpecName "galera-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.701603 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-6db4d"] Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.711058 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-6db4d"] Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.713392 4857 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6c9b673-669e-464a-b012-8b39314e1990-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.713422 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6c9b673-669e-464a-b012-8b39314e1990-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.713445 4857 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.713455 4857 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/f6c9b673-669e-464a-b012-8b39314e1990-config-data-default\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.713465 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cmch9\" (UniqueName: \"kubernetes.io/projected/f6c9b673-669e-464a-b012-8b39314e1990-kube-api-access-cmch9\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.713475 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f6c9b673-669e-464a-b012-8b39314e1990-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.713483 4857 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f6c9b673-669e-464a-b012-8b39314e1990-kolla-config\") on node 
\"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.721898 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placementf611-account-delete-gckl9"] Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.728853 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-f611-account-create-update-24frn"] Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.734343 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placementf611-account-delete-gckl9"] Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.739874 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-f611-account-create-update-24frn"] Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.743578 4857 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.757907 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-7d59cc7587-wt4q5" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.815723 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/791bae3e-043c-4a91-8e8b-d1d574dcb008-logs\") pod \"791bae3e-043c-4a91-8e8b-d1d574dcb008\" (UID: \"791bae3e-043c-4a91-8e8b-d1d574dcb008\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.816008 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/791bae3e-043c-4a91-8e8b-d1d574dcb008-config-data\") pod \"791bae3e-043c-4a91-8e8b-d1d574dcb008\" (UID: \"791bae3e-043c-4a91-8e8b-d1d574dcb008\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.816040 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/791bae3e-043c-4a91-8e8b-d1d574dcb008-combined-ca-bundle\") pod \"791bae3e-043c-4a91-8e8b-d1d574dcb008\" (UID: \"791bae3e-043c-4a91-8e8b-d1d574dcb008\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.816083 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bghft\" (UniqueName: \"kubernetes.io/projected/791bae3e-043c-4a91-8e8b-d1d574dcb008-kube-api-access-bghft\") pod \"791bae3e-043c-4a91-8e8b-d1d574dcb008\" (UID: \"791bae3e-043c-4a91-8e8b-d1d574dcb008\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.816200 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/791bae3e-043c-4a91-8e8b-d1d574dcb008-config-data-custom\") pod \"791bae3e-043c-4a91-8e8b-d1d574dcb008\" (UID: \"791bae3e-043c-4a91-8e8b-d1d574dcb008\") " Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.816286 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/791bae3e-043c-4a91-8e8b-d1d574dcb008-logs" (OuterVolumeSpecName: "logs") pod "791bae3e-043c-4a91-8e8b-d1d574dcb008" (UID: "791bae3e-043c-4a91-8e8b-d1d574dcb008"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.816745 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/791bae3e-043c-4a91-8e8b-d1d574dcb008-logs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.816765 4857 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.821981 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/791bae3e-043c-4a91-8e8b-d1d574dcb008-kube-api-access-bghft" (OuterVolumeSpecName: "kube-api-access-bghft") pod "791bae3e-043c-4a91-8e8b-d1d574dcb008" (UID: "791bae3e-043c-4a91-8e8b-d1d574dcb008"). InnerVolumeSpecName "kube-api-access-bghft". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.824560 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/791bae3e-043c-4a91-8e8b-d1d574dcb008-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "791bae3e-043c-4a91-8e8b-d1d574dcb008" (UID: "791bae3e-043c-4a91-8e8b-d1d574dcb008"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.858088 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/791bae3e-043c-4a91-8e8b-d1d574dcb008-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "791bae3e-043c-4a91-8e8b-d1d574dcb008" (UID: "791bae3e-043c-4a91-8e8b-d1d574dcb008"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.897143 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/791bae3e-043c-4a91-8e8b-d1d574dcb008-config-data" (OuterVolumeSpecName: "config-data") pod "791bae3e-043c-4a91-8e8b-d1d574dcb008" (UID: "791bae3e-043c-4a91-8e8b-d1d574dcb008"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.918804 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/791bae3e-043c-4a91-8e8b-d1d574dcb008-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.918858 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/791bae3e-043c-4a91-8e8b-d1d574dcb008-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.918874 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bghft\" (UniqueName: \"kubernetes.io/projected/791bae3e-043c-4a91-8e8b-d1d574dcb008-kube-api-access-bghft\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.918886 4857 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/791bae3e-043c-4a91-8e8b-d1d574dcb008-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.954413 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-s5qtj"] Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.962858 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-s5qtj"] Nov 28 13:55:11 crc kubenswrapper[4857]: I1128 13:55:11.985262 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-8199-account-create-update-7ssjr"] Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:11.999192 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican8199-account-delete-z9b9h"] Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:11.999391 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican8199-account-delete-z9b9h" podUID="2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd" containerName="mariadb-account-delete" containerID="cri-o://0c6047c8f93bc6be4f26062491195b4ab4317552f00ce7119fad6fe44c135c13" gracePeriod=30 Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.002726 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-8199-account-create-update-7ssjr"] Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.012024 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 13:55:12 crc kubenswrapper[4857]: E1128 13:55:12.044857 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17 is running failed: container process not found" containerID="a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 13:55:12 crc kubenswrapper[4857]: E1128 13:55:12.046668 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17 is running failed: container process not found" containerID="a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 13:55:12 crc kubenswrapper[4857]: E1128 13:55:12.046762 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b92ad2228277ac1c1d0e0ba05d4fc59c373322274d92f626373330dc03f6a207" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 13:55:12 crc kubenswrapper[4857]: E1128 13:55:12.048322 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17 is running failed: container process not found" containerID="a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 13:55:12 crc kubenswrapper[4857]: E1128 13:55:12.048368 4857 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-hgm54" podUID="21fe1399-7f40-43ec-bee8-868c937a6e19" containerName="ovsdb-server" Nov 28 13:55:12 crc kubenswrapper[4857]: E1128 13:55:12.048105 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b92ad2228277ac1c1d0e0ba05d4fc59c373322274d92f626373330dc03f6a207" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 13:55:12 crc kubenswrapper[4857]: E1128 13:55:12.051217 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b92ad2228277ac1c1d0e0ba05d4fc59c373322274d92f626373330dc03f6a207" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 13:55:12 crc kubenswrapper[4857]: E1128 13:55:12.051278 4857 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-hgm54" podUID="21fe1399-7f40-43ec-bee8-868c937a6e19" containerName="ovs-vswitchd" Nov 28 13:55:12 crc 
kubenswrapper[4857]: I1128 13:55:12.115817 4857 generic.go:334] "Generic (PLEG): container finished" podID="73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d" containerID="ba6185dbaf33b25f5b661fea2dcf82e7a24370bf93cc9ba5e314ebb2abc985f8" exitCode=0 Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.115898 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d","Type":"ContainerDied","Data":"ba6185dbaf33b25f5b661fea2dcf82e7a24370bf93cc9ba5e314ebb2abc985f8"} Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.115938 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d","Type":"ContainerDied","Data":"ae25b19badcac27915cf81d4556d1e0d3bdadc18cc6fee98258756ac117181b9"} Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.115975 4857 scope.go:117] "RemoveContainer" containerID="ba6185dbaf33b25f5b661fea2dcf82e7a24370bf93cc9ba5e314ebb2abc985f8" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.116162 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.120173 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"f6c9b673-669e-464a-b012-8b39314e1990","Type":"ContainerDied","Data":"0d7c47b9349fc16ac761956ae2eeb57a0d5d177db9e2522a53c6cc79d9ae34af"} Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.120253 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.121859 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mmgs7\" (UniqueName: \"kubernetes.io/projected/73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d-kube-api-access-mmgs7\") pod \"73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d\" (UID: \"73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d\") " Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.121918 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d-config-data\") pod \"73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d\" (UID: \"73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d\") " Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.121996 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d-combined-ca-bundle\") pod \"73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d\" (UID: \"73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d\") " Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.128196 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_e6597fde-8e34-4ccb-8784-1b7aa3680488/ovn-northd/0.log" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.128472 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"e6597fde-8e34-4ccb-8784-1b7aa3680488","Type":"ContainerDied","Data":"61fcce362257cc668b6c26a68f147eb2f41e15df46f8195149a15276320c116e"} Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.128513 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.137290 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d-kube-api-access-mmgs7" (OuterVolumeSpecName: "kube-api-access-mmgs7") pod "73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d" (UID: "73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d"). InnerVolumeSpecName "kube-api-access-mmgs7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.143178 4857 generic.go:334] "Generic (PLEG): container finished" podID="791bae3e-043c-4a91-8e8b-d1d574dcb008" containerID="4c9f26d142655cc105e4408ad7bf9835e75524af522502f18f09cbe79369a7e4" exitCode=0 Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.143354 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7d59cc7587-wt4q5" event={"ID":"791bae3e-043c-4a91-8e8b-d1d574dcb008","Type":"ContainerDied","Data":"4c9f26d142655cc105e4408ad7bf9835e75524af522502f18f09cbe79369a7e4"} Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.143390 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7d59cc7587-wt4q5" event={"ID":"791bae3e-043c-4a91-8e8b-d1d574dcb008","Type":"ContainerDied","Data":"0b7b9479c88e90b598b53d39451837a5437d8c6b571a24c0bbcac4880d5502d4"} Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.143421 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-7d59cc7587-wt4q5" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.147582 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-5b64c5866d-mkt8b" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.151030 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5b64c5866d-mkt8b" event={"ID":"5d4d3b72-fd05-4a47-925c-f17f77c46fc1","Type":"ContainerDied","Data":"67add3ab60c21921a75d4024750fceb80cecdc89bb2d69480c26ea4c366599cf"} Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.152666 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.175681 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d-config-data" (OuterVolumeSpecName: "config-data") pod "73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d" (UID: "73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.178421 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d" (UID: "73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.185957 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"] Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.213891 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-galera-0"] Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.216552 4857 scope.go:117] "RemoveContainer" containerID="ba6185dbaf33b25f5b661fea2dcf82e7a24370bf93cc9ba5e314ebb2abc985f8" Nov 28 13:55:12 crc kubenswrapper[4857]: E1128 13:55:12.216974 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba6185dbaf33b25f5b661fea2dcf82e7a24370bf93cc9ba5e314ebb2abc985f8\": container with ID starting with ba6185dbaf33b25f5b661fea2dcf82e7a24370bf93cc9ba5e314ebb2abc985f8 not found: ID does not exist" containerID="ba6185dbaf33b25f5b661fea2dcf82e7a24370bf93cc9ba5e314ebb2abc985f8" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.217004 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba6185dbaf33b25f5b661fea2dcf82e7a24370bf93cc9ba5e314ebb2abc985f8"} err="failed to get container status \"ba6185dbaf33b25f5b661fea2dcf82e7a24370bf93cc9ba5e314ebb2abc985f8\": rpc error: code = NotFound desc = could not find container \"ba6185dbaf33b25f5b661fea2dcf82e7a24370bf93cc9ba5e314ebb2abc985f8\": container with ID starting with ba6185dbaf33b25f5b661fea2dcf82e7a24370bf93cc9ba5e314ebb2abc985f8 not found: ID does not exist" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.217022 4857 scope.go:117] "RemoveContainer" containerID="3d5a1e70902743d5830edbb49ef840b594da301bdcf85c3dfb36991d40ad00b1" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.223820 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.223857 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.223867 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mmgs7\" (UniqueName: \"kubernetes.io/projected/73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d-kube-api-access-mmgs7\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.243915 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0832be69-9adb-40ba-a288-42ec7741fc19" path="/var/lib/kubelet/pods/0832be69-9adb-40ba-a288-42ec7741fc19/volumes" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.244584 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1237773a-dc19-47c6-90cc-eb0de954d9b4" path="/var/lib/kubelet/pods/1237773a-dc19-47c6-90cc-eb0de954d9b4/volumes" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.245262 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29b25b8b-8f1c-4f60-b275-f924f1c0812a" path="/var/lib/kubelet/pods/29b25b8b-8f1c-4f60-b275-f924f1c0812a/volumes" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.245776 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2df15232-d07b-49ae-99b9-60bb31ad3ff3" 
path="/var/lib/kubelet/pods/2df15232-d07b-49ae-99b9-60bb31ad3ff3/volumes" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.248318 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3e942756-b43c-4088-a714-7445b3dab481" path="/var/lib/kubelet/pods/3e942756-b43c-4088-a714-7445b3dab481/volumes" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.248836 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="464d5189-d9e5-4b18-b383-a7d75a28771b" path="/var/lib/kubelet/pods/464d5189-d9e5-4b18-b383-a7d75a28771b/volumes" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.249296 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c03f40d-958a-49a0-a2f7-54a1f175caf7" path="/var/lib/kubelet/pods/4c03f40d-958a-49a0-a2f7-54a1f175caf7/volumes" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.252143 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c66fec5-8c9d-44f0-8a86-8dd74ea565b2" path="/var/lib/kubelet/pods/4c66fec5-8c9d-44f0-8a86-8dd74ea565b2/volumes" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.252916 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="967609ba-f165-4bfa-b13d-d23154c329e7" path="/var/lib/kubelet/pods/967609ba-f165-4bfa-b13d-d23154c329e7/volumes" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.253441 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9f1e36e4-55cc-422a-9fad-ef48cc42cbe6" path="/var/lib/kubelet/pods/9f1e36e4-55cc-422a-9fad-ef48cc42cbe6/volumes" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.253974 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b2c8cca0-a390-4db6-978d-d32af42ac290" path="/var/lib/kubelet/pods/b2c8cca0-a390-4db6-978d-d32af42ac290/volumes" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.254429 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba0bce06-3761-4bb4-8e35-305dc48b3277" path="/var/lib/kubelet/pods/ba0bce06-3761-4bb4-8e35-305dc48b3277/volumes" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.255450 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e49aa4db-e19c-4a91-9faa-d1f86f8aaa8f" path="/var/lib/kubelet/pods/e49aa4db-e19c-4a91-9faa-d1f86f8aaa8f/volumes" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.256164 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e5ec18e7-6719-46dd-b580-303f3da41869" path="/var/lib/kubelet/pods/e5ec18e7-6719-46dd-b580-303f3da41869/volumes" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.257229 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ead17b20-d23f-4794-9f28-4a536c60c48c" path="/var/lib/kubelet/pods/ead17b20-d23f-4794-9f28-4a536c60c48c/volumes" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.257729 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f1f87bb5-7cc1-4533-b145-d855e45205ca" path="/var/lib/kubelet/pods/f1f87bb5-7cc1-4533-b145-d855e45205ca/volumes" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.258329 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f6c9b673-669e-464a-b012-8b39314e1990" path="/var/lib/kubelet/pods/f6c9b673-669e-464a-b012-8b39314e1990/volumes" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.259510 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-5b64c5866d-mkt8b"] Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.275118 4857 
kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-5b64c5866d-mkt8b"] Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.275210 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-7d59cc7587-wt4q5"] Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.275226 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-worker-7d59cc7587-wt4q5"] Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.283186 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.290790 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.300011 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.311870 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-northd-0"] Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.322027 4857 scope.go:117] "RemoveContainer" containerID="94d359b86653eba30202b5b263492bd7c58d333129f67ab86a62151c212fd0cd" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.374853 4857 scope.go:117] "RemoveContainer" containerID="f18e195c94e7d7c3606c3e2d5bbadff3adfb1a2f922e9e1872782f709251b377" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.402325 4857 scope.go:117] "RemoveContainer" containerID="cb2e8e2db5b421b3011ac3c2410e936a5d71fda6c184fdd57c1dd60cdbcfc09b" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.439002 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.446530 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.491405 4857 scope.go:117] "RemoveContainer" containerID="4c9f26d142655cc105e4408ad7bf9835e75524af522502f18f09cbe79369a7e4" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.526670 4857 scope.go:117] "RemoveContainer" containerID="184fc257124031568af407b028afb09e8021dd8c63efc35a95cbe06c5d922f41" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.563869 4857 scope.go:117] "RemoveContainer" containerID="4c9f26d142655cc105e4408ad7bf9835e75524af522502f18f09cbe79369a7e4" Nov 28 13:55:12 crc kubenswrapper[4857]: E1128 13:55:12.564263 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c9f26d142655cc105e4408ad7bf9835e75524af522502f18f09cbe79369a7e4\": container with ID starting with 4c9f26d142655cc105e4408ad7bf9835e75524af522502f18f09cbe79369a7e4 not found: ID does not exist" containerID="4c9f26d142655cc105e4408ad7bf9835e75524af522502f18f09cbe79369a7e4" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.564396 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c9f26d142655cc105e4408ad7bf9835e75524af522502f18f09cbe79369a7e4"} err="failed to get container status \"4c9f26d142655cc105e4408ad7bf9835e75524af522502f18f09cbe79369a7e4\": rpc error: code = NotFound desc = could not find container \"4c9f26d142655cc105e4408ad7bf9835e75524af522502f18f09cbe79369a7e4\": container with ID starting with 4c9f26d142655cc105e4408ad7bf9835e75524af522502f18f09cbe79369a7e4 not found: ID does not exist" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.564428 4857 
scope.go:117] "RemoveContainer" containerID="184fc257124031568af407b028afb09e8021dd8c63efc35a95cbe06c5d922f41" Nov 28 13:55:12 crc kubenswrapper[4857]: E1128 13:55:12.564852 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"184fc257124031568af407b028afb09e8021dd8c63efc35a95cbe06c5d922f41\": container with ID starting with 184fc257124031568af407b028afb09e8021dd8c63efc35a95cbe06c5d922f41 not found: ID does not exist" containerID="184fc257124031568af407b028afb09e8021dd8c63efc35a95cbe06c5d922f41" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.565092 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"184fc257124031568af407b028afb09e8021dd8c63efc35a95cbe06c5d922f41"} err="failed to get container status \"184fc257124031568af407b028afb09e8021dd8c63efc35a95cbe06c5d922f41\": rpc error: code = NotFound desc = could not find container \"184fc257124031568af407b028afb09e8021dd8c63efc35a95cbe06c5d922f41\": container with ID starting with 184fc257124031568af407b028afb09e8021dd8c63efc35a95cbe06c5d922f41 not found: ID does not exist" Nov 28 13:55:12 crc kubenswrapper[4857]: I1128 13:55:12.565121 4857 scope.go:117] "RemoveContainer" containerID="05d50721ac2243fb0f0316bcd6f40e7732694575564655c777cddace1e0267e4" Nov 28 13:55:12 crc kubenswrapper[4857]: E1128 13:55:12.653315 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:55:12 crc kubenswrapper[4857]: E1128 13:55:12.653369 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd-operator-scripts podName:2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd nodeName:}" failed. No retries permitted until 2025-11-28 13:55:16.653354742 +0000 UTC m=+1566.777296179 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd-operator-scripts") pod "barbican8199-account-delete-z9b9h" (UID: "2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd") : configmap "openstack-scripts" not found Nov 28 13:55:12 crc kubenswrapper[4857]: E1128 13:55:12.754682 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:55:12 crc kubenswrapper[4857]: E1128 13:55:12.754778 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/8288e005-1d07-4989-bc64-64b3ecd62993-operator-scripts podName:8288e005-1d07-4989-bc64-64b3ecd62993 nodeName:}" failed. No retries permitted until 2025-11-28 13:55:16.754737472 +0000 UTC m=+1566.878678909 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/8288e005-1d07-4989-bc64-64b3ecd62993-operator-scripts") pod "novaapi379c-account-delete-7x584" (UID: "8288e005-1d07-4989-bc64-64b3ecd62993") : configmap "openstack-scripts" not found Nov 28 13:55:13 crc kubenswrapper[4857]: I1128 13:55:13.167069 4857 generic.go:334] "Generic (PLEG): container finished" podID="7f9668e3-0293-4d8f-aa56-ad830134b0e4" containerID="ae39056cc66de7f497a956b990f30bd3eaffff96baacb26893867aa11093d583" exitCode=0 Nov 28 13:55:13 crc kubenswrapper[4857]: I1128 13:55:13.167088 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cpnwx" event={"ID":"7f9668e3-0293-4d8f-aa56-ad830134b0e4","Type":"ContainerDied","Data":"ae39056cc66de7f497a956b990f30bd3eaffff96baacb26893867aa11093d583"} Nov 28 13:55:14 crc kubenswrapper[4857]: I1128 13:55:14.186738 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cpnwx" event={"ID":"7f9668e3-0293-4d8f-aa56-ad830134b0e4","Type":"ContainerStarted","Data":"372a3dee7cd3722d8a73533e20a2f50deb6a67a113d54693038ebaa12d6c8a83"} Nov 28 13:55:14 crc kubenswrapper[4857]: I1128 13:55:14.208895 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-cpnwx" podStartSLOduration=6.3286763839999995 podStartE2EDuration="9.208876762s" podCreationTimestamp="2025-11-28 13:55:05 +0000 UTC" firstStartedPulling="2025-11-28 13:55:11.077234861 +0000 UTC m=+1561.201176298" lastFinishedPulling="2025-11-28 13:55:13.957435229 +0000 UTC m=+1564.081376676" observedRunningTime="2025-11-28 13:55:14.202874983 +0000 UTC m=+1564.326816460" watchObservedRunningTime="2025-11-28 13:55:14.208876762 +0000 UTC m=+1564.332818219" Nov 28 13:55:14 crc kubenswrapper[4857]: I1128 13:55:14.240257 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5d4d3b72-fd05-4a47-925c-f17f77c46fc1" path="/var/lib/kubelet/pods/5d4d3b72-fd05-4a47-925c-f17f77c46fc1/volumes" Nov 28 13:55:14 crc kubenswrapper[4857]: I1128 13:55:14.241304 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d" path="/var/lib/kubelet/pods/73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d/volumes" Nov 28 13:55:14 crc kubenswrapper[4857]: I1128 13:55:14.242045 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="791bae3e-043c-4a91-8e8b-d1d574dcb008" path="/var/lib/kubelet/pods/791bae3e-043c-4a91-8e8b-d1d574dcb008/volumes" Nov 28 13:55:14 crc kubenswrapper[4857]: I1128 13:55:14.245430 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e5550a25-04ef-4dde-afd4-627f1df97a90" path="/var/lib/kubelet/pods/e5550a25-04ef-4dde-afd4-627f1df97a90/volumes" Nov 28 13:55:14 crc kubenswrapper[4857]: I1128 13:55:14.247030 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e6597fde-8e34-4ccb-8784-1b7aa3680488" path="/var/lib/kubelet/pods/e6597fde-8e34-4ccb-8784-1b7aa3680488/volumes" Nov 28 13:55:14 crc kubenswrapper[4857]: I1128 13:55:14.837935 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:55:14 crc kubenswrapper[4857]: I1128 13:55:14.988995 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e1e13053-d5d0-4d38-8758-4ebf494ededb-config-data\") pod \"e1e13053-d5d0-4d38-8758-4ebf494ededb\" (UID: \"e1e13053-d5d0-4d38-8758-4ebf494ededb\") " Nov 28 13:55:14 crc kubenswrapper[4857]: I1128 13:55:14.989042 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e1e13053-d5d0-4d38-8758-4ebf494ededb-ceilometer-tls-certs\") pod \"e1e13053-d5d0-4d38-8758-4ebf494ededb\" (UID: \"e1e13053-d5d0-4d38-8758-4ebf494ededb\") " Nov 28 13:55:14 crc kubenswrapper[4857]: I1128 13:55:14.989079 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e1e13053-d5d0-4d38-8758-4ebf494ededb-sg-core-conf-yaml\") pod \"e1e13053-d5d0-4d38-8758-4ebf494ededb\" (UID: \"e1e13053-d5d0-4d38-8758-4ebf494ededb\") " Nov 28 13:55:14 crc kubenswrapper[4857]: I1128 13:55:14.989145 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e1e13053-d5d0-4d38-8758-4ebf494ededb-combined-ca-bundle\") pod \"e1e13053-d5d0-4d38-8758-4ebf494ededb\" (UID: \"e1e13053-d5d0-4d38-8758-4ebf494ededb\") " Nov 28 13:55:14 crc kubenswrapper[4857]: I1128 13:55:14.989194 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e1e13053-d5d0-4d38-8758-4ebf494ededb-scripts\") pod \"e1e13053-d5d0-4d38-8758-4ebf494ededb\" (UID: \"e1e13053-d5d0-4d38-8758-4ebf494ededb\") " Nov 28 13:55:14 crc kubenswrapper[4857]: I1128 13:55:14.989214 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6xvv7\" (UniqueName: \"kubernetes.io/projected/e1e13053-d5d0-4d38-8758-4ebf494ededb-kube-api-access-6xvv7\") pod \"e1e13053-d5d0-4d38-8758-4ebf494ededb\" (UID: \"e1e13053-d5d0-4d38-8758-4ebf494ededb\") " Nov 28 13:55:14 crc kubenswrapper[4857]: I1128 13:55:14.989265 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e1e13053-d5d0-4d38-8758-4ebf494ededb-log-httpd\") pod \"e1e13053-d5d0-4d38-8758-4ebf494ededb\" (UID: \"e1e13053-d5d0-4d38-8758-4ebf494ededb\") " Nov 28 13:55:14 crc kubenswrapper[4857]: I1128 13:55:14.989285 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e1e13053-d5d0-4d38-8758-4ebf494ededb-run-httpd\") pod \"e1e13053-d5d0-4d38-8758-4ebf494ededb\" (UID: \"e1e13053-d5d0-4d38-8758-4ebf494ededb\") " Nov 28 13:55:14 crc kubenswrapper[4857]: I1128 13:55:14.989915 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e1e13053-d5d0-4d38-8758-4ebf494ededb-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "e1e13053-d5d0-4d38-8758-4ebf494ededb" (UID: "e1e13053-d5d0-4d38-8758-4ebf494ededb"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:55:14 crc kubenswrapper[4857]: I1128 13:55:14.990049 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e1e13053-d5d0-4d38-8758-4ebf494ededb-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "e1e13053-d5d0-4d38-8758-4ebf494ededb" (UID: "e1e13053-d5d0-4d38-8758-4ebf494ededb"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:55:15 crc kubenswrapper[4857]: I1128 13:55:15.006869 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e1e13053-d5d0-4d38-8758-4ebf494ededb-kube-api-access-6xvv7" (OuterVolumeSpecName: "kube-api-access-6xvv7") pod "e1e13053-d5d0-4d38-8758-4ebf494ededb" (UID: "e1e13053-d5d0-4d38-8758-4ebf494ededb"). InnerVolumeSpecName "kube-api-access-6xvv7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:15 crc kubenswrapper[4857]: I1128 13:55:15.008066 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e1e13053-d5d0-4d38-8758-4ebf494ededb-scripts" (OuterVolumeSpecName: "scripts") pod "e1e13053-d5d0-4d38-8758-4ebf494ededb" (UID: "e1e13053-d5d0-4d38-8758-4ebf494ededb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:15 crc kubenswrapper[4857]: I1128 13:55:15.017131 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e1e13053-d5d0-4d38-8758-4ebf494ededb-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "e1e13053-d5d0-4d38-8758-4ebf494ededb" (UID: "e1e13053-d5d0-4d38-8758-4ebf494ededb"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:15 crc kubenswrapper[4857]: I1128 13:55:15.031638 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e1e13053-d5d0-4d38-8758-4ebf494ededb-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "e1e13053-d5d0-4d38-8758-4ebf494ededb" (UID: "e1e13053-d5d0-4d38-8758-4ebf494ededb"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:15 crc kubenswrapper[4857]: I1128 13:55:15.079064 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e1e13053-d5d0-4d38-8758-4ebf494ededb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e1e13053-d5d0-4d38-8758-4ebf494ededb" (UID: "e1e13053-d5d0-4d38-8758-4ebf494ededb"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:15 crc kubenswrapper[4857]: I1128 13:55:15.091702 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e1e13053-d5d0-4d38-8758-4ebf494ededb-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:15 crc kubenswrapper[4857]: I1128 13:55:15.091866 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6xvv7\" (UniqueName: \"kubernetes.io/projected/e1e13053-d5d0-4d38-8758-4ebf494ededb-kube-api-access-6xvv7\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:15 crc kubenswrapper[4857]: I1128 13:55:15.091883 4857 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e1e13053-d5d0-4d38-8758-4ebf494ededb-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:15 crc kubenswrapper[4857]: I1128 13:55:15.091895 4857 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e1e13053-d5d0-4d38-8758-4ebf494ededb-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:15 crc kubenswrapper[4857]: I1128 13:55:15.091907 4857 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e1e13053-d5d0-4d38-8758-4ebf494ededb-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:15 crc kubenswrapper[4857]: I1128 13:55:15.091919 4857 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e1e13053-d5d0-4d38-8758-4ebf494ededb-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:15 crc kubenswrapper[4857]: I1128 13:55:15.091930 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e1e13053-d5d0-4d38-8758-4ebf494ededb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:15 crc kubenswrapper[4857]: I1128 13:55:15.122384 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e1e13053-d5d0-4d38-8758-4ebf494ededb-config-data" (OuterVolumeSpecName: "config-data") pod "e1e13053-d5d0-4d38-8758-4ebf494ededb" (UID: "e1e13053-d5d0-4d38-8758-4ebf494ededb"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:15 crc kubenswrapper[4857]: I1128 13:55:15.196531 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e1e13053-d5d0-4d38-8758-4ebf494ededb-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:15 crc kubenswrapper[4857]: I1128 13:55:15.200772 4857 generic.go:334] "Generic (PLEG): container finished" podID="e1e13053-d5d0-4d38-8758-4ebf494ededb" containerID="5f70629b403f96250df7835408fa39ed17b8aa7f4321d155cb4b5928ca498a2f" exitCode=0 Nov 28 13:55:15 crc kubenswrapper[4857]: I1128 13:55:15.201061 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e1e13053-d5d0-4d38-8758-4ebf494ededb","Type":"ContainerDied","Data":"5f70629b403f96250df7835408fa39ed17b8aa7f4321d155cb4b5928ca498a2f"} Nov 28 13:55:15 crc kubenswrapper[4857]: I1128 13:55:15.201099 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e1e13053-d5d0-4d38-8758-4ebf494ededb","Type":"ContainerDied","Data":"289009e237990e27dc94afc0837c8cdeafb1f7e41885cfba8e4cd247ede7bdc4"} Nov 28 13:55:15 crc kubenswrapper[4857]: I1128 13:55:15.201112 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 13:55:15 crc kubenswrapper[4857]: I1128 13:55:15.201120 4857 scope.go:117] "RemoveContainer" containerID="b02b48f6a3bd9ecdaa5bd535ab743fbe80646abdb2b8e238d7f4f2f8e3dc0a5a" Nov 28 13:55:15 crc kubenswrapper[4857]: I1128 13:55:15.221226 4857 scope.go:117] "RemoveContainer" containerID="f633a7fccbea552027e16642eeadd7827f3b2abc1b69605dcaca7883382f640d" Nov 28 13:55:15 crc kubenswrapper[4857]: I1128 13:55:15.248323 4857 scope.go:117] "RemoveContainer" containerID="5f70629b403f96250df7835408fa39ed17b8aa7f4321d155cb4b5928ca498a2f" Nov 28 13:55:15 crc kubenswrapper[4857]: I1128 13:55:15.256087 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:55:15 crc kubenswrapper[4857]: I1128 13:55:15.267438 4857 scope.go:117] "RemoveContainer" containerID="c1dcaef15d4aab0862029c1c380665fb49a8e7b2e70709e10b934af5bb464135" Nov 28 13:55:15 crc kubenswrapper[4857]: I1128 13:55:15.282175 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 13:55:15 crc kubenswrapper[4857]: I1128 13:55:15.289509 4857 scope.go:117] "RemoveContainer" containerID="b02b48f6a3bd9ecdaa5bd535ab743fbe80646abdb2b8e238d7f4f2f8e3dc0a5a" Nov 28 13:55:15 crc kubenswrapper[4857]: E1128 13:55:15.290273 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b02b48f6a3bd9ecdaa5bd535ab743fbe80646abdb2b8e238d7f4f2f8e3dc0a5a\": container with ID starting with b02b48f6a3bd9ecdaa5bd535ab743fbe80646abdb2b8e238d7f4f2f8e3dc0a5a not found: ID does not exist" containerID="b02b48f6a3bd9ecdaa5bd535ab743fbe80646abdb2b8e238d7f4f2f8e3dc0a5a" Nov 28 13:55:15 crc kubenswrapper[4857]: I1128 13:55:15.290334 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b02b48f6a3bd9ecdaa5bd535ab743fbe80646abdb2b8e238d7f4f2f8e3dc0a5a"} err="failed to get container status \"b02b48f6a3bd9ecdaa5bd535ab743fbe80646abdb2b8e238d7f4f2f8e3dc0a5a\": rpc error: code = NotFound desc = could not find container \"b02b48f6a3bd9ecdaa5bd535ab743fbe80646abdb2b8e238d7f4f2f8e3dc0a5a\": container with ID starting with b02b48f6a3bd9ecdaa5bd535ab743fbe80646abdb2b8e238d7f4f2f8e3dc0a5a 
not found: ID does not exist" Nov 28 13:55:15 crc kubenswrapper[4857]: I1128 13:55:15.290366 4857 scope.go:117] "RemoveContainer" containerID="f633a7fccbea552027e16642eeadd7827f3b2abc1b69605dcaca7883382f640d" Nov 28 13:55:15 crc kubenswrapper[4857]: E1128 13:55:15.290988 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f633a7fccbea552027e16642eeadd7827f3b2abc1b69605dcaca7883382f640d\": container with ID starting with f633a7fccbea552027e16642eeadd7827f3b2abc1b69605dcaca7883382f640d not found: ID does not exist" containerID="f633a7fccbea552027e16642eeadd7827f3b2abc1b69605dcaca7883382f640d" Nov 28 13:55:15 crc kubenswrapper[4857]: I1128 13:55:15.291009 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f633a7fccbea552027e16642eeadd7827f3b2abc1b69605dcaca7883382f640d"} err="failed to get container status \"f633a7fccbea552027e16642eeadd7827f3b2abc1b69605dcaca7883382f640d\": rpc error: code = NotFound desc = could not find container \"f633a7fccbea552027e16642eeadd7827f3b2abc1b69605dcaca7883382f640d\": container with ID starting with f633a7fccbea552027e16642eeadd7827f3b2abc1b69605dcaca7883382f640d not found: ID does not exist" Nov 28 13:55:15 crc kubenswrapper[4857]: I1128 13:55:15.291021 4857 scope.go:117] "RemoveContainer" containerID="5f70629b403f96250df7835408fa39ed17b8aa7f4321d155cb4b5928ca498a2f" Nov 28 13:55:15 crc kubenswrapper[4857]: E1128 13:55:15.291325 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5f70629b403f96250df7835408fa39ed17b8aa7f4321d155cb4b5928ca498a2f\": container with ID starting with 5f70629b403f96250df7835408fa39ed17b8aa7f4321d155cb4b5928ca498a2f not found: ID does not exist" containerID="5f70629b403f96250df7835408fa39ed17b8aa7f4321d155cb4b5928ca498a2f" Nov 28 13:55:15 crc kubenswrapper[4857]: I1128 13:55:15.291380 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f70629b403f96250df7835408fa39ed17b8aa7f4321d155cb4b5928ca498a2f"} err="failed to get container status \"5f70629b403f96250df7835408fa39ed17b8aa7f4321d155cb4b5928ca498a2f\": rpc error: code = NotFound desc = could not find container \"5f70629b403f96250df7835408fa39ed17b8aa7f4321d155cb4b5928ca498a2f\": container with ID starting with 5f70629b403f96250df7835408fa39ed17b8aa7f4321d155cb4b5928ca498a2f not found: ID does not exist" Nov 28 13:55:15 crc kubenswrapper[4857]: I1128 13:55:15.291413 4857 scope.go:117] "RemoveContainer" containerID="c1dcaef15d4aab0862029c1c380665fb49a8e7b2e70709e10b934af5bb464135" Nov 28 13:55:15 crc kubenswrapper[4857]: E1128 13:55:15.291699 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c1dcaef15d4aab0862029c1c380665fb49a8e7b2e70709e10b934af5bb464135\": container with ID starting with c1dcaef15d4aab0862029c1c380665fb49a8e7b2e70709e10b934af5bb464135 not found: ID does not exist" containerID="c1dcaef15d4aab0862029c1c380665fb49a8e7b2e70709e10b934af5bb464135" Nov 28 13:55:15 crc kubenswrapper[4857]: I1128 13:55:15.291728 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c1dcaef15d4aab0862029c1c380665fb49a8e7b2e70709e10b934af5bb464135"} err="failed to get container status \"c1dcaef15d4aab0862029c1c380665fb49a8e7b2e70709e10b934af5bb464135\": rpc error: code = NotFound desc = could not find container 
\"c1dcaef15d4aab0862029c1c380665fb49a8e7b2e70709e10b934af5bb464135\": container with ID starting with c1dcaef15d4aab0862029c1c380665fb49a8e7b2e70709e10b934af5bb464135 not found: ID does not exist" Nov 28 13:55:16 crc kubenswrapper[4857]: I1128 13:55:16.240377 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e1e13053-d5d0-4d38-8758-4ebf494ededb" path="/var/lib/kubelet/pods/e1e13053-d5d0-4d38-8758-4ebf494ededb/volumes" Nov 28 13:55:16 crc kubenswrapper[4857]: E1128 13:55:16.719254 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:55:16 crc kubenswrapper[4857]: E1128 13:55:16.719353 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd-operator-scripts podName:2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd nodeName:}" failed. No retries permitted until 2025-11-28 13:55:24.719329351 +0000 UTC m=+1574.843270808 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd-operator-scripts") pod "barbican8199-account-delete-z9b9h" (UID: "2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd") : configmap "openstack-scripts" not found Nov 28 13:55:16 crc kubenswrapper[4857]: E1128 13:55:16.821313 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:55:16 crc kubenswrapper[4857]: E1128 13:55:16.821386 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/8288e005-1d07-4989-bc64-64b3ecd62993-operator-scripts podName:8288e005-1d07-4989-bc64-64b3ecd62993 nodeName:}" failed. No retries permitted until 2025-11-28 13:55:24.821371199 +0000 UTC m=+1574.945312636 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/8288e005-1d07-4989-bc64-64b3ecd62993-operator-scripts") pod "novaapi379c-account-delete-7x584" (UID: "8288e005-1d07-4989-bc64-64b3ecd62993") : configmap "openstack-scripts" not found Nov 28 13:55:17 crc kubenswrapper[4857]: E1128 13:55:17.047130 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17 is running failed: container process not found" containerID="a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 13:55:17 crc kubenswrapper[4857]: E1128 13:55:17.047837 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17 is running failed: container process not found" containerID="a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 13:55:17 crc kubenswrapper[4857]: E1128 13:55:17.048079 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b92ad2228277ac1c1d0e0ba05d4fc59c373322274d92f626373330dc03f6a207" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 13:55:17 crc kubenswrapper[4857]: E1128 13:55:17.048499 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17 is running failed: container process not found" containerID="a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 13:55:17 crc kubenswrapper[4857]: E1128 13:55:17.048641 4857 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-hgm54" podUID="21fe1399-7f40-43ec-bee8-868c937a6e19" containerName="ovsdb-server" Nov 28 13:55:17 crc kubenswrapper[4857]: E1128 13:55:17.050616 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b92ad2228277ac1c1d0e0ba05d4fc59c373322274d92f626373330dc03f6a207" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 13:55:17 crc kubenswrapper[4857]: E1128 13:55:17.053061 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b92ad2228277ac1c1d0e0ba05d4fc59c373322274d92f626373330dc03f6a207" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 13:55:17 crc kubenswrapper[4857]: E1128 13:55:17.053114 4857 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: 
container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-hgm54" podUID="21fe1399-7f40-43ec-bee8-868c937a6e19" containerName="ovs-vswitchd" Nov 28 13:55:17 crc kubenswrapper[4857]: I1128 13:55:17.598928 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-cpnwx" Nov 28 13:55:17 crc kubenswrapper[4857]: I1128 13:55:17.599014 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-cpnwx" Nov 28 13:55:17 crc kubenswrapper[4857]: I1128 13:55:17.658059 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-6b7846d5d5-ddbqf" podUID="960b2298-15f9-436b-93c9-04b0617c0c9b" containerName="neutron-httpd" probeResult="failure" output="Get \"https://10.217.0.155:9696/\": dial tcp 10.217.0.155:9696: connect: connection refused" Nov 28 13:55:17 crc kubenswrapper[4857]: I1128 13:55:17.659769 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-cpnwx" Nov 28 13:55:18 crc kubenswrapper[4857]: I1128 13:55:18.932038 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6b7846d5d5-ddbqf" Nov 28 13:55:19 crc kubenswrapper[4857]: I1128 13:55:19.056711 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/960b2298-15f9-436b-93c9-04b0617c0c9b-httpd-config\") pod \"960b2298-15f9-436b-93c9-04b0617c0c9b\" (UID: \"960b2298-15f9-436b-93c9-04b0617c0c9b\") " Nov 28 13:55:19 crc kubenswrapper[4857]: I1128 13:55:19.057199 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/960b2298-15f9-436b-93c9-04b0617c0c9b-public-tls-certs\") pod \"960b2298-15f9-436b-93c9-04b0617c0c9b\" (UID: \"960b2298-15f9-436b-93c9-04b0617c0c9b\") " Nov 28 13:55:19 crc kubenswrapper[4857]: I1128 13:55:19.057220 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ttdd4\" (UniqueName: \"kubernetes.io/projected/960b2298-15f9-436b-93c9-04b0617c0c9b-kube-api-access-ttdd4\") pod \"960b2298-15f9-436b-93c9-04b0617c0c9b\" (UID: \"960b2298-15f9-436b-93c9-04b0617c0c9b\") " Nov 28 13:55:19 crc kubenswrapper[4857]: I1128 13:55:19.057302 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/960b2298-15f9-436b-93c9-04b0617c0c9b-config\") pod \"960b2298-15f9-436b-93c9-04b0617c0c9b\" (UID: \"960b2298-15f9-436b-93c9-04b0617c0c9b\") " Nov 28 13:55:19 crc kubenswrapper[4857]: I1128 13:55:19.057354 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/960b2298-15f9-436b-93c9-04b0617c0c9b-combined-ca-bundle\") pod \"960b2298-15f9-436b-93c9-04b0617c0c9b\" (UID: \"960b2298-15f9-436b-93c9-04b0617c0c9b\") " Nov 28 13:55:19 crc kubenswrapper[4857]: I1128 13:55:19.057388 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/960b2298-15f9-436b-93c9-04b0617c0c9b-ovndb-tls-certs\") pod \"960b2298-15f9-436b-93c9-04b0617c0c9b\" (UID: \"960b2298-15f9-436b-93c9-04b0617c0c9b\") " Nov 28 13:55:19 crc kubenswrapper[4857]: I1128 13:55:19.057422 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/960b2298-15f9-436b-93c9-04b0617c0c9b-internal-tls-certs\") pod \"960b2298-15f9-436b-93c9-04b0617c0c9b\" (UID: \"960b2298-15f9-436b-93c9-04b0617c0c9b\") " Nov 28 13:55:19 crc kubenswrapper[4857]: I1128 13:55:19.063522 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/960b2298-15f9-436b-93c9-04b0617c0c9b-kube-api-access-ttdd4" (OuterVolumeSpecName: "kube-api-access-ttdd4") pod "960b2298-15f9-436b-93c9-04b0617c0c9b" (UID: "960b2298-15f9-436b-93c9-04b0617c0c9b"). InnerVolumeSpecName "kube-api-access-ttdd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:19 crc kubenswrapper[4857]: I1128 13:55:19.064583 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/960b2298-15f9-436b-93c9-04b0617c0c9b-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "960b2298-15f9-436b-93c9-04b0617c0c9b" (UID: "960b2298-15f9-436b-93c9-04b0617c0c9b"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:19 crc kubenswrapper[4857]: I1128 13:55:19.101793 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/960b2298-15f9-436b-93c9-04b0617c0c9b-config" (OuterVolumeSpecName: "config") pod "960b2298-15f9-436b-93c9-04b0617c0c9b" (UID: "960b2298-15f9-436b-93c9-04b0617c0c9b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:19 crc kubenswrapper[4857]: I1128 13:55:19.105307 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/960b2298-15f9-436b-93c9-04b0617c0c9b-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "960b2298-15f9-436b-93c9-04b0617c0c9b" (UID: "960b2298-15f9-436b-93c9-04b0617c0c9b"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:19 crc kubenswrapper[4857]: I1128 13:55:19.108210 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/960b2298-15f9-436b-93c9-04b0617c0c9b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "960b2298-15f9-436b-93c9-04b0617c0c9b" (UID: "960b2298-15f9-436b-93c9-04b0617c0c9b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:19 crc kubenswrapper[4857]: I1128 13:55:19.113108 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/960b2298-15f9-436b-93c9-04b0617c0c9b-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "960b2298-15f9-436b-93c9-04b0617c0c9b" (UID: "960b2298-15f9-436b-93c9-04b0617c0c9b"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:19 crc kubenswrapper[4857]: I1128 13:55:19.159421 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/960b2298-15f9-436b-93c9-04b0617c0c9b-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:19 crc kubenswrapper[4857]: I1128 13:55:19.159463 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/960b2298-15f9-436b-93c9-04b0617c0c9b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:19 crc kubenswrapper[4857]: I1128 13:55:19.159477 4857 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/960b2298-15f9-436b-93c9-04b0617c0c9b-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:19 crc kubenswrapper[4857]: I1128 13:55:19.159488 4857 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/960b2298-15f9-436b-93c9-04b0617c0c9b-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:19 crc kubenswrapper[4857]: I1128 13:55:19.159502 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ttdd4\" (UniqueName: \"kubernetes.io/projected/960b2298-15f9-436b-93c9-04b0617c0c9b-kube-api-access-ttdd4\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:19 crc kubenswrapper[4857]: I1128 13:55:19.159515 4857 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/960b2298-15f9-436b-93c9-04b0617c0c9b-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:19 crc kubenswrapper[4857]: I1128 13:55:19.654062 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/960b2298-15f9-436b-93c9-04b0617c0c9b-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "960b2298-15f9-436b-93c9-04b0617c0c9b" (UID: "960b2298-15f9-436b-93c9-04b0617c0c9b"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 13:55:19 crc kubenswrapper[4857]: I1128 13:55:19.666742 4857 generic.go:334] "Generic (PLEG): container finished" podID="960b2298-15f9-436b-93c9-04b0617c0c9b" containerID="f03c59fbce1ae6f2cf02e7c1ad44973e4ba6366fbbed5e71f01df183b4e8db9c" exitCode=0 Nov 28 13:55:19 crc kubenswrapper[4857]: I1128 13:55:19.666793 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-6b7846d5d5-ddbqf" Nov 28 13:55:19 crc kubenswrapper[4857]: I1128 13:55:19.666813 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6b7846d5d5-ddbqf" event={"ID":"960b2298-15f9-436b-93c9-04b0617c0c9b","Type":"ContainerDied","Data":"f03c59fbce1ae6f2cf02e7c1ad44973e4ba6366fbbed5e71f01df183b4e8db9c"} Nov 28 13:55:19 crc kubenswrapper[4857]: I1128 13:55:19.666860 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6b7846d5d5-ddbqf" event={"ID":"960b2298-15f9-436b-93c9-04b0617c0c9b","Type":"ContainerDied","Data":"97bc4d2b38fa9e4d4e921a4fcbc290ddc0e73b0481af2a2ce20714601f8a74b5"} Nov 28 13:55:19 crc kubenswrapper[4857]: I1128 13:55:19.666888 4857 scope.go:117] "RemoveContainer" containerID="47bbdb9e28bdbe71f7a851efff32886322b17e3b9bc0735fe15a53e277caf7fc" Nov 28 13:55:19 crc kubenswrapper[4857]: I1128 13:55:19.667839 4857 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/960b2298-15f9-436b-93c9-04b0617c0c9b-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:19 crc kubenswrapper[4857]: I1128 13:55:19.715305 4857 scope.go:117] "RemoveContainer" containerID="f03c59fbce1ae6f2cf02e7c1ad44973e4ba6366fbbed5e71f01df183b4e8db9c" Nov 28 13:55:19 crc kubenswrapper[4857]: I1128 13:55:19.721903 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-6b7846d5d5-ddbqf"] Nov 28 13:55:19 crc kubenswrapper[4857]: I1128 13:55:19.729178 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-6b7846d5d5-ddbqf"] Nov 28 13:55:19 crc kubenswrapper[4857]: I1128 13:55:19.738681 4857 scope.go:117] "RemoveContainer" containerID="47bbdb9e28bdbe71f7a851efff32886322b17e3b9bc0735fe15a53e277caf7fc" Nov 28 13:55:19 crc kubenswrapper[4857]: E1128 13:55:19.739295 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"47bbdb9e28bdbe71f7a851efff32886322b17e3b9bc0735fe15a53e277caf7fc\": container with ID starting with 47bbdb9e28bdbe71f7a851efff32886322b17e3b9bc0735fe15a53e277caf7fc not found: ID does not exist" containerID="47bbdb9e28bdbe71f7a851efff32886322b17e3b9bc0735fe15a53e277caf7fc" Nov 28 13:55:19 crc kubenswrapper[4857]: I1128 13:55:19.739344 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"47bbdb9e28bdbe71f7a851efff32886322b17e3b9bc0735fe15a53e277caf7fc"} err="failed to get container status \"47bbdb9e28bdbe71f7a851efff32886322b17e3b9bc0735fe15a53e277caf7fc\": rpc error: code = NotFound desc = could not find container \"47bbdb9e28bdbe71f7a851efff32886322b17e3b9bc0735fe15a53e277caf7fc\": container with ID starting with 47bbdb9e28bdbe71f7a851efff32886322b17e3b9bc0735fe15a53e277caf7fc not found: ID does not exist" Nov 28 13:55:19 crc kubenswrapper[4857]: I1128 13:55:19.739379 4857 scope.go:117] "RemoveContainer" containerID="f03c59fbce1ae6f2cf02e7c1ad44973e4ba6366fbbed5e71f01df183b4e8db9c" Nov 28 13:55:19 crc kubenswrapper[4857]: E1128 13:55:19.739960 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f03c59fbce1ae6f2cf02e7c1ad44973e4ba6366fbbed5e71f01df183b4e8db9c\": container with ID starting with f03c59fbce1ae6f2cf02e7c1ad44973e4ba6366fbbed5e71f01df183b4e8db9c not found: ID does not exist" containerID="f03c59fbce1ae6f2cf02e7c1ad44973e4ba6366fbbed5e71f01df183b4e8db9c" Nov 28 13:55:19 crc kubenswrapper[4857]: I1128 
13:55:19.739985 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f03c59fbce1ae6f2cf02e7c1ad44973e4ba6366fbbed5e71f01df183b4e8db9c"} err="failed to get container status \"f03c59fbce1ae6f2cf02e7c1ad44973e4ba6366fbbed5e71f01df183b4e8db9c\": rpc error: code = NotFound desc = could not find container \"f03c59fbce1ae6f2cf02e7c1ad44973e4ba6366fbbed5e71f01df183b4e8db9c\": container with ID starting with f03c59fbce1ae6f2cf02e7c1ad44973e4ba6366fbbed5e71f01df183b4e8db9c not found: ID does not exist" Nov 28 13:55:20 crc kubenswrapper[4857]: I1128 13:55:20.238313 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="960b2298-15f9-436b-93c9-04b0617c0c9b" path="/var/lib/kubelet/pods/960b2298-15f9-436b-93c9-04b0617c0c9b/volumes" Nov 28 13:55:22 crc kubenswrapper[4857]: E1128 13:55:22.045118 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17 is running failed: container process not found" containerID="a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 13:55:22 crc kubenswrapper[4857]: E1128 13:55:22.046216 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17 is running failed: container process not found" containerID="a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 13:55:22 crc kubenswrapper[4857]: E1128 13:55:22.046243 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b92ad2228277ac1c1d0e0ba05d4fc59c373322274d92f626373330dc03f6a207" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 13:55:22 crc kubenswrapper[4857]: E1128 13:55:22.046839 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17 is running failed: container process not found" containerID="a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 13:55:22 crc kubenswrapper[4857]: E1128 13:55:22.046891 4857 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-hgm54" podUID="21fe1399-7f40-43ec-bee8-868c937a6e19" containerName="ovsdb-server" Nov 28 13:55:22 crc kubenswrapper[4857]: E1128 13:55:22.047588 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b92ad2228277ac1c1d0e0ba05d4fc59c373322274d92f626373330dc03f6a207" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 13:55:22 crc kubenswrapper[4857]: E1128 13:55:22.049126 4857 log.go:32] 
"ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b92ad2228277ac1c1d0e0ba05d4fc59c373322274d92f626373330dc03f6a207" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 13:55:22 crc kubenswrapper[4857]: E1128 13:55:22.049168 4857 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-hgm54" podUID="21fe1399-7f40-43ec-bee8-868c937a6e19" containerName="ovs-vswitchd" Nov 28 13:55:24 crc kubenswrapper[4857]: E1128 13:55:24.745558 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:55:24 crc kubenswrapper[4857]: E1128 13:55:24.745926 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd-operator-scripts podName:2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd nodeName:}" failed. No retries permitted until 2025-11-28 13:55:40.745909285 +0000 UTC m=+1590.869850722 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd-operator-scripts") pod "barbican8199-account-delete-z9b9h" (UID: "2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd") : configmap "openstack-scripts" not found Nov 28 13:55:24 crc kubenswrapper[4857]: E1128 13:55:24.847098 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:55:24 crc kubenswrapper[4857]: E1128 13:55:24.847214 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/8288e005-1d07-4989-bc64-64b3ecd62993-operator-scripts podName:8288e005-1d07-4989-bc64-64b3ecd62993 nodeName:}" failed. No retries permitted until 2025-11-28 13:55:40.847199073 +0000 UTC m=+1590.971140500 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/8288e005-1d07-4989-bc64-64b3ecd62993-operator-scripts") pod "novaapi379c-account-delete-7x584" (UID: "8288e005-1d07-4989-bc64-64b3ecd62993") : configmap "openstack-scripts" not found Nov 28 13:55:27 crc kubenswrapper[4857]: E1128 13:55:27.045055 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17 is running failed: container process not found" containerID="a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 13:55:27 crc kubenswrapper[4857]: E1128 13:55:27.045379 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17 is running failed: container process not found" containerID="a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 13:55:27 crc kubenswrapper[4857]: E1128 13:55:27.045716 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17 is running failed: container process not found" containerID="a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 13:55:27 crc kubenswrapper[4857]: E1128 13:55:27.045748 4857 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-hgm54" podUID="21fe1399-7f40-43ec-bee8-868c937a6e19" containerName="ovsdb-server" Nov 28 13:55:27 crc kubenswrapper[4857]: E1128 13:55:27.047403 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b92ad2228277ac1c1d0e0ba05d4fc59c373322274d92f626373330dc03f6a207" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 13:55:27 crc kubenswrapper[4857]: E1128 13:55:27.049380 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b92ad2228277ac1c1d0e0ba05d4fc59c373322274d92f626373330dc03f6a207" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 13:55:27 crc kubenswrapper[4857]: E1128 13:55:27.051355 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b92ad2228277ac1c1d0e0ba05d4fc59c373322274d92f626373330dc03f6a207" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 13:55:27 crc kubenswrapper[4857]: E1128 13:55:27.051447 4857 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: 
container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-hgm54" podUID="21fe1399-7f40-43ec-bee8-868c937a6e19" containerName="ovs-vswitchd" Nov 28 13:55:27 crc kubenswrapper[4857]: I1128 13:55:27.658156 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-cpnwx" Nov 28 13:55:27 crc kubenswrapper[4857]: I1128 13:55:27.701437 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-cpnwx"] Nov 28 13:55:27 crc kubenswrapper[4857]: I1128 13:55:27.738570 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-cpnwx" podUID="7f9668e3-0293-4d8f-aa56-ad830134b0e4" containerName="registry-server" containerID="cri-o://372a3dee7cd3722d8a73533e20a2f50deb6a67a113d54693038ebaa12d6c8a83" gracePeriod=2 Nov 28 13:55:28 crc kubenswrapper[4857]: I1128 13:55:28.696471 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-cpnwx" Nov 28 13:55:28 crc kubenswrapper[4857]: I1128 13:55:28.751493 4857 generic.go:334] "Generic (PLEG): container finished" podID="7f9668e3-0293-4d8f-aa56-ad830134b0e4" containerID="372a3dee7cd3722d8a73533e20a2f50deb6a67a113d54693038ebaa12d6c8a83" exitCode=0 Nov 28 13:55:28 crc kubenswrapper[4857]: I1128 13:55:28.751560 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cpnwx" event={"ID":"7f9668e3-0293-4d8f-aa56-ad830134b0e4","Type":"ContainerDied","Data":"372a3dee7cd3722d8a73533e20a2f50deb6a67a113d54693038ebaa12d6c8a83"} Nov 28 13:55:28 crc kubenswrapper[4857]: I1128 13:55:28.751831 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-cpnwx" event={"ID":"7f9668e3-0293-4d8f-aa56-ad830134b0e4","Type":"ContainerDied","Data":"dcb8094d70ba370f0684fdfa21268925212eb09ae0f3a0480d7b6dc1fcb8cd59"} Nov 28 13:55:28 crc kubenswrapper[4857]: I1128 13:55:28.751593 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-cpnwx" Nov 28 13:55:28 crc kubenswrapper[4857]: I1128 13:55:28.751896 4857 scope.go:117] "RemoveContainer" containerID="372a3dee7cd3722d8a73533e20a2f50deb6a67a113d54693038ebaa12d6c8a83" Nov 28 13:55:28 crc kubenswrapper[4857]: I1128 13:55:28.789789 4857 scope.go:117] "RemoveContainer" containerID="ae39056cc66de7f497a956b990f30bd3eaffff96baacb26893867aa11093d583" Nov 28 13:55:28 crc kubenswrapper[4857]: I1128 13:55:28.807865 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8sgbz\" (UniqueName: \"kubernetes.io/projected/7f9668e3-0293-4d8f-aa56-ad830134b0e4-kube-api-access-8sgbz\") pod \"7f9668e3-0293-4d8f-aa56-ad830134b0e4\" (UID: \"7f9668e3-0293-4d8f-aa56-ad830134b0e4\") " Nov 28 13:55:28 crc kubenswrapper[4857]: I1128 13:55:28.808025 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f9668e3-0293-4d8f-aa56-ad830134b0e4-catalog-content\") pod \"7f9668e3-0293-4d8f-aa56-ad830134b0e4\" (UID: \"7f9668e3-0293-4d8f-aa56-ad830134b0e4\") " Nov 28 13:55:28 crc kubenswrapper[4857]: I1128 13:55:28.808085 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7f9668e3-0293-4d8f-aa56-ad830134b0e4-utilities\") pod \"7f9668e3-0293-4d8f-aa56-ad830134b0e4\" (UID: \"7f9668e3-0293-4d8f-aa56-ad830134b0e4\") " Nov 28 13:55:28 crc kubenswrapper[4857]: I1128 13:55:28.809557 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7f9668e3-0293-4d8f-aa56-ad830134b0e4-utilities" (OuterVolumeSpecName: "utilities") pod "7f9668e3-0293-4d8f-aa56-ad830134b0e4" (UID: "7f9668e3-0293-4d8f-aa56-ad830134b0e4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:55:28 crc kubenswrapper[4857]: I1128 13:55:28.811845 4857 scope.go:117] "RemoveContainer" containerID="85fa48bc2daeb1abb5696dff5b9fc7598558f901326e8bd7d8d3e9deaeb06f59" Nov 28 13:55:28 crc kubenswrapper[4857]: I1128 13:55:28.814170 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f9668e3-0293-4d8f-aa56-ad830134b0e4-kube-api-access-8sgbz" (OuterVolumeSpecName: "kube-api-access-8sgbz") pod "7f9668e3-0293-4d8f-aa56-ad830134b0e4" (UID: "7f9668e3-0293-4d8f-aa56-ad830134b0e4"). InnerVolumeSpecName "kube-api-access-8sgbz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:28 crc kubenswrapper[4857]: I1128 13:55:28.862136 4857 scope.go:117] "RemoveContainer" containerID="372a3dee7cd3722d8a73533e20a2f50deb6a67a113d54693038ebaa12d6c8a83" Nov 28 13:55:28 crc kubenswrapper[4857]: E1128 13:55:28.862590 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"372a3dee7cd3722d8a73533e20a2f50deb6a67a113d54693038ebaa12d6c8a83\": container with ID starting with 372a3dee7cd3722d8a73533e20a2f50deb6a67a113d54693038ebaa12d6c8a83 not found: ID does not exist" containerID="372a3dee7cd3722d8a73533e20a2f50deb6a67a113d54693038ebaa12d6c8a83" Nov 28 13:55:28 crc kubenswrapper[4857]: I1128 13:55:28.862701 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"372a3dee7cd3722d8a73533e20a2f50deb6a67a113d54693038ebaa12d6c8a83"} err="failed to get container status \"372a3dee7cd3722d8a73533e20a2f50deb6a67a113d54693038ebaa12d6c8a83\": rpc error: code = NotFound desc = could not find container \"372a3dee7cd3722d8a73533e20a2f50deb6a67a113d54693038ebaa12d6c8a83\": container with ID starting with 372a3dee7cd3722d8a73533e20a2f50deb6a67a113d54693038ebaa12d6c8a83 not found: ID does not exist" Nov 28 13:55:28 crc kubenswrapper[4857]: I1128 13:55:28.862784 4857 scope.go:117] "RemoveContainer" containerID="ae39056cc66de7f497a956b990f30bd3eaffff96baacb26893867aa11093d583" Nov 28 13:55:28 crc kubenswrapper[4857]: E1128 13:55:28.863329 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ae39056cc66de7f497a956b990f30bd3eaffff96baacb26893867aa11093d583\": container with ID starting with ae39056cc66de7f497a956b990f30bd3eaffff96baacb26893867aa11093d583 not found: ID does not exist" containerID="ae39056cc66de7f497a956b990f30bd3eaffff96baacb26893867aa11093d583" Nov 28 13:55:28 crc kubenswrapper[4857]: I1128 13:55:28.863423 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ae39056cc66de7f497a956b990f30bd3eaffff96baacb26893867aa11093d583"} err="failed to get container status \"ae39056cc66de7f497a956b990f30bd3eaffff96baacb26893867aa11093d583\": rpc error: code = NotFound desc = could not find container \"ae39056cc66de7f497a956b990f30bd3eaffff96baacb26893867aa11093d583\": container with ID starting with ae39056cc66de7f497a956b990f30bd3eaffff96baacb26893867aa11093d583 not found: ID does not exist" Nov 28 13:55:28 crc kubenswrapper[4857]: I1128 13:55:28.863509 4857 scope.go:117] "RemoveContainer" containerID="85fa48bc2daeb1abb5696dff5b9fc7598558f901326e8bd7d8d3e9deaeb06f59" Nov 28 13:55:28 crc kubenswrapper[4857]: E1128 13:55:28.864372 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"85fa48bc2daeb1abb5696dff5b9fc7598558f901326e8bd7d8d3e9deaeb06f59\": container with ID starting with 85fa48bc2daeb1abb5696dff5b9fc7598558f901326e8bd7d8d3e9deaeb06f59 not found: ID does not exist" containerID="85fa48bc2daeb1abb5696dff5b9fc7598558f901326e8bd7d8d3e9deaeb06f59" Nov 28 13:55:28 crc kubenswrapper[4857]: I1128 13:55:28.864457 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"85fa48bc2daeb1abb5696dff5b9fc7598558f901326e8bd7d8d3e9deaeb06f59"} err="failed to get container status \"85fa48bc2daeb1abb5696dff5b9fc7598558f901326e8bd7d8d3e9deaeb06f59\": rpc error: code = NotFound desc = could not 
find container \"85fa48bc2daeb1abb5696dff5b9fc7598558f901326e8bd7d8d3e9deaeb06f59\": container with ID starting with 85fa48bc2daeb1abb5696dff5b9fc7598558f901326e8bd7d8d3e9deaeb06f59 not found: ID does not exist" Nov 28 13:55:28 crc kubenswrapper[4857]: I1128 13:55:28.864694 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7f9668e3-0293-4d8f-aa56-ad830134b0e4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7f9668e3-0293-4d8f-aa56-ad830134b0e4" (UID: "7f9668e3-0293-4d8f-aa56-ad830134b0e4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:55:28 crc kubenswrapper[4857]: I1128 13:55:28.910280 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8sgbz\" (UniqueName: \"kubernetes.io/projected/7f9668e3-0293-4d8f-aa56-ad830134b0e4-kube-api-access-8sgbz\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:28 crc kubenswrapper[4857]: I1128 13:55:28.910325 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f9668e3-0293-4d8f-aa56-ad830134b0e4-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:28 crc kubenswrapper[4857]: I1128 13:55:28.910337 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7f9668e3-0293-4d8f-aa56-ad830134b0e4-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:29 crc kubenswrapper[4857]: I1128 13:55:29.091487 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-cpnwx"] Nov 28 13:55:29 crc kubenswrapper[4857]: I1128 13:55:29.101382 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-cpnwx"] Nov 28 13:55:30 crc kubenswrapper[4857]: I1128 13:55:30.246830 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7f9668e3-0293-4d8f-aa56-ad830134b0e4" path="/var/lib/kubelet/pods/7f9668e3-0293-4d8f-aa56-ad830134b0e4/volumes" Nov 28 13:55:31 crc kubenswrapper[4857]: I1128 13:55:31.804911 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-hgm54_21fe1399-7f40-43ec-bee8-868c937a6e19/ovs-vswitchd/0.log" Nov 28 13:55:31 crc kubenswrapper[4857]: I1128 13:55:31.805685 4857 generic.go:334] "Generic (PLEG): container finished" podID="21fe1399-7f40-43ec-bee8-868c937a6e19" containerID="b92ad2228277ac1c1d0e0ba05d4fc59c373322274d92f626373330dc03f6a207" exitCode=137 Nov 28 13:55:31 crc kubenswrapper[4857]: I1128 13:55:31.805724 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-hgm54" event={"ID":"21fe1399-7f40-43ec-bee8-868c937a6e19","Type":"ContainerDied","Data":"b92ad2228277ac1c1d0e0ba05d4fc59c373322274d92f626373330dc03f6a207"} Nov 28 13:55:32 crc kubenswrapper[4857]: E1128 13:55:32.047922 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b92ad2228277ac1c1d0e0ba05d4fc59c373322274d92f626373330dc03f6a207 is running failed: container process not found" containerID="b92ad2228277ac1c1d0e0ba05d4fc59c373322274d92f626373330dc03f6a207" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 13:55:32 crc kubenswrapper[4857]: E1128 13:55:32.048346 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 
a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17 is running failed: container process not found" containerID="a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 13:55:32 crc kubenswrapper[4857]: E1128 13:55:32.049274 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17 is running failed: container process not found" containerID="a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 13:55:32 crc kubenswrapper[4857]: E1128 13:55:32.049313 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b92ad2228277ac1c1d0e0ba05d4fc59c373322274d92f626373330dc03f6a207 is running failed: container process not found" containerID="b92ad2228277ac1c1d0e0ba05d4fc59c373322274d92f626373330dc03f6a207" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 13:55:32 crc kubenswrapper[4857]: E1128 13:55:32.049682 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17 is running failed: container process not found" containerID="a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 28 13:55:32 crc kubenswrapper[4857]: E1128 13:55:32.049721 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b92ad2228277ac1c1d0e0ba05d4fc59c373322274d92f626373330dc03f6a207 is running failed: container process not found" containerID="b92ad2228277ac1c1d0e0ba05d4fc59c373322274d92f626373330dc03f6a207" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 28 13:55:32 crc kubenswrapper[4857]: E1128 13:55:32.049719 4857 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-hgm54" podUID="21fe1399-7f40-43ec-bee8-868c937a6e19" containerName="ovsdb-server" Nov 28 13:55:32 crc kubenswrapper[4857]: E1128 13:55:32.049764 4857 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b92ad2228277ac1c1d0e0ba05d4fc59c373322274d92f626373330dc03f6a207 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-hgm54" podUID="21fe1399-7f40-43ec-bee8-868c937a6e19" containerName="ovs-vswitchd" Nov 28 13:55:32 crc kubenswrapper[4857]: I1128 13:55:32.197670 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-hgm54_21fe1399-7f40-43ec-bee8-868c937a6e19/ovs-vswitchd/0.log" Nov 28 13:55:32 crc kubenswrapper[4857]: I1128 13:55:32.198352 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-hgm54" Nov 28 13:55:32 crc kubenswrapper[4857]: I1128 13:55:32.367510 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-76ngp\" (UniqueName: \"kubernetes.io/projected/21fe1399-7f40-43ec-bee8-868c937a6e19-kube-api-access-76ngp\") pod \"21fe1399-7f40-43ec-bee8-868c937a6e19\" (UID: \"21fe1399-7f40-43ec-bee8-868c937a6e19\") " Nov 28 13:55:32 crc kubenswrapper[4857]: I1128 13:55:32.367572 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/21fe1399-7f40-43ec-bee8-868c937a6e19-var-lib\") pod \"21fe1399-7f40-43ec-bee8-868c937a6e19\" (UID: \"21fe1399-7f40-43ec-bee8-868c937a6e19\") " Nov 28 13:55:32 crc kubenswrapper[4857]: I1128 13:55:32.367663 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/21fe1399-7f40-43ec-bee8-868c937a6e19-etc-ovs\") pod \"21fe1399-7f40-43ec-bee8-868c937a6e19\" (UID: \"21fe1399-7f40-43ec-bee8-868c937a6e19\") " Nov 28 13:55:32 crc kubenswrapper[4857]: I1128 13:55:32.367685 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/21fe1399-7f40-43ec-bee8-868c937a6e19-var-log\") pod \"21fe1399-7f40-43ec-bee8-868c937a6e19\" (UID: \"21fe1399-7f40-43ec-bee8-868c937a6e19\") " Nov 28 13:55:32 crc kubenswrapper[4857]: I1128 13:55:32.367710 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/21fe1399-7f40-43ec-bee8-868c937a6e19-scripts\") pod \"21fe1399-7f40-43ec-bee8-868c937a6e19\" (UID: \"21fe1399-7f40-43ec-bee8-868c937a6e19\") " Nov 28 13:55:32 crc kubenswrapper[4857]: I1128 13:55:32.367708 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/21fe1399-7f40-43ec-bee8-868c937a6e19-var-lib" (OuterVolumeSpecName: "var-lib") pod "21fe1399-7f40-43ec-bee8-868c937a6e19" (UID: "21fe1399-7f40-43ec-bee8-868c937a6e19"). InnerVolumeSpecName "var-lib". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:55:32 crc kubenswrapper[4857]: I1128 13:55:32.367769 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/21fe1399-7f40-43ec-bee8-868c937a6e19-var-run\") pod \"21fe1399-7f40-43ec-bee8-868c937a6e19\" (UID: \"21fe1399-7f40-43ec-bee8-868c937a6e19\") " Nov 28 13:55:32 crc kubenswrapper[4857]: I1128 13:55:32.367786 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/21fe1399-7f40-43ec-bee8-868c937a6e19-var-log" (OuterVolumeSpecName: "var-log") pod "21fe1399-7f40-43ec-bee8-868c937a6e19" (UID: "21fe1399-7f40-43ec-bee8-868c937a6e19"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:55:32 crc kubenswrapper[4857]: I1128 13:55:32.367803 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/21fe1399-7f40-43ec-bee8-868c937a6e19-etc-ovs" (OuterVolumeSpecName: "etc-ovs") pod "21fe1399-7f40-43ec-bee8-868c937a6e19" (UID: "21fe1399-7f40-43ec-bee8-868c937a6e19"). InnerVolumeSpecName "etc-ovs". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:55:32 crc kubenswrapper[4857]: I1128 13:55:32.368073 4857 reconciler_common.go:293] "Volume detached for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/21fe1399-7f40-43ec-bee8-868c937a6e19-var-lib\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:32 crc kubenswrapper[4857]: I1128 13:55:32.368086 4857 reconciler_common.go:293] "Volume detached for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/21fe1399-7f40-43ec-bee8-868c937a6e19-etc-ovs\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:32 crc kubenswrapper[4857]: I1128 13:55:32.368093 4857 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/21fe1399-7f40-43ec-bee8-868c937a6e19-var-log\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:32 crc kubenswrapper[4857]: I1128 13:55:32.368269 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/21fe1399-7f40-43ec-bee8-868c937a6e19-var-run" (OuterVolumeSpecName: "var-run") pod "21fe1399-7f40-43ec-bee8-868c937a6e19" (UID: "21fe1399-7f40-43ec-bee8-868c937a6e19"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 13:55:32 crc kubenswrapper[4857]: I1128 13:55:32.368814 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/21fe1399-7f40-43ec-bee8-868c937a6e19-scripts" (OuterVolumeSpecName: "scripts") pod "21fe1399-7f40-43ec-bee8-868c937a6e19" (UID: "21fe1399-7f40-43ec-bee8-868c937a6e19"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:55:32 crc kubenswrapper[4857]: I1128 13:55:32.375226 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/21fe1399-7f40-43ec-bee8-868c937a6e19-kube-api-access-76ngp" (OuterVolumeSpecName: "kube-api-access-76ngp") pod "21fe1399-7f40-43ec-bee8-868c937a6e19" (UID: "21fe1399-7f40-43ec-bee8-868c937a6e19"). InnerVolumeSpecName "kube-api-access-76ngp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:32 crc kubenswrapper[4857]: I1128 13:55:32.469548 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-76ngp\" (UniqueName: \"kubernetes.io/projected/21fe1399-7f40-43ec-bee8-868c937a6e19-kube-api-access-76ngp\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:32 crc kubenswrapper[4857]: I1128 13:55:32.469974 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/21fe1399-7f40-43ec-bee8-868c937a6e19-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:32 crc kubenswrapper[4857]: I1128 13:55:32.470001 4857 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/21fe1399-7f40-43ec-bee8-868c937a6e19-var-run\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:32 crc kubenswrapper[4857]: I1128 13:55:32.824729 4857 generic.go:334] "Generic (PLEG): container finished" podID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerID="cf76c7b61e8171e795785fc16f94d5afd912c93529cbd3ce652f846abc4be50f" exitCode=137 Nov 28 13:55:32 crc kubenswrapper[4857]: I1128 13:55:32.824788 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7dacf187-3671-4114-a93e-e2296c8c20b2","Type":"ContainerDied","Data":"cf76c7b61e8171e795785fc16f94d5afd912c93529cbd3ce652f846abc4be50f"} Nov 28 13:55:32 crc kubenswrapper[4857]: I1128 13:55:32.826555 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-hgm54_21fe1399-7f40-43ec-bee8-868c937a6e19/ovs-vswitchd/0.log" Nov 28 13:55:32 crc kubenswrapper[4857]: I1128 13:55:32.827143 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-hgm54" event={"ID":"21fe1399-7f40-43ec-bee8-868c937a6e19","Type":"ContainerDied","Data":"80e5d3025c6d5fe4a28030191cceb93e94c859ede9bd97436b6a592eec5a348b"} Nov 28 13:55:32 crc kubenswrapper[4857]: I1128 13:55:32.827184 4857 scope.go:117] "RemoveContainer" containerID="b92ad2228277ac1c1d0e0ba05d4fc59c373322274d92f626373330dc03f6a207" Nov 28 13:55:32 crc kubenswrapper[4857]: I1128 13:55:32.827296 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-hgm54" Nov 28 13:55:32 crc kubenswrapper[4857]: I1128 13:55:32.883121 4857 scope.go:117] "RemoveContainer" containerID="a05466f926c2c39424a5b6f019eb09ff5ade578e55bb3ec6ddb2e7cd737bcc17" Nov 28 13:55:32 crc kubenswrapper[4857]: I1128 13:55:32.892776 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Nov 28 13:55:32 crc kubenswrapper[4857]: I1128 13:55:32.893140 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-hgm54"] Nov 28 13:55:32 crc kubenswrapper[4857]: I1128 13:55:32.899400 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-ovs-hgm54"] Nov 28 13:55:32 crc kubenswrapper[4857]: I1128 13:55:32.905441 4857 scope.go:117] "RemoveContainer" containerID="8dabeda087aaf27aa60292912e3ecf4bbf1cddb7f5732a95a3564f65f7d30482" Nov 28 13:55:33 crc kubenswrapper[4857]: I1128 13:55:33.077269 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7dacf187-3671-4114-a93e-e2296c8c20b2-etc-swift\") pod \"7dacf187-3671-4114-a93e-e2296c8c20b2\" (UID: \"7dacf187-3671-4114-a93e-e2296c8c20b2\") " Nov 28 13:55:33 crc kubenswrapper[4857]: I1128 13:55:33.077376 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swift\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"7dacf187-3671-4114-a93e-e2296c8c20b2\" (UID: \"7dacf187-3671-4114-a93e-e2296c8c20b2\") " Nov 28 13:55:33 crc kubenswrapper[4857]: I1128 13:55:33.077433 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/7dacf187-3671-4114-a93e-e2296c8c20b2-lock\") pod \"7dacf187-3671-4114-a93e-e2296c8c20b2\" (UID: \"7dacf187-3671-4114-a93e-e2296c8c20b2\") " Nov 28 13:55:33 crc kubenswrapper[4857]: I1128 13:55:33.077486 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/7dacf187-3671-4114-a93e-e2296c8c20b2-cache\") pod \"7dacf187-3671-4114-a93e-e2296c8c20b2\" (UID: \"7dacf187-3671-4114-a93e-e2296c8c20b2\") " Nov 28 13:55:33 crc kubenswrapper[4857]: I1128 13:55:33.077552 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kjqcd\" (UniqueName: \"kubernetes.io/projected/7dacf187-3671-4114-a93e-e2296c8c20b2-kube-api-access-kjqcd\") pod \"7dacf187-3671-4114-a93e-e2296c8c20b2\" (UID: \"7dacf187-3671-4114-a93e-e2296c8c20b2\") " Nov 28 13:55:33 crc kubenswrapper[4857]: I1128 13:55:33.078007 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7dacf187-3671-4114-a93e-e2296c8c20b2-lock" (OuterVolumeSpecName: "lock") pod "7dacf187-3671-4114-a93e-e2296c8c20b2" (UID: "7dacf187-3671-4114-a93e-e2296c8c20b2"). InnerVolumeSpecName "lock". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:55:33 crc kubenswrapper[4857]: I1128 13:55:33.078075 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7dacf187-3671-4114-a93e-e2296c8c20b2-cache" (OuterVolumeSpecName: "cache") pod "7dacf187-3671-4114-a93e-e2296c8c20b2" (UID: "7dacf187-3671-4114-a93e-e2296c8c20b2"). InnerVolumeSpecName "cache". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 13:55:33 crc kubenswrapper[4857]: I1128 13:55:33.080976 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "swift") pod "7dacf187-3671-4114-a93e-e2296c8c20b2" (UID: "7dacf187-3671-4114-a93e-e2296c8c20b2"). InnerVolumeSpecName "local-storage01-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 28 13:55:33 crc kubenswrapper[4857]: I1128 13:55:33.081491 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7dacf187-3671-4114-a93e-e2296c8c20b2-kube-api-access-kjqcd" (OuterVolumeSpecName: "kube-api-access-kjqcd") pod "7dacf187-3671-4114-a93e-e2296c8c20b2" (UID: "7dacf187-3671-4114-a93e-e2296c8c20b2"). InnerVolumeSpecName "kube-api-access-kjqcd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:33 crc kubenswrapper[4857]: I1128 13:55:33.083770 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7dacf187-3671-4114-a93e-e2296c8c20b2-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "7dacf187-3671-4114-a93e-e2296c8c20b2" (UID: "7dacf187-3671-4114-a93e-e2296c8c20b2"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:33 crc kubenswrapper[4857]: I1128 13:55:33.178933 4857 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/7dacf187-3671-4114-a93e-e2296c8c20b2-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:33 crc kubenswrapper[4857]: I1128 13:55:33.179021 4857 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Nov 28 13:55:33 crc kubenswrapper[4857]: I1128 13:55:33.179041 4857 reconciler_common.go:293] "Volume detached for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/7dacf187-3671-4114-a93e-e2296c8c20b2-lock\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:33 crc kubenswrapper[4857]: I1128 13:55:33.179055 4857 reconciler_common.go:293] "Volume detached for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/7dacf187-3671-4114-a93e-e2296c8c20b2-cache\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:33 crc kubenswrapper[4857]: I1128 13:55:33.179071 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kjqcd\" (UniqueName: \"kubernetes.io/projected/7dacf187-3671-4114-a93e-e2296c8c20b2-kube-api-access-kjqcd\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:33 crc kubenswrapper[4857]: I1128 13:55:33.199846 4857 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Nov 28 13:55:33 crc kubenswrapper[4857]: I1128 13:55:33.285187 4857 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:33 crc kubenswrapper[4857]: I1128 13:55:33.845622 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"7dacf187-3671-4114-a93e-e2296c8c20b2","Type":"ContainerDied","Data":"54af9ca9a947932fbcf2afe5ca523893b1211c8003a55f120e619d2e792d7265"} Nov 28 13:55:33 crc kubenswrapper[4857]: I1128 13:55:33.845993 4857 scope.go:117] "RemoveContainer" containerID="cf76c7b61e8171e795785fc16f94d5afd912c93529cbd3ce652f846abc4be50f" Nov 28 13:55:33 crc kubenswrapper[4857]: I1128 13:55:33.845740 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Nov 28 13:55:33 crc kubenswrapper[4857]: I1128 13:55:33.867157 4857 scope.go:117] "RemoveContainer" containerID="75a66b9748e2f49b7fe56ad9c99da91918be5cc9c7c5b50c82a8f29587c6dd41" Nov 28 13:55:33 crc kubenswrapper[4857]: I1128 13:55:33.884107 4857 scope.go:117] "RemoveContainer" containerID="9636087b646b01f79b8ba6470a873b59e807cd17b6f1e033005a6e5655b75269" Nov 28 13:55:33 crc kubenswrapper[4857]: I1128 13:55:33.885662 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"] Nov 28 13:55:33 crc kubenswrapper[4857]: I1128 13:55:33.893917 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-storage-0"] Nov 28 13:55:33 crc kubenswrapper[4857]: I1128 13:55:33.905101 4857 scope.go:117] "RemoveContainer" containerID="f27c04a27d6f3fb5f92f4acda7c42185f0f649ff426b2f0c0fc82c87eb5c2df2" Nov 28 13:55:33 crc kubenswrapper[4857]: I1128 13:55:33.922334 4857 scope.go:117] "RemoveContainer" containerID="1dba2da3e5dc861fc5c8a1daae2fd1574ec3c6ba40ffdb0cbfcb19d46be889c1" Nov 28 13:55:33 crc kubenswrapper[4857]: I1128 13:55:33.942656 4857 scope.go:117] "RemoveContainer" containerID="797af21b1a27903599b036f4c694bee114318cb17d785c9ef036ff8854701e9f" Nov 28 13:55:33 crc kubenswrapper[4857]: I1128 13:55:33.967191 4857 scope.go:117] "RemoveContainer" containerID="fca2aa8f676e96e45c93a697eeabe06bb4bf6351b5998989a5d94cd1c765da97" Nov 28 13:55:33 crc kubenswrapper[4857]: I1128 13:55:33.984773 4857 scope.go:117] "RemoveContainer" containerID="bd6d4921953db76ad5068a581723271ca2d557ab2915b227c4ec3a2e35dbb714" Nov 28 13:55:34 crc kubenswrapper[4857]: I1128 13:55:34.006389 4857 scope.go:117] "RemoveContainer" containerID="2761311c0d0657f7895e8425cee039b5e9d1f6b44d147413193ce1a637e0d206" Nov 28 13:55:34 crc kubenswrapper[4857]: I1128 13:55:34.025628 4857 scope.go:117] "RemoveContainer" containerID="043905c220a58f2cc3f72c7c5ff10d14437639fd54c68815c19eb821d3f8691b" Nov 28 13:55:34 crc kubenswrapper[4857]: I1128 13:55:34.046783 4857 scope.go:117] "RemoveContainer" containerID="336ae6d3cd985dc92a4379227cc3a7df30cdbac847520f8327e5fce00a85e01c" Nov 28 13:55:34 crc kubenswrapper[4857]: I1128 13:55:34.070152 4857 scope.go:117] "RemoveContainer" containerID="684d617680cd018b845bc46ea83aaf268880e5e96d90c1f4c74455668100a5fb" Nov 28 13:55:34 crc kubenswrapper[4857]: I1128 13:55:34.090151 4857 scope.go:117] "RemoveContainer" containerID="7f8c7069308f0a6173f6d99219cd32e4aee978bcd30684463830f8831fa3dc54" Nov 28 13:55:34 crc kubenswrapper[4857]: I1128 13:55:34.108593 4857 scope.go:117] "RemoveContainer" containerID="4e216ae629f3bf57169773aa0918d2098ca8722a57b52a0b59f920ee5fe40042" Nov 28 13:55:34 crc kubenswrapper[4857]: I1128 13:55:34.128460 4857 scope.go:117] "RemoveContainer" containerID="997f645a4bf32bedcf75f896750b179b3b64c864f0aa44fb1505c0ce4a2004d3" Nov 28 13:55:34 crc kubenswrapper[4857]: I1128 13:55:34.243644 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="21fe1399-7f40-43ec-bee8-868c937a6e19" path="/var/lib/kubelet/pods/21fe1399-7f40-43ec-bee8-868c937a6e19/volumes" Nov 28 13:55:34 crc kubenswrapper[4857]: I1128 13:55:34.244734 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" path="/var/lib/kubelet/pods/7dacf187-3671-4114-a93e-e2296c8c20b2/volumes" Nov 28 13:55:37 crc kubenswrapper[4857]: I1128 13:55:37.331736 4857 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" 
cgroupName=["kubepods","besteffort","pod2dfd7b5e-9e1b-4f85-9933-2f3f55cee399"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod2dfd7b5e-9e1b-4f85-9933-2f3f55cee399] : Timed out while waiting for systemd to remove kubepods-besteffort-pod2dfd7b5e_9e1b_4f85_9933_2f3f55cee399.slice" Nov 28 13:55:37 crc kubenswrapper[4857]: E1128 13:55:37.332061 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to delete cgroup paths for [kubepods besteffort pod2dfd7b5e-9e1b-4f85-9933-2f3f55cee399] : unable to destroy cgroup paths for cgroup [kubepods besteffort pod2dfd7b5e-9e1b-4f85-9933-2f3f55cee399] : Timed out while waiting for systemd to remove kubepods-besteffort-pod2dfd7b5e_9e1b_4f85_9933_2f3f55cee399.slice" pod="openstack/ovsdbserver-sb-0" podUID="2dfd7b5e-9e1b-4f85-9933-2f3f55cee399" Nov 28 13:55:37 crc kubenswrapper[4857]: I1128 13:55:37.335553 4857 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","poddceb7667-07bc-486b-b65f-c87427949ffd"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort poddceb7667-07bc-486b-b65f-c87427949ffd] : Timed out while waiting for systemd to remove kubepods-besteffort-poddceb7667_07bc_486b_b65f_c87427949ffd.slice" Nov 28 13:55:37 crc kubenswrapper[4857]: E1128 13:55:37.335607 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to delete cgroup paths for [kubepods besteffort poddceb7667-07bc-486b-b65f-c87427949ffd] : unable to destroy cgroup paths for cgroup [kubepods besteffort poddceb7667-07bc-486b-b65f-c87427949ffd] : Timed out while waiting for systemd to remove kubepods-besteffort-poddceb7667_07bc_486b_b65f_c87427949ffd.slice" pod="openstack/ovsdbserver-nb-0" podUID="dceb7667-07bc-486b-b65f-c87427949ffd" Nov 28 13:55:37 crc kubenswrapper[4857]: I1128 13:55:37.883203 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 28 13:55:37 crc kubenswrapper[4857]: I1128 13:55:37.883203 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 28 13:55:37 crc kubenswrapper[4857]: I1128 13:55:37.964274 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 13:55:37 crc kubenswrapper[4857]: I1128 13:55:37.973871 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 13:55:37 crc kubenswrapper[4857]: I1128 13:55:37.982259 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 13:55:37 crc kubenswrapper[4857]: I1128 13:55:37.992527 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 13:55:38 crc kubenswrapper[4857]: I1128 13:55:38.239023 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2dfd7b5e-9e1b-4f85-9933-2f3f55cee399" path="/var/lib/kubelet/pods/2dfd7b5e-9e1b-4f85-9933-2f3f55cee399/volumes" Nov 28 13:55:38 crc kubenswrapper[4857]: I1128 13:55:38.240086 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dceb7667-07bc-486b-b65f-c87427949ffd" path="/var/lib/kubelet/pods/dceb7667-07bc-486b-b65f-c87427949ffd/volumes" Nov 28 13:55:38 crc kubenswrapper[4857]: I1128 13:55:38.478524 4857 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod2bf149d2-9beb-4394-921a-a703473391aa"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod2bf149d2-9beb-4394-921a-a703473391aa] : Timed out while waiting for systemd to remove kubepods-besteffort-pod2bf149d2_9beb_4394_921a_a703473391aa.slice" Nov 28 13:55:39 crc kubenswrapper[4857]: I1128 13:55:39.734168 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novacell0837f-account-delete-fhf8k" Nov 28 13:55:39 crc kubenswrapper[4857]: I1128 13:55:39.787275 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hrgvk\" (UniqueName: \"kubernetes.io/projected/e1db40e4-bf66-4d82-aa94-c54d44513220-kube-api-access-hrgvk\") pod \"e1db40e4-bf66-4d82-aa94-c54d44513220\" (UID: \"e1db40e4-bf66-4d82-aa94-c54d44513220\") " Nov 28 13:55:39 crc kubenswrapper[4857]: I1128 13:55:39.787372 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e1db40e4-bf66-4d82-aa94-c54d44513220-operator-scripts\") pod \"e1db40e4-bf66-4d82-aa94-c54d44513220\" (UID: \"e1db40e4-bf66-4d82-aa94-c54d44513220\") " Nov 28 13:55:39 crc kubenswrapper[4857]: I1128 13:55:39.788504 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e1db40e4-bf66-4d82-aa94-c54d44513220-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e1db40e4-bf66-4d82-aa94-c54d44513220" (UID: "e1db40e4-bf66-4d82-aa94-c54d44513220"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:55:39 crc kubenswrapper[4857]: I1128 13:55:39.795109 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e1db40e4-bf66-4d82-aa94-c54d44513220-kube-api-access-hrgvk" (OuterVolumeSpecName: "kube-api-access-hrgvk") pod "e1db40e4-bf66-4d82-aa94-c54d44513220" (UID: "e1db40e4-bf66-4d82-aa94-c54d44513220"). InnerVolumeSpecName "kube-api-access-hrgvk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:39 crc kubenswrapper[4857]: I1128 13:55:39.889180 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e1db40e4-bf66-4d82-aa94-c54d44513220-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:39 crc kubenswrapper[4857]: I1128 13:55:39.889224 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hrgvk\" (UniqueName: \"kubernetes.io/projected/e1db40e4-bf66-4d82-aa94-c54d44513220-kube-api-access-hrgvk\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:39 crc kubenswrapper[4857]: I1128 13:55:39.904126 4857 generic.go:334] "Generic (PLEG): container finished" podID="e1db40e4-bf66-4d82-aa94-c54d44513220" containerID="62f4e36825688f44319c209c848cc0648dd26d49c8a8e49ee229742999c6a64c" exitCode=137 Nov 28 13:55:39 crc kubenswrapper[4857]: I1128 13:55:39.904168 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell0837f-account-delete-fhf8k" event={"ID":"e1db40e4-bf66-4d82-aa94-c54d44513220","Type":"ContainerDied","Data":"62f4e36825688f44319c209c848cc0648dd26d49c8a8e49ee229742999c6a64c"} Nov 28 13:55:39 crc kubenswrapper[4857]: I1128 13:55:39.904192 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell0837f-account-delete-fhf8k" event={"ID":"e1db40e4-bf66-4d82-aa94-c54d44513220","Type":"ContainerDied","Data":"199bdb20409593a25b9f958703eabbf5816d41bd1fe2394b910fbf1883dc3643"} Nov 28 13:55:39 crc kubenswrapper[4857]: I1128 13:55:39.904211 4857 scope.go:117] "RemoveContainer" containerID="62f4e36825688f44319c209c848cc0648dd26d49c8a8e49ee229742999c6a64c" Nov 28 13:55:39 crc kubenswrapper[4857]: I1128 13:55:39.904320 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/novacell0837f-account-delete-fhf8k" Nov 28 13:55:39 crc kubenswrapper[4857]: I1128 13:55:39.932257 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell0837f-account-delete-fhf8k"] Nov 28 13:55:39 crc kubenswrapper[4857]: I1128 13:55:39.939806 4857 scope.go:117] "RemoveContainer" containerID="62f4e36825688f44319c209c848cc0648dd26d49c8a8e49ee229742999c6a64c" Nov 28 13:55:39 crc kubenswrapper[4857]: I1128 13:55:39.939965 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/novacell0837f-account-delete-fhf8k"] Nov 28 13:55:39 crc kubenswrapper[4857]: E1128 13:55:39.942696 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"62f4e36825688f44319c209c848cc0648dd26d49c8a8e49ee229742999c6a64c\": container with ID starting with 62f4e36825688f44319c209c848cc0648dd26d49c8a8e49ee229742999c6a64c not found: ID does not exist" containerID="62f4e36825688f44319c209c848cc0648dd26d49c8a8e49ee229742999c6a64c" Nov 28 13:55:39 crc kubenswrapper[4857]: I1128 13:55:39.942753 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"62f4e36825688f44319c209c848cc0648dd26d49c8a8e49ee229742999c6a64c"} err="failed to get container status \"62f4e36825688f44319c209c848cc0648dd26d49c8a8e49ee229742999c6a64c\": rpc error: code = NotFound desc = could not find container \"62f4e36825688f44319c209c848cc0648dd26d49c8a8e49ee229742999c6a64c\": container with ID starting with 62f4e36825688f44319c209c848cc0648dd26d49c8a8e49ee229742999c6a64c not found: ID does not exist" Nov 28 13:55:40 crc kubenswrapper[4857]: I1128 13:55:40.250082 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e1db40e4-bf66-4d82-aa94-c54d44513220" path="/var/lib/kubelet/pods/e1db40e4-bf66-4d82-aa94-c54d44513220/volumes" Nov 28 13:55:40 crc kubenswrapper[4857]: I1128 13:55:40.484303 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novaapi379c-account-delete-7x584" Nov 28 13:55:40 crc kubenswrapper[4857]: I1128 13:55:40.497609 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m7pgl\" (UniqueName: \"kubernetes.io/projected/8288e005-1d07-4989-bc64-64b3ecd62993-kube-api-access-m7pgl\") pod \"8288e005-1d07-4989-bc64-64b3ecd62993\" (UID: \"8288e005-1d07-4989-bc64-64b3ecd62993\") " Nov 28 13:55:40 crc kubenswrapper[4857]: I1128 13:55:40.497694 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8288e005-1d07-4989-bc64-64b3ecd62993-operator-scripts\") pod \"8288e005-1d07-4989-bc64-64b3ecd62993\" (UID: \"8288e005-1d07-4989-bc64-64b3ecd62993\") " Nov 28 13:55:40 crc kubenswrapper[4857]: I1128 13:55:40.498477 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8288e005-1d07-4989-bc64-64b3ecd62993-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8288e005-1d07-4989-bc64-64b3ecd62993" (UID: "8288e005-1d07-4989-bc64-64b3ecd62993"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:55:40 crc kubenswrapper[4857]: I1128 13:55:40.504215 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8288e005-1d07-4989-bc64-64b3ecd62993-kube-api-access-m7pgl" (OuterVolumeSpecName: "kube-api-access-m7pgl") pod "8288e005-1d07-4989-bc64-64b3ecd62993" (UID: "8288e005-1d07-4989-bc64-64b3ecd62993"). InnerVolumeSpecName "kube-api-access-m7pgl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:40 crc kubenswrapper[4857]: I1128 13:55:40.598987 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m7pgl\" (UniqueName: \"kubernetes.io/projected/8288e005-1d07-4989-bc64-64b3ecd62993-kube-api-access-m7pgl\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:40 crc kubenswrapper[4857]: I1128 13:55:40.599018 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8288e005-1d07-4989-bc64-64b3ecd62993-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:40 crc kubenswrapper[4857]: E1128 13:55:40.801360 4857 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 28 13:55:40 crc kubenswrapper[4857]: E1128 13:55:40.801446 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd-operator-scripts podName:2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd nodeName:}" failed. No retries permitted until 2025-11-28 13:56:12.801425394 +0000 UTC m=+1622.925366831 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd-operator-scripts") pod "barbican8199-account-delete-z9b9h" (UID: "2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd") : configmap "openstack-scripts" not found Nov 28 13:55:40 crc kubenswrapper[4857]: I1128 13:55:40.921713 4857 generic.go:334] "Generic (PLEG): container finished" podID="8288e005-1d07-4989-bc64-64b3ecd62993" containerID="29ebf0561572ef46ce57cfc73df73475b6e96599b050827b8ce6ce81e4b1ad95" exitCode=137 Nov 28 13:55:40 crc kubenswrapper[4857]: I1128 13:55:40.921751 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapi379c-account-delete-7x584" event={"ID":"8288e005-1d07-4989-bc64-64b3ecd62993","Type":"ContainerDied","Data":"29ebf0561572ef46ce57cfc73df73475b6e96599b050827b8ce6ce81e4b1ad95"} Nov 28 13:55:40 crc kubenswrapper[4857]: I1128 13:55:40.921779 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/novaapi379c-account-delete-7x584" Nov 28 13:55:40 crc kubenswrapper[4857]: I1128 13:55:40.921803 4857 scope.go:117] "RemoveContainer" containerID="29ebf0561572ef46ce57cfc73df73475b6e96599b050827b8ce6ce81e4b1ad95" Nov 28 13:55:40 crc kubenswrapper[4857]: I1128 13:55:40.921789 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapi379c-account-delete-7x584" event={"ID":"8288e005-1d07-4989-bc64-64b3ecd62993","Type":"ContainerDied","Data":"9f8f48cfed230fc4f1a81c9763ba14618bf0a552a05748b0a96536a0290ba742"} Nov 28 13:55:40 crc kubenswrapper[4857]: I1128 13:55:40.950916 4857 scope.go:117] "RemoveContainer" containerID="29ebf0561572ef46ce57cfc73df73475b6e96599b050827b8ce6ce81e4b1ad95" Nov 28 13:55:40 crc kubenswrapper[4857]: E1128 13:55:40.951605 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"29ebf0561572ef46ce57cfc73df73475b6e96599b050827b8ce6ce81e4b1ad95\": container with ID starting with 29ebf0561572ef46ce57cfc73df73475b6e96599b050827b8ce6ce81e4b1ad95 not found: ID does not exist" containerID="29ebf0561572ef46ce57cfc73df73475b6e96599b050827b8ce6ce81e4b1ad95" Nov 28 13:55:40 crc kubenswrapper[4857]: I1128 13:55:40.951650 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"29ebf0561572ef46ce57cfc73df73475b6e96599b050827b8ce6ce81e4b1ad95"} err="failed to get container status \"29ebf0561572ef46ce57cfc73df73475b6e96599b050827b8ce6ce81e4b1ad95\": rpc error: code = NotFound desc = could not find container \"29ebf0561572ef46ce57cfc73df73475b6e96599b050827b8ce6ce81e4b1ad95\": container with ID starting with 29ebf0561572ef46ce57cfc73df73475b6e96599b050827b8ce6ce81e4b1ad95 not found: ID does not exist" Nov 28 13:55:40 crc kubenswrapper[4857]: I1128 13:55:40.963816 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novaapi379c-account-delete-7x584"] Nov 28 13:55:40 crc kubenswrapper[4857]: I1128 13:55:40.971658 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/novaapi379c-account-delete-7x584"] Nov 28 13:55:41 crc kubenswrapper[4857]: I1128 13:55:41.309023 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:55:41 crc kubenswrapper[4857]: I1128 13:55:41.309097 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:55:42 crc kubenswrapper[4857]: I1128 13:55:42.240892 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8288e005-1d07-4989-bc64-64b3ecd62993" path="/var/lib/kubelet/pods/8288e005-1d07-4989-bc64-64b3ecd62993/volumes" Nov 28 13:55:42 crc kubenswrapper[4857]: I1128 13:55:42.374200 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican8199-account-delete-z9b9h" Nov 28 13:55:42 crc kubenswrapper[4857]: I1128 13:55:42.526861 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2f9bt\" (UniqueName: \"kubernetes.io/projected/2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd-kube-api-access-2f9bt\") pod \"2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd\" (UID: \"2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd\") " Nov 28 13:55:42 crc kubenswrapper[4857]: I1128 13:55:42.527049 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd-operator-scripts\") pod \"2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd\" (UID: \"2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd\") " Nov 28 13:55:42 crc kubenswrapper[4857]: I1128 13:55:42.527639 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd" (UID: "2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 13:55:42 crc kubenswrapper[4857]: I1128 13:55:42.530655 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd-kube-api-access-2f9bt" (OuterVolumeSpecName: "kube-api-access-2f9bt") pod "2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd" (UID: "2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd"). InnerVolumeSpecName "kube-api-access-2f9bt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 13:55:42 crc kubenswrapper[4857]: I1128 13:55:42.628652 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:42 crc kubenswrapper[4857]: I1128 13:55:42.628698 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2f9bt\" (UniqueName: \"kubernetes.io/projected/2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd-kube-api-access-2f9bt\") on node \"crc\" DevicePath \"\"" Nov 28 13:55:42 crc kubenswrapper[4857]: I1128 13:55:42.952652 4857 generic.go:334] "Generic (PLEG): container finished" podID="2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd" containerID="0c6047c8f93bc6be4f26062491195b4ab4317552f00ce7119fad6fe44c135c13" exitCode=137 Nov 28 13:55:42 crc kubenswrapper[4857]: I1128 13:55:42.952698 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican8199-account-delete-z9b9h" event={"ID":"2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd","Type":"ContainerDied","Data":"0c6047c8f93bc6be4f26062491195b4ab4317552f00ce7119fad6fe44c135c13"} Nov 28 13:55:42 crc kubenswrapper[4857]: I1128 13:55:42.952728 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican8199-account-delete-z9b9h" event={"ID":"2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd","Type":"ContainerDied","Data":"108c5b4f3809bd209d4fa13ccf785df4865d3897aea55b48f09fd9c1415f820e"} Nov 28 13:55:42 crc kubenswrapper[4857]: I1128 13:55:42.952749 4857 scope.go:117] "RemoveContainer" containerID="0c6047c8f93bc6be4f26062491195b4ab4317552f00ce7119fad6fe44c135c13" Nov 28 13:55:42 crc kubenswrapper[4857]: I1128 13:55:42.952879 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican8199-account-delete-z9b9h" Nov 28 13:55:42 crc kubenswrapper[4857]: I1128 13:55:42.991635 4857 scope.go:117] "RemoveContainer" containerID="0c6047c8f93bc6be4f26062491195b4ab4317552f00ce7119fad6fe44c135c13" Nov 28 13:55:42 crc kubenswrapper[4857]: E1128 13:55:42.992396 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0c6047c8f93bc6be4f26062491195b4ab4317552f00ce7119fad6fe44c135c13\": container with ID starting with 0c6047c8f93bc6be4f26062491195b4ab4317552f00ce7119fad6fe44c135c13 not found: ID does not exist" containerID="0c6047c8f93bc6be4f26062491195b4ab4317552f00ce7119fad6fe44c135c13" Nov 28 13:55:42 crc kubenswrapper[4857]: I1128 13:55:42.992487 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c6047c8f93bc6be4f26062491195b4ab4317552f00ce7119fad6fe44c135c13"} err="failed to get container status \"0c6047c8f93bc6be4f26062491195b4ab4317552f00ce7119fad6fe44c135c13\": rpc error: code = NotFound desc = could not find container \"0c6047c8f93bc6be4f26062491195b4ab4317552f00ce7119fad6fe44c135c13\": container with ID starting with 0c6047c8f93bc6be4f26062491195b4ab4317552f00ce7119fad6fe44c135c13 not found: ID does not exist" Nov 28 13:55:42 crc kubenswrapper[4857]: I1128 13:55:42.992633 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican8199-account-delete-z9b9h"] Nov 28 13:55:43 crc kubenswrapper[4857]: I1128 13:55:43.005154 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican8199-account-delete-z9b9h"] Nov 28 13:55:44 crc kubenswrapper[4857]: I1128 13:55:44.239391 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd" path="/var/lib/kubelet/pods/2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd/volumes" Nov 28 13:56:11 crc kubenswrapper[4857]: I1128 13:56:11.308731 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 13:56:11 crc kubenswrapper[4857]: I1128 13:56:11.309321 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 13:56:11 crc kubenswrapper[4857]: I1128 13:56:11.309365 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 13:56:11 crc kubenswrapper[4857]: I1128 13:56:11.309924 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6bdae42f0e762dbf0605c629cfcd74b51248b4c9c90654ba4b8431e4a0c5c123"} pod="openshift-machine-config-operator/machine-config-daemon-dshsf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 13:56:11 crc kubenswrapper[4857]: I1128 13:56:11.309983 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" 
containerName="machine-config-daemon" containerID="cri-o://6bdae42f0e762dbf0605c629cfcd74b51248b4c9c90654ba4b8431e4a0c5c123" gracePeriod=600 Nov 28 13:56:11 crc kubenswrapper[4857]: E1128 13:56:11.430528 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 13:56:12 crc kubenswrapper[4857]: I1128 13:56:12.236372 4857 generic.go:334] "Generic (PLEG): container finished" podID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerID="6bdae42f0e762dbf0605c629cfcd74b51248b4c9c90654ba4b8431e4a0c5c123" exitCode=0 Nov 28 13:56:12 crc kubenswrapper[4857]: I1128 13:56:12.240387 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerDied","Data":"6bdae42f0e762dbf0605c629cfcd74b51248b4c9c90654ba4b8431e4a0c5c123"} Nov 28 13:56:12 crc kubenswrapper[4857]: I1128 13:56:12.240494 4857 scope.go:117] "RemoveContainer" containerID="46b7cc46f703cadc54c5480b2eebc620053c8d6a8106cbc200c8eb138aca1d8a" Nov 28 13:56:12 crc kubenswrapper[4857]: I1128 13:56:12.241602 4857 scope.go:117] "RemoveContainer" containerID="6bdae42f0e762dbf0605c629cfcd74b51248b4c9c90654ba4b8431e4a0c5c123" Nov 28 13:56:12 crc kubenswrapper[4857]: E1128 13:56:12.242111 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 13:56:12 crc kubenswrapper[4857]: I1128 13:56:12.642521 4857 scope.go:117] "RemoveContainer" containerID="07476080bfc0082bdd51a54d167f1b8f0849c9dddec8d7f0531eba67b8803b47" Nov 28 13:56:12 crc kubenswrapper[4857]: I1128 13:56:12.663221 4857 scope.go:117] "RemoveContainer" containerID="8a5e02322a0c92ef92403b10c46418a6d8a41c31a9a29fb0afbfbbabaa3daa7d" Nov 28 13:56:12 crc kubenswrapper[4857]: I1128 13:56:12.695062 4857 scope.go:117] "RemoveContainer" containerID="42941d6c5b0a04155e45168b57ba9dd956d33497bb2e71cd6e0569353039e78f" Nov 28 13:56:12 crc kubenswrapper[4857]: I1128 13:56:12.726469 4857 scope.go:117] "RemoveContainer" containerID="7dafe86e22a18fbebf5e7e7d5612cc7d57a0117fe9637462e2f483a803153f13" Nov 28 13:56:12 crc kubenswrapper[4857]: I1128 13:56:12.751735 4857 scope.go:117] "RemoveContainer" containerID="400c9c0eea1ca4656bcb7228bc24857d00720d04b36a80f5d974335993c0803f" Nov 28 13:56:12 crc kubenswrapper[4857]: I1128 13:56:12.773978 4857 scope.go:117] "RemoveContainer" containerID="cebb43c89e8c652d06298486abdb513d986747ec70932f5f374c742ceeb7cc9f" Nov 28 13:56:12 crc kubenswrapper[4857]: I1128 13:56:12.803226 4857 scope.go:117] "RemoveContainer" containerID="151e26b048646c370d13de720160a60c4f2f03b89357ec4d11d13d8e86d159a4" Nov 28 13:56:27 crc kubenswrapper[4857]: I1128 13:56:27.229273 4857 scope.go:117] "RemoveContainer" containerID="6bdae42f0e762dbf0605c629cfcd74b51248b4c9c90654ba4b8431e4a0c5c123" Nov 28 13:56:27 crc kubenswrapper[4857]: E1128 
13:56:27.229975 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 13:56:42 crc kubenswrapper[4857]: I1128 13:56:42.229199 4857 scope.go:117] "RemoveContainer" containerID="6bdae42f0e762dbf0605c629cfcd74b51248b4c9c90654ba4b8431e4a0c5c123" Nov 28 13:56:42 crc kubenswrapper[4857]: E1128 13:56:42.230022 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 13:56:54 crc kubenswrapper[4857]: I1128 13:56:54.229358 4857 scope.go:117] "RemoveContainer" containerID="6bdae42f0e762dbf0605c629cfcd74b51248b4c9c90654ba4b8431e4a0c5c123" Nov 28 13:56:54 crc kubenswrapper[4857]: E1128 13:56:54.230102 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 13:57:07 crc kubenswrapper[4857]: I1128 13:57:07.229421 4857 scope.go:117] "RemoveContainer" containerID="6bdae42f0e762dbf0605c629cfcd74b51248b4c9c90654ba4b8431e4a0c5c123" Nov 28 13:57:07 crc kubenswrapper[4857]: E1128 13:57:07.231621 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 13:57:13 crc kubenswrapper[4857]: I1128 13:57:13.153700 4857 scope.go:117] "RemoveContainer" containerID="8308e427b7faf625a092145ecb316284c50212e2707d538795b44f416b0d86a3" Nov 28 13:57:13 crc kubenswrapper[4857]: I1128 13:57:13.195430 4857 scope.go:117] "RemoveContainer" containerID="a608443173e09bdc122142915f62f799bb04de43dba7d386430145ce82247138" Nov 28 13:57:13 crc kubenswrapper[4857]: I1128 13:57:13.273352 4857 scope.go:117] "RemoveContainer" containerID="71ec8c59a35a35a64c7fc0c905ef1b6853ea3563d5aeb022b63a5ab844099b86" Nov 28 13:57:13 crc kubenswrapper[4857]: I1128 13:57:13.333380 4857 scope.go:117] "RemoveContainer" containerID="9fab81523ebe7c639a203d355f9824824a56a7ef9ae0b6418036872dfca60c23" Nov 28 13:57:13 crc kubenswrapper[4857]: I1128 13:57:13.355102 4857 scope.go:117] "RemoveContainer" containerID="72bfcc6e31ffdfc856e26b2344ae2a527b77914dd30b292c69535d00d195afeb" Nov 28 13:57:13 crc kubenswrapper[4857]: I1128 13:57:13.431255 4857 scope.go:117] "RemoveContainer" containerID="3bfaa6d12b2af65b2c4bc9e67c77c455db443837bdebce53cc1736765094b03f" Nov 28 13:57:13 
crc kubenswrapper[4857]: I1128 13:57:13.466108 4857 scope.go:117] "RemoveContainer" containerID="efbe8acece666741759d5f9a21f2800249919a91366fee8526441705dee2a5dc" Nov 28 13:57:13 crc kubenswrapper[4857]: I1128 13:57:13.491736 4857 scope.go:117] "RemoveContainer" containerID="130df371f30e01f12e47e3db026bd7dbc2ef67bb3088c20d0b0da3ed10562080" Nov 28 13:57:13 crc kubenswrapper[4857]: I1128 13:57:13.518195 4857 scope.go:117] "RemoveContainer" containerID="b5d127bdd61fecc173b260c4f1511da50b4e298e9d3039f6515ad85b7a11a048" Nov 28 13:57:13 crc kubenswrapper[4857]: I1128 13:57:13.551643 4857 scope.go:117] "RemoveContainer" containerID="9e1c7df89c44e6547ef9b5da5406b57747caa7630294b5760a04f4cd26391b71" Nov 28 13:57:13 crc kubenswrapper[4857]: I1128 13:57:13.584130 4857 scope.go:117] "RemoveContainer" containerID="10b19e1335d4a44e15430b19e7ace5a55ec7270bd18000a142d0b76ec25527fa" Nov 28 13:57:13 crc kubenswrapper[4857]: I1128 13:57:13.625198 4857 scope.go:117] "RemoveContainer" containerID="a0d911c80b3a48f7fb005ed2c796f1f446d2c4a6a5bdf08ff5f72bbc9aaec0f8" Nov 28 13:57:13 crc kubenswrapper[4857]: I1128 13:57:13.656845 4857 scope.go:117] "RemoveContainer" containerID="f69cd95a48cf6d256599abb9dd70bf305cf5ea79a69a64ffbfe2a65b72da84f1" Nov 28 13:57:13 crc kubenswrapper[4857]: I1128 13:57:13.686619 4857 scope.go:117] "RemoveContainer" containerID="4deb4acfd0651a7814aadc87831b4c8900ce8d2bd9486f8f6a0ff81b7dd4ddc1" Nov 28 13:57:13 crc kubenswrapper[4857]: I1128 13:57:13.710758 4857 scope.go:117] "RemoveContainer" containerID="627f4ebe6bb78ae8ff32c46dccaaae1575f38a3fc98a89b60a2ba53081e516aa" Nov 28 13:57:13 crc kubenswrapper[4857]: I1128 13:57:13.741358 4857 scope.go:117] "RemoveContainer" containerID="d6ccb0e6b21baea5a7b5a6eca4876d506f37cb827f59f9ec638ab87806a36902" Nov 28 13:57:13 crc kubenswrapper[4857]: I1128 13:57:13.772547 4857 scope.go:117] "RemoveContainer" containerID="8c8913b9cca44eed468d286bc63c3d77ee68dc07c0d789961886533d02b3aad5" Nov 28 13:57:13 crc kubenswrapper[4857]: I1128 13:57:13.817934 4857 scope.go:117] "RemoveContainer" containerID="f92e33ffe665a2c08209c1d9b0953183209fcc8c900929235b85b2c7815d4fcb" Nov 28 13:57:19 crc kubenswrapper[4857]: I1128 13:57:19.229108 4857 scope.go:117] "RemoveContainer" containerID="6bdae42f0e762dbf0605c629cfcd74b51248b4c9c90654ba4b8431e4a0c5c123" Nov 28 13:57:19 crc kubenswrapper[4857]: E1128 13:57:19.229966 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 13:57:32 crc kubenswrapper[4857]: I1128 13:57:32.228594 4857 scope.go:117] "RemoveContainer" containerID="6bdae42f0e762dbf0605c629cfcd74b51248b4c9c90654ba4b8431e4a0c5c123" Nov 28 13:57:32 crc kubenswrapper[4857]: E1128 13:57:32.229404 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 13:57:44 crc kubenswrapper[4857]: I1128 13:57:44.229717 4857 
scope.go:117] "RemoveContainer" containerID="6bdae42f0e762dbf0605c629cfcd74b51248b4c9c90654ba4b8431e4a0c5c123" Nov 28 13:57:44 crc kubenswrapper[4857]: E1128 13:57:44.231171 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 13:57:57 crc kubenswrapper[4857]: I1128 13:57:57.230003 4857 scope.go:117] "RemoveContainer" containerID="6bdae42f0e762dbf0605c629cfcd74b51248b4c9c90654ba4b8431e4a0c5c123" Nov 28 13:57:57 crc kubenswrapper[4857]: E1128 13:57:57.230764 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 13:58:12 crc kubenswrapper[4857]: I1128 13:58:12.228471 4857 scope.go:117] "RemoveContainer" containerID="6bdae42f0e762dbf0605c629cfcd74b51248b4c9c90654ba4b8431e4a0c5c123" Nov 28 13:58:12 crc kubenswrapper[4857]: E1128 13:58:12.229052 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 13:58:14 crc kubenswrapper[4857]: I1128 13:58:14.211567 4857 scope.go:117] "RemoveContainer" containerID="5eb527ac4bbe9bee9521dab12726c27c00c910564fa737354123b784c289d522" Nov 28 13:58:14 crc kubenswrapper[4857]: I1128 13:58:14.240274 4857 scope.go:117] "RemoveContainer" containerID="b59a97768689c496e41691f3b6f3693124ea51c593b2a2c5ee62bcd9479b8ee4" Nov 28 13:58:14 crc kubenswrapper[4857]: I1128 13:58:14.269499 4857 scope.go:117] "RemoveContainer" containerID="469ba51de67f78e9d88dca6693a397f6a6636c87ca3e4120dd8b5a9837fd6dc3" Nov 28 13:58:14 crc kubenswrapper[4857]: I1128 13:58:14.294586 4857 scope.go:117] "RemoveContainer" containerID="83ec5382a77be899a0142fdd5c00d8ed5b441bf19df2a875fdd024a926571dce" Nov 28 13:58:14 crc kubenswrapper[4857]: I1128 13:58:14.334040 4857 scope.go:117] "RemoveContainer" containerID="dac2f631af2dd13cbefd63029290ff4e103fbb366fa60c50cb33a3e5334757da" Nov 28 13:58:14 crc kubenswrapper[4857]: I1128 13:58:14.352488 4857 scope.go:117] "RemoveContainer" containerID="e1f8f8e9bc6117e87029e7ff11a91ae766918b1844404c4c2ebb9d051e3cab75" Nov 28 13:58:14 crc kubenswrapper[4857]: I1128 13:58:14.383703 4857 scope.go:117] "RemoveContainer" containerID="9b6c4bbe152e866f7792de341cdba73138d078a52439a73ea5c1560ffc423419" Nov 28 13:58:14 crc kubenswrapper[4857]: I1128 13:58:14.402983 4857 scope.go:117] "RemoveContainer" containerID="c8b8591445954b9e685515c84c1d977b8750fa6e93237284c1d33ce8ceabed21" Nov 28 13:58:25 crc kubenswrapper[4857]: I1128 13:58:25.229470 4857 scope.go:117] "RemoveContainer" 
containerID="6bdae42f0e762dbf0605c629cfcd74b51248b4c9c90654ba4b8431e4a0c5c123" Nov 28 13:58:25 crc kubenswrapper[4857]: E1128 13:58:25.230878 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 13:58:36 crc kubenswrapper[4857]: I1128 13:58:36.229350 4857 scope.go:117] "RemoveContainer" containerID="6bdae42f0e762dbf0605c629cfcd74b51248b4c9c90654ba4b8431e4a0c5c123" Nov 28 13:58:36 crc kubenswrapper[4857]: E1128 13:58:36.230034 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 13:58:50 crc kubenswrapper[4857]: I1128 13:58:50.237517 4857 scope.go:117] "RemoveContainer" containerID="6bdae42f0e762dbf0605c629cfcd74b51248b4c9c90654ba4b8431e4a0c5c123" Nov 28 13:58:50 crc kubenswrapper[4857]: E1128 13:58:50.238490 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 13:59:04 crc kubenswrapper[4857]: I1128 13:59:04.229633 4857 scope.go:117] "RemoveContainer" containerID="6bdae42f0e762dbf0605c629cfcd74b51248b4c9c90654ba4b8431e4a0c5c123" Nov 28 13:59:04 crc kubenswrapper[4857]: E1128 13:59:04.231086 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 13:59:14 crc kubenswrapper[4857]: I1128 13:59:14.488317 4857 scope.go:117] "RemoveContainer" containerID="b5bdcc04015076d00440bc794607b51a9b0e42405828b71f2c17a0fc67c38cfd" Nov 28 13:59:14 crc kubenswrapper[4857]: I1128 13:59:14.517203 4857 scope.go:117] "RemoveContainer" containerID="e27595f02f253a15d73f31e94dd3ca259868157d7312b06629d00ea7f038667e" Nov 28 13:59:14 crc kubenswrapper[4857]: I1128 13:59:14.542771 4857 scope.go:117] "RemoveContainer" containerID="9d3d6c09860350f192015fe259ed4a4581cfb3b28583b924e843bda2835e7fb6" Nov 28 13:59:14 crc kubenswrapper[4857]: I1128 13:59:14.569636 4857 scope.go:117] "RemoveContainer" containerID="625f21229df21e17ad567862d2f50eae24cbad68ce3c927378a2c02395fc6d18" Nov 28 13:59:14 crc kubenswrapper[4857]: I1128 13:59:14.587062 4857 scope.go:117] "RemoveContainer" containerID="3550e137178d18c59d49e08f8ba96f4865ee8708db2f6e837fe17fedaeee3539" Nov 28 13:59:14 crc kubenswrapper[4857]: I1128 13:59:14.604159 4857 
scope.go:117] "RemoveContainer" containerID="361d22577f867f73846943f2f7d6ad8aa5bad3ecfd7152d2ce7d1676a728ad7b" Nov 28 13:59:14 crc kubenswrapper[4857]: I1128 13:59:14.627377 4857 scope.go:117] "RemoveContainer" containerID="089005b3ae005975141be945a4d75ab3ebb78652421b1e38e82689bc954e5745" Nov 28 13:59:14 crc kubenswrapper[4857]: I1128 13:59:14.662214 4857 scope.go:117] "RemoveContainer" containerID="2216da7127d601e0f56f42ae7929937c3281f01c4aa95f88133a482b0170b592" Nov 28 13:59:14 crc kubenswrapper[4857]: I1128 13:59:14.677367 4857 scope.go:117] "RemoveContainer" containerID="287f76eba3eb82eb4f37609b40da2cd1b1de08892bd1e15facb804dd6851a8c1" Nov 28 13:59:14 crc kubenswrapper[4857]: I1128 13:59:14.706746 4857 scope.go:117] "RemoveContainer" containerID="b739607a7d3bed26bfdef1e161fe7a512799ebf330a44835b6eebf8c36ffb70a" Nov 28 13:59:14 crc kubenswrapper[4857]: I1128 13:59:14.721649 4857 scope.go:117] "RemoveContainer" containerID="7d6b6e7e5e9cc36ce54a8b4ffd83d724c615708f5364d1ae2e912b47662fbf4c" Nov 28 13:59:14 crc kubenswrapper[4857]: I1128 13:59:14.739386 4857 scope.go:117] "RemoveContainer" containerID="7eaa667398c3e224c563e132d00cba92457ba8acc83d7c4ef18995674225d29a" Nov 28 13:59:14 crc kubenswrapper[4857]: I1128 13:59:14.758329 4857 scope.go:117] "RemoveContainer" containerID="8bf280eb12748bb49a64625337d8c5ff217351191d99af8e84f34162408eb879" Nov 28 13:59:16 crc kubenswrapper[4857]: I1128 13:59:16.230205 4857 scope.go:117] "RemoveContainer" containerID="6bdae42f0e762dbf0605c629cfcd74b51248b4c9c90654ba4b8431e4a0c5c123" Nov 28 13:59:16 crc kubenswrapper[4857]: E1128 13:59:16.231499 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 13:59:28 crc kubenswrapper[4857]: I1128 13:59:28.229152 4857 scope.go:117] "RemoveContainer" containerID="6bdae42f0e762dbf0605c629cfcd74b51248b4c9c90654ba4b8431e4a0c5c123" Nov 28 13:59:28 crc kubenswrapper[4857]: E1128 13:59:28.229853 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 13:59:41 crc kubenswrapper[4857]: I1128 13:59:41.228309 4857 scope.go:117] "RemoveContainer" containerID="6bdae42f0e762dbf0605c629cfcd74b51248b4c9c90654ba4b8431e4a0c5c123" Nov 28 13:59:41 crc kubenswrapper[4857]: E1128 13:59:41.229055 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 13:59:52 crc kubenswrapper[4857]: I1128 13:59:52.229268 4857 scope.go:117] "RemoveContainer" 
containerID="6bdae42f0e762dbf0605c629cfcd74b51248b4c9c90654ba4b8431e4a0c5c123" Nov 28 13:59:52 crc kubenswrapper[4857]: E1128 13:59:52.230090 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.144283 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405640-826p2"] Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.145235 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="account-auditor" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.145255 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="account-auditor" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.145276 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="960b2298-15f9-436b-93c9-04b0617c0c9b" containerName="neutron-api" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.145285 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="960b2298-15f9-436b-93c9-04b0617c0c9b" containerName="neutron-api" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.145301 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d973a56d-fc8e-4cef-a590-d21d32242dc4" containerName="cinder-api-log" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.145309 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d973a56d-fc8e-4cef-a590-d21d32242dc4" containerName="cinder-api-log" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.145323 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd" containerName="mariadb-account-delete" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.145343 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd" containerName="mariadb-account-delete" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.145358 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="account-server" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.145366 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="account-server" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.145382 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="object-replicator" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.145391 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="object-replicator" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.145400 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8288e005-1d07-4989-bc64-64b3ecd62993" containerName="mariadb-account-delete" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.145406 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="8288e005-1d07-4989-bc64-64b3ecd62993" 
containerName="mariadb-account-delete" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.145419 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f9668e3-0293-4d8f-aa56-ad830134b0e4" containerName="registry-server" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.145425 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f9668e3-0293-4d8f-aa56-ad830134b0e4" containerName="registry-server" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.145477 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1e13053-d5d0-4d38-8758-4ebf494ededb" containerName="ceilometer-notification-agent" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.145491 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1e13053-d5d0-4d38-8758-4ebf494ededb" containerName="ceilometer-notification-agent" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.145506 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6c9b673-669e-464a-b012-8b39314e1990" containerName="mysql-bootstrap" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.145516 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6c9b673-669e-464a-b012-8b39314e1990" containerName="mysql-bootstrap" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.145529 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="61549c8e-2955-4350-9055-731ceb896fdc" containerName="nova-cell1-conductor-conductor" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.145536 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="61549c8e-2955-4350-9055-731ceb896fdc" containerName="nova-cell1-conductor-conductor" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.145551 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="rsync" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.145558 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="rsync" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.145571 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d64a95a8-7e15-40a3-b2f4-54c65c2cbf45" containerName="kube-state-metrics" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.145578 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d64a95a8-7e15-40a3-b2f4-54c65c2cbf45" containerName="kube-state-metrics" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.145586 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="object-server" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.145594 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="object-server" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.145604 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2bf149d2-9beb-4394-921a-a703473391aa" containerName="nova-cell1-novncproxy-novncproxy" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.145614 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="2bf149d2-9beb-4394-921a-a703473391aa" containerName="nova-cell1-novncproxy-novncproxy" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.145628 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1e13053-d5d0-4d38-8758-4ebf494ededb" containerName="sg-core" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.145635 4857 
state_mem.go:107] "Deleted CPUSet assignment" podUID="e1e13053-d5d0-4d38-8758-4ebf494ededb" containerName="sg-core" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.145651 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d4d3b72-fd05-4a47-925c-f17f77c46fc1" containerName="keystone-api" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.145658 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d4d3b72-fd05-4a47-925c-f17f77c46fc1" containerName="keystone-api" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.145670 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1ee99c5-4515-45fd-ad45-cd7d96f85c10" containerName="mariadb-account-delete" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.145677 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1ee99c5-4515-45fd-ad45-cd7d96f85c10" containerName="mariadb-account-delete" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.145685 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d973a56d-fc8e-4cef-a590-d21d32242dc4" containerName="cinder-api" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.145693 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d973a56d-fc8e-4cef-a590-d21d32242dc4" containerName="cinder-api" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.145703 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c69ec619-0d17-4a49-8f97-6db48291122d" containerName="nova-api-log" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.145711 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="c69ec619-0d17-4a49-8f97-6db48291122d" containerName="nova-api-log" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.145721 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f9668e3-0293-4d8f-aa56-ad830134b0e4" containerName="extract-content" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.145728 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f9668e3-0293-4d8f-aa56-ad830134b0e4" containerName="extract-content" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.145739 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="21fe1399-7f40-43ec-bee8-868c937a6e19" containerName="ovsdb-server" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.145745 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="21fe1399-7f40-43ec-bee8-868c937a6e19" containerName="ovsdb-server" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.145756 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="object-updater" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.145763 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="object-updater" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.145773 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="791bae3e-043c-4a91-8e8b-d1d574dcb008" containerName="barbican-worker-log" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.145781 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="791bae3e-043c-4a91-8e8b-d1d574dcb008" containerName="barbican-worker-log" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.145789 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1db40e4-bf66-4d82-aa94-c54d44513220" containerName="mariadb-account-delete" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 
14:00:00.145794 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1db40e4-bf66-4d82-aa94-c54d44513220" containerName="mariadb-account-delete" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.145804 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd" containerName="glance-httpd" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.145809 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd" containerName="glance-httpd" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.145817 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="791bae3e-043c-4a91-8e8b-d1d574dcb008" containerName="barbican-worker" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.145822 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="791bae3e-043c-4a91-8e8b-d1d574dcb008" containerName="barbican-worker" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.145830 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d" containerName="nova-cell0-conductor-conductor" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.145836 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d" containerName="nova-cell0-conductor-conductor" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.145843 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="object-expirer" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.145848 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="object-expirer" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.145855 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="swift-recon-cron" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.145861 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="swift-recon-cron" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.145871 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e" containerName="glance-log" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.145876 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e" containerName="glance-log" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.145884 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75c7c292-3658-4264-b86b-2a825aeb9ad4" containerName="galera" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.145891 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="75c7c292-3658-4264-b86b-2a825aeb9ad4" containerName="galera" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.145899 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10f5dca0-ca0a-4e88-838f-14affb1dead5" containerName="nova-metadata-log" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.145906 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="10f5dca0-ca0a-4e88-838f-14affb1dead5" containerName="nova-metadata-log" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.145918 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba0bce06-3761-4bb4-8e35-305dc48b3277" containerName="mariadb-account-delete" Nov 28 
14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.145925 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba0bce06-3761-4bb4-8e35-305dc48b3277" containerName="mariadb-account-delete" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.145934 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="21fe1399-7f40-43ec-bee8-868c937a6e19" containerName="ovsdb-server-init" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.145964 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="21fe1399-7f40-43ec-bee8-868c937a6e19" containerName="ovsdb-server-init" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.145976 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6597fde-8e34-4ccb-8784-1b7aa3680488" containerName="ovn-northd" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.145983 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6597fde-8e34-4ccb-8784-1b7aa3680488" containerName="ovn-northd" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.145992 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6597fde-8e34-4ccb-8784-1b7aa3680488" containerName="openstack-network-exporter" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.145998 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6597fde-8e34-4ccb-8784-1b7aa3680488" containerName="openstack-network-exporter" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.146009 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e" containerName="glance-httpd" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146014 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e" containerName="glance-httpd" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.146020 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e222fcd6-26e0-46af-82ab-7cf038a18195" containerName="nova-scheduler-scheduler" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146026 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="e222fcd6-26e0-46af-82ab-7cf038a18195" containerName="nova-scheduler-scheduler" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.146036 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="account-replicator" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146042 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="account-replicator" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.146052 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63f7c690-a408-4e1f-8959-b22badb1b9dc" containerName="barbican-api-log" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146058 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="63f7c690-a408-4e1f-8959-b22badb1b9dc" containerName="barbican-api-log" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.146064 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10f5dca0-ca0a-4e88-838f-14affb1dead5" containerName="nova-metadata-metadata" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146069 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="10f5dca0-ca0a-4e88-838f-14affb1dead5" containerName="nova-metadata-metadata" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.146081 4857 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="4c03f40d-958a-49a0-a2f7-54a1f175caf7" containerName="mariadb-account-delete" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146087 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c03f40d-958a-49a0-a2f7-54a1f175caf7" containerName="mariadb-account-delete" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.146096 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="464d5189-d9e5-4b18-b383-a7d75a28771b" containerName="mariadb-account-delete" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146101 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="464d5189-d9e5-4b18-b383-a7d75a28771b" containerName="mariadb-account-delete" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.146109 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5550a25-04ef-4dde-afd4-627f1df97a90" containerName="setup-container" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146114 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5550a25-04ef-4dde-afd4-627f1df97a90" containerName="setup-container" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.146124 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c" containerName="cinder-scheduler" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146129 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c" containerName="cinder-scheduler" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.146137 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6c9b673-669e-464a-b012-8b39314e1990" containerName="galera" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146142 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6c9b673-669e-464a-b012-8b39314e1990" containerName="galera" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.146151 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0944133e-cee5-4927-8f5e-8f781b30d224" containerName="placement-log" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146156 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="0944133e-cee5-4927-8f5e-8f781b30d224" containerName="placement-log" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.146164 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0944133e-cee5-4927-8f5e-8f781b30d224" containerName="placement-api" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146171 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="0944133e-cee5-4927-8f5e-8f781b30d224" containerName="placement-api" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.146180 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1e13053-d5d0-4d38-8758-4ebf494ededb" containerName="proxy-httpd" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146188 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1e13053-d5d0-4d38-8758-4ebf494ededb" containerName="proxy-httpd" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.146198 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="container-server" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146204 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="container-server" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.146212 4857 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="21fe1399-7f40-43ec-bee8-868c937a6e19" containerName="ovs-vswitchd" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146217 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="21fe1399-7f40-43ec-bee8-868c937a6e19" containerName="ovs-vswitchd" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.146226 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1f87bb5-7cc1-4533-b145-d855e45205ca" containerName="memcached" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146232 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1f87bb5-7cc1-4533-b145-d855e45205ca" containerName="memcached" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.146238 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c69ec619-0d17-4a49-8f97-6db48291122d" containerName="nova-api-api" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146244 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="c69ec619-0d17-4a49-8f97-6db48291122d" containerName="nova-api-api" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.146289 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1e13053-d5d0-4d38-8758-4ebf494ededb" containerName="ceilometer-central-agent" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146296 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1e13053-d5d0-4d38-8758-4ebf494ededb" containerName="ceilometer-central-agent" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.146308 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="account-reaper" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146314 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="account-reaper" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.146323 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63f7c690-a408-4e1f-8959-b22badb1b9dc" containerName="barbican-api" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146329 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="63f7c690-a408-4e1f-8959-b22badb1b9dc" containerName="barbican-api" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.146337 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5550a25-04ef-4dde-afd4-627f1df97a90" containerName="rabbitmq" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146343 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5550a25-04ef-4dde-afd4-627f1df97a90" containerName="rabbitmq" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.146350 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="container-updater" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146355 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="container-updater" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.146365 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f9668e3-0293-4d8f-aa56-ad830134b0e4" containerName="extract-utilities" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146371 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f9668e3-0293-4d8f-aa56-ad830134b0e4" containerName="extract-utilities" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.146380 4857 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="e5ec18e7-6719-46dd-b580-303f3da41869" containerName="rabbitmq" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146385 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5ec18e7-6719-46dd-b580-303f3da41869" containerName="rabbitmq" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.146394 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd" containerName="glance-log" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146400 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd" containerName="glance-log" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.146406 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="960b2298-15f9-436b-93c9-04b0617c0c9b" containerName="neutron-httpd" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146412 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="960b2298-15f9-436b-93c9-04b0617c0c9b" containerName="neutron-httpd" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.146420 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75c7c292-3658-4264-b86b-2a825aeb9ad4" containerName="mysql-bootstrap" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146426 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="75c7c292-3658-4264-b86b-2a825aeb9ad4" containerName="mysql-bootstrap" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.146433 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="container-auditor" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146439 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="container-auditor" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.146446 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="object-auditor" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146452 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="object-auditor" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.146487 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5ec18e7-6719-46dd-b580-303f3da41869" containerName="setup-container" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146497 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5ec18e7-6719-46dd-b580-303f3da41869" containerName="setup-container" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.146505 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29b25b8b-8f1c-4f60-b275-f924f1c0812a" containerName="mariadb-account-delete" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146511 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="29b25b8b-8f1c-4f60-b275-f924f1c0812a" containerName="mariadb-account-delete" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.146521 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="container-replicator" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146527 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="container-replicator" Nov 28 14:00:00 crc kubenswrapper[4857]: E1128 14:00:00.146535 4857 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c" containerName="probe" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146541 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c" containerName="probe" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146684 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="791bae3e-043c-4a91-8e8b-d1d574dcb008" containerName="barbican-worker" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146693 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="account-server" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146701 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="960b2298-15f9-436b-93c9-04b0617c0c9b" containerName="neutron-api" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146710 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d973a56d-fc8e-4cef-a590-d21d32242dc4" containerName="cinder-api-log" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146716 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba0bce06-3761-4bb4-8e35-305dc48b3277" containerName="mariadb-account-delete" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146727 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="e1e13053-d5d0-4d38-8758-4ebf494ededb" containerName="proxy-httpd" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146737 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="account-auditor" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146744 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="21fe1399-7f40-43ec-bee8-868c937a6e19" containerName="ovsdb-server" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146753 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="object-replicator" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146762 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="container-auditor" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146770 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="object-server" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146776 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd" containerName="glance-httpd" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146784 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d64a95a8-7e15-40a3-b2f4-54c65c2cbf45" containerName="kube-state-metrics" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146790 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="4461ee2f-d629-4e7b-8da5-e2e0dbee3cbd" containerName="glance-log" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146796 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="e1e13053-d5d0-4d38-8758-4ebf494ededb" containerName="ceilometer-central-agent" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146805 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d4d3b72-fd05-4a47-925c-f17f77c46fc1" containerName="keystone-api" Nov 28 14:00:00 crc 
kubenswrapper[4857]: I1128 14:00:00.146813 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="29b25b8b-8f1c-4f60-b275-f924f1c0812a" containerName="mariadb-account-delete" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146819 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="464d5189-d9e5-4b18-b383-a7d75a28771b" containerName="mariadb-account-delete" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146830 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5550a25-04ef-4dde-afd4-627f1df97a90" containerName="rabbitmq" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146838 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="c69ec619-0d17-4a49-8f97-6db48291122d" containerName="nova-api-api" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146847 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b8a27b2-34f7-4f9a-aaa5-93c0817d14dd" containerName="mariadb-account-delete" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146862 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="63f7c690-a408-4e1f-8959-b22badb1b9dc" containerName="barbican-api-log" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146877 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="791bae3e-043c-4a91-8e8b-d1d574dcb008" containerName="barbican-worker-log" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146885 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6c9b673-669e-464a-b012-8b39314e1990" containerName="galera" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146896 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="75c7c292-3658-4264-b86b-2a825aeb9ad4" containerName="galera" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146907 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e" containerName="glance-log" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146916 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="account-replicator" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146926 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="10f5dca0-ca0a-4e88-838f-14affb1dead5" containerName="nova-metadata-log" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146933 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="21fe1399-7f40-43ec-bee8-868c937a6e19" containerName="ovs-vswitchd" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146940 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="0944133e-cee5-4927-8f5e-8f781b30d224" containerName="placement-api" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146967 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="61549c8e-2955-4350-9055-731ceb896fdc" containerName="nova-cell1-conductor-conductor" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146978 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="e222fcd6-26e0-46af-82ab-7cf038a18195" containerName="nova-scheduler-scheduler" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.146988 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="container-replicator" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.147002 4857 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="e1db40e4-bf66-4d82-aa94-c54d44513220" containerName="mariadb-account-delete" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.147015 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c" containerName="probe" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.147022 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="object-auditor" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.147027 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="0944133e-cee5-4927-8f5e-8f781b30d224" containerName="placement-log" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.147033 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="object-updater" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.147040 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="object-expirer" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.147050 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1f87bb5-7cc1-4533-b145-d855e45205ca" containerName="memcached" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.147058 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="account-reaper" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.147068 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d973a56d-fc8e-4cef-a590-d21d32242dc4" containerName="cinder-api" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.147077 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="960b2298-15f9-436b-93c9-04b0617c0c9b" containerName="neutron-httpd" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.147089 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6597fde-8e34-4ccb-8784-1b7aa3680488" containerName="openstack-network-exporter" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.147102 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5ec18e7-6719-46dd-b580-303f3da41869" containerName="rabbitmq" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.147114 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f9668e3-0293-4d8f-aa56-ad830134b0e4" containerName="registry-server" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.147126 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="e1e13053-d5d0-4d38-8758-4ebf494ededb" containerName="ceilometer-notification-agent" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.147137 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="73acd93b-d5bf-4a4e-aadc-9d94e0e6af6d" containerName="nova-cell0-conductor-conductor" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.147149 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="10f5dca0-ca0a-4e88-838f-14affb1dead5" containerName="nova-metadata-metadata" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.147155 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="e1e13053-d5d0-4d38-8758-4ebf494ededb" containerName="sg-core" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.147166 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1ee99c5-4515-45fd-ad45-cd7d96f85c10" 
containerName="mariadb-account-delete" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.147178 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="63f7c690-a408-4e1f-8959-b22badb1b9dc" containerName="barbican-api" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.147199 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6597fde-8e34-4ccb-8784-1b7aa3680488" containerName="ovn-northd" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.147207 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="container-server" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.147218 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="container-updater" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.147226 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="swift-recon-cron" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.147237 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="c69ec619-0d17-4a49-8f97-6db48291122d" containerName="nova-api-log" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.147247 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="2bf149d2-9beb-4394-921a-a703473391aa" containerName="nova-cell1-novncproxy-novncproxy" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.147258 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c03f40d-958a-49a0-a2f7-54a1f175caf7" containerName="mariadb-account-delete" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.147286 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="7dacf187-3671-4114-a93e-e2296c8c20b2" containerName="rsync" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.147301 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="8288e005-1d07-4989-bc64-64b3ecd62993" containerName="mariadb-account-delete" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.147309 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ee51cc1-1ef7-4b41-92a4-eeb50ee0247c" containerName="cinder-scheduler" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.147321 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2d5c05c-573d-441d-bc1d-9c2b3b8fd92e" containerName="glance-httpd" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.147882 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405640-826p2" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.150271 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.150288 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.152286 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405640-826p2"] Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.297000 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ed82d2ae-eed6-482d-b727-95a6ee985903-secret-volume\") pod \"collect-profiles-29405640-826p2\" (UID: \"ed82d2ae-eed6-482d-b727-95a6ee985903\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405640-826p2" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.297094 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ed82d2ae-eed6-482d-b727-95a6ee985903-config-volume\") pod \"collect-profiles-29405640-826p2\" (UID: \"ed82d2ae-eed6-482d-b727-95a6ee985903\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405640-826p2" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.297224 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-972jf\" (UniqueName: \"kubernetes.io/projected/ed82d2ae-eed6-482d-b727-95a6ee985903-kube-api-access-972jf\") pod \"collect-profiles-29405640-826p2\" (UID: \"ed82d2ae-eed6-482d-b727-95a6ee985903\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405640-826p2" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.399170 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-972jf\" (UniqueName: \"kubernetes.io/projected/ed82d2ae-eed6-482d-b727-95a6ee985903-kube-api-access-972jf\") pod \"collect-profiles-29405640-826p2\" (UID: \"ed82d2ae-eed6-482d-b727-95a6ee985903\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405640-826p2" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.399313 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ed82d2ae-eed6-482d-b727-95a6ee985903-secret-volume\") pod \"collect-profiles-29405640-826p2\" (UID: \"ed82d2ae-eed6-482d-b727-95a6ee985903\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405640-826p2" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.399350 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ed82d2ae-eed6-482d-b727-95a6ee985903-config-volume\") pod \"collect-profiles-29405640-826p2\" (UID: \"ed82d2ae-eed6-482d-b727-95a6ee985903\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405640-826p2" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.400373 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ed82d2ae-eed6-482d-b727-95a6ee985903-config-volume\") pod 
\"collect-profiles-29405640-826p2\" (UID: \"ed82d2ae-eed6-482d-b727-95a6ee985903\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405640-826p2" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.405537 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ed82d2ae-eed6-482d-b727-95a6ee985903-secret-volume\") pod \"collect-profiles-29405640-826p2\" (UID: \"ed82d2ae-eed6-482d-b727-95a6ee985903\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405640-826p2" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.416140 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-972jf\" (UniqueName: \"kubernetes.io/projected/ed82d2ae-eed6-482d-b727-95a6ee985903-kube-api-access-972jf\") pod \"collect-profiles-29405640-826p2\" (UID: \"ed82d2ae-eed6-482d-b727-95a6ee985903\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405640-826p2" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.469104 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405640-826p2" Nov 28 14:00:00 crc kubenswrapper[4857]: I1128 14:00:00.875345 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405640-826p2"] Nov 28 14:00:01 crc kubenswrapper[4857]: I1128 14:00:01.296770 4857 generic.go:334] "Generic (PLEG): container finished" podID="ed82d2ae-eed6-482d-b727-95a6ee985903" containerID="b66a2bf5a9e8d5adfa4cb09f9f77444013aaf21294f13611970e87e6340d651c" exitCode=0 Nov 28 14:00:01 crc kubenswrapper[4857]: I1128 14:00:01.296815 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405640-826p2" event={"ID":"ed82d2ae-eed6-482d-b727-95a6ee985903","Type":"ContainerDied","Data":"b66a2bf5a9e8d5adfa4cb09f9f77444013aaf21294f13611970e87e6340d651c"} Nov 28 14:00:01 crc kubenswrapper[4857]: I1128 14:00:01.297060 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405640-826p2" event={"ID":"ed82d2ae-eed6-482d-b727-95a6ee985903","Type":"ContainerStarted","Data":"4966c109dc230e82607ddd38b0b2a59ef1f01005268ccab601619b7c3221ec99"} Nov 28 14:00:02 crc kubenswrapper[4857]: I1128 14:00:02.571929 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405640-826p2" Nov 28 14:00:02 crc kubenswrapper[4857]: I1128 14:00:02.733883 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-972jf\" (UniqueName: \"kubernetes.io/projected/ed82d2ae-eed6-482d-b727-95a6ee985903-kube-api-access-972jf\") pod \"ed82d2ae-eed6-482d-b727-95a6ee985903\" (UID: \"ed82d2ae-eed6-482d-b727-95a6ee985903\") " Nov 28 14:00:02 crc kubenswrapper[4857]: I1128 14:00:02.734005 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ed82d2ae-eed6-482d-b727-95a6ee985903-config-volume\") pod \"ed82d2ae-eed6-482d-b727-95a6ee985903\" (UID: \"ed82d2ae-eed6-482d-b727-95a6ee985903\") " Nov 28 14:00:02 crc kubenswrapper[4857]: I1128 14:00:02.734086 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ed82d2ae-eed6-482d-b727-95a6ee985903-secret-volume\") pod \"ed82d2ae-eed6-482d-b727-95a6ee985903\" (UID: \"ed82d2ae-eed6-482d-b727-95a6ee985903\") " Nov 28 14:00:02 crc kubenswrapper[4857]: I1128 14:00:02.734917 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed82d2ae-eed6-482d-b727-95a6ee985903-config-volume" (OuterVolumeSpecName: "config-volume") pod "ed82d2ae-eed6-482d-b727-95a6ee985903" (UID: "ed82d2ae-eed6-482d-b727-95a6ee985903"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 14:00:02 crc kubenswrapper[4857]: I1128 14:00:02.739209 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed82d2ae-eed6-482d-b727-95a6ee985903-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "ed82d2ae-eed6-482d-b727-95a6ee985903" (UID: "ed82d2ae-eed6-482d-b727-95a6ee985903"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 14:00:02 crc kubenswrapper[4857]: I1128 14:00:02.739745 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed82d2ae-eed6-482d-b727-95a6ee985903-kube-api-access-972jf" (OuterVolumeSpecName: "kube-api-access-972jf") pod "ed82d2ae-eed6-482d-b727-95a6ee985903" (UID: "ed82d2ae-eed6-482d-b727-95a6ee985903"). InnerVolumeSpecName "kube-api-access-972jf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:00:02 crc kubenswrapper[4857]: I1128 14:00:02.835452 4857 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ed82d2ae-eed6-482d-b727-95a6ee985903-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 14:00:02 crc kubenswrapper[4857]: I1128 14:00:02.835492 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-972jf\" (UniqueName: \"kubernetes.io/projected/ed82d2ae-eed6-482d-b727-95a6ee985903-kube-api-access-972jf\") on node \"crc\" DevicePath \"\"" Nov 28 14:00:02 crc kubenswrapper[4857]: I1128 14:00:02.835502 4857 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ed82d2ae-eed6-482d-b727-95a6ee985903-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 14:00:03 crc kubenswrapper[4857]: I1128 14:00:03.314913 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405640-826p2" event={"ID":"ed82d2ae-eed6-482d-b727-95a6ee985903","Type":"ContainerDied","Data":"4966c109dc230e82607ddd38b0b2a59ef1f01005268ccab601619b7c3221ec99"} Nov 28 14:00:03 crc kubenswrapper[4857]: I1128 14:00:03.314967 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4966c109dc230e82607ddd38b0b2a59ef1f01005268ccab601619b7c3221ec99" Nov 28 14:00:03 crc kubenswrapper[4857]: I1128 14:00:03.315670 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405640-826p2" Nov 28 14:00:07 crc kubenswrapper[4857]: I1128 14:00:07.229497 4857 scope.go:117] "RemoveContainer" containerID="6bdae42f0e762dbf0605c629cfcd74b51248b4c9c90654ba4b8431e4a0c5c123" Nov 28 14:00:07 crc kubenswrapper[4857]: E1128 14:00:07.230966 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:00:14 crc kubenswrapper[4857]: I1128 14:00:14.882907 4857 scope.go:117] "RemoveContainer" containerID="a53c79818510e05af3b0226cdc53a09f21f7f4d8a28bc0ff6b6c0f2f8c909714" Nov 28 14:00:14 crc kubenswrapper[4857]: I1128 14:00:14.925919 4857 scope.go:117] "RemoveContainer" containerID="ada4622992cb990d822672c1f29ee9bd8e30cbd244227c7ff8d566127a8be7c6" Nov 28 14:00:14 crc kubenswrapper[4857]: I1128 14:00:14.963187 4857 scope.go:117] "RemoveContainer" containerID="9f84f9d0085e2e21c564f7ffe093e88ebd9a92661819a1d7ef85e3d19ef948f0" Nov 28 14:00:14 crc kubenswrapper[4857]: I1128 14:00:14.981416 4857 scope.go:117] "RemoveContainer" containerID="a38af6fde6496d26130d969ef4105a2c9b98637c9a5a25701815126e08a86976" Nov 28 14:00:15 crc kubenswrapper[4857]: I1128 14:00:15.000668 4857 scope.go:117] "RemoveContainer" containerID="1456dd1bd3e0121d518cc6a6cd94cf805fa6cf250c06d4aca8868a28844b9064" Nov 28 14:00:15 crc kubenswrapper[4857]: I1128 14:00:15.041857 4857 scope.go:117] "RemoveContainer" containerID="f33b7982301c419d34731196833b066b89d549cda95aad48f7d2760468d1c871" Nov 28 14:00:15 crc kubenswrapper[4857]: I1128 14:00:15.059211 4857 scope.go:117] "RemoveContainer" 
containerID="751f12a19ac746a9f59958709a679ab6e0f8b0c295bc0836d21ad235a87f6a5b" Nov 28 14:00:15 crc kubenswrapper[4857]: I1128 14:00:15.102848 4857 scope.go:117] "RemoveContainer" containerID="f54f8a6fd4e0e294545a7856447cceacaee0009bf4bf76a318b17263cefceaa2" Nov 28 14:00:15 crc kubenswrapper[4857]: I1128 14:00:15.158382 4857 scope.go:117] "RemoveContainer" containerID="2cd656be1985a74adb1ed52d510cb94e3e9f9d8ec5011e4fd68bc155cf37553b" Nov 28 14:00:15 crc kubenswrapper[4857]: I1128 14:00:15.174423 4857 scope.go:117] "RemoveContainer" containerID="888fe2c7fb9a72d11577c72ea2a6f6697c1a76294064e4ee884fb8613fd8badd" Nov 28 14:00:19 crc kubenswrapper[4857]: I1128 14:00:19.228661 4857 scope.go:117] "RemoveContainer" containerID="6bdae42f0e762dbf0605c629cfcd74b51248b4c9c90654ba4b8431e4a0c5c123" Nov 28 14:00:19 crc kubenswrapper[4857]: E1128 14:00:19.229559 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:00:34 crc kubenswrapper[4857]: I1128 14:00:34.230271 4857 scope.go:117] "RemoveContainer" containerID="6bdae42f0e762dbf0605c629cfcd74b51248b4c9c90654ba4b8431e4a0c5c123" Nov 28 14:00:34 crc kubenswrapper[4857]: E1128 14:00:34.231078 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:00:45 crc kubenswrapper[4857]: I1128 14:00:45.229450 4857 scope.go:117] "RemoveContainer" containerID="6bdae42f0e762dbf0605c629cfcd74b51248b4c9c90654ba4b8431e4a0c5c123" Nov 28 14:00:45 crc kubenswrapper[4857]: E1128 14:00:45.230220 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:00:56 crc kubenswrapper[4857]: I1128 14:00:56.229528 4857 scope.go:117] "RemoveContainer" containerID="6bdae42f0e762dbf0605c629cfcd74b51248b4c9c90654ba4b8431e4a0c5c123" Nov 28 14:00:56 crc kubenswrapper[4857]: E1128 14:00:56.230429 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:01:11 crc kubenswrapper[4857]: I1128 14:01:11.228628 4857 scope.go:117] "RemoveContainer" containerID="6bdae42f0e762dbf0605c629cfcd74b51248b4c9c90654ba4b8431e4a0c5c123" Nov 28 14:01:11 crc kubenswrapper[4857]: E1128 14:01:11.229250 4857 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:01:15 crc kubenswrapper[4857]: I1128 14:01:15.281284 4857 scope.go:117] "RemoveContainer" containerID="c3b59678ed12525f008d6ca809cb6ade0e8c7fdf6af8a86447b76b547d93a568" Nov 28 14:01:15 crc kubenswrapper[4857]: I1128 14:01:15.302525 4857 scope.go:117] "RemoveContainer" containerID="81d6a8c7ead7745aa805448fe574ca2ff96413c2257b85ed565017d004466096" Nov 28 14:01:15 crc kubenswrapper[4857]: I1128 14:01:15.331696 4857 scope.go:117] "RemoveContainer" containerID="59688c0cb3757ef0ac6c752acc711bd544b32c0b43a9e22d4498d69defc2540d" Nov 28 14:01:15 crc kubenswrapper[4857]: I1128 14:01:15.355804 4857 scope.go:117] "RemoveContainer" containerID="24e4d1d6de5d290d9a6d84ca43643deb0280b77042affc77061527b2eb9622bd" Nov 28 14:01:15 crc kubenswrapper[4857]: I1128 14:01:15.381384 4857 scope.go:117] "RemoveContainer" containerID="9ac0d5961e71550335295fb51102d474972c116efc7d7bd4fb89a720a9055217" Nov 28 14:01:24 crc kubenswrapper[4857]: I1128 14:01:24.229571 4857 scope.go:117] "RemoveContainer" containerID="6bdae42f0e762dbf0605c629cfcd74b51248b4c9c90654ba4b8431e4a0c5c123" Nov 28 14:01:24 crc kubenswrapper[4857]: I1128 14:01:24.889595 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerStarted","Data":"3516092786f533a199a3e095718056c7b89b4e92fc20c4820d74a02d53e0de82"} Nov 28 14:03:41 crc kubenswrapper[4857]: I1128 14:03:41.309159 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 14:03:41 crc kubenswrapper[4857]: I1128 14:03:41.310347 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 14:03:58 crc kubenswrapper[4857]: I1128 14:03:58.526128 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-2vz7r"] Nov 28 14:03:58 crc kubenswrapper[4857]: E1128 14:03:58.526886 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed82d2ae-eed6-482d-b727-95a6ee985903" containerName="collect-profiles" Nov 28 14:03:58 crc kubenswrapper[4857]: I1128 14:03:58.526899 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed82d2ae-eed6-482d-b727-95a6ee985903" containerName="collect-profiles" Nov 28 14:03:58 crc kubenswrapper[4857]: I1128 14:03:58.527056 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed82d2ae-eed6-482d-b727-95a6ee985903" containerName="collect-profiles" Nov 28 14:03:58 crc kubenswrapper[4857]: I1128 14:03:58.528052 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-2vz7r" Nov 28 14:03:58 crc kubenswrapper[4857]: I1128 14:03:58.543142 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2vz7r"] Nov 28 14:03:58 crc kubenswrapper[4857]: I1128 14:03:58.637209 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/baf85960-79a2-4349-9eeb-29f51ff1b7cf-utilities\") pod \"community-operators-2vz7r\" (UID: \"baf85960-79a2-4349-9eeb-29f51ff1b7cf\") " pod="openshift-marketplace/community-operators-2vz7r" Nov 28 14:03:58 crc kubenswrapper[4857]: I1128 14:03:58.637544 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lvjsb\" (UniqueName: \"kubernetes.io/projected/baf85960-79a2-4349-9eeb-29f51ff1b7cf-kube-api-access-lvjsb\") pod \"community-operators-2vz7r\" (UID: \"baf85960-79a2-4349-9eeb-29f51ff1b7cf\") " pod="openshift-marketplace/community-operators-2vz7r" Nov 28 14:03:58 crc kubenswrapper[4857]: I1128 14:03:58.637756 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/baf85960-79a2-4349-9eeb-29f51ff1b7cf-catalog-content\") pod \"community-operators-2vz7r\" (UID: \"baf85960-79a2-4349-9eeb-29f51ff1b7cf\") " pod="openshift-marketplace/community-operators-2vz7r" Nov 28 14:03:58 crc kubenswrapper[4857]: I1128 14:03:58.739439 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/baf85960-79a2-4349-9eeb-29f51ff1b7cf-catalog-content\") pod \"community-operators-2vz7r\" (UID: \"baf85960-79a2-4349-9eeb-29f51ff1b7cf\") " pod="openshift-marketplace/community-operators-2vz7r" Nov 28 14:03:58 crc kubenswrapper[4857]: I1128 14:03:58.739499 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/baf85960-79a2-4349-9eeb-29f51ff1b7cf-utilities\") pod \"community-operators-2vz7r\" (UID: \"baf85960-79a2-4349-9eeb-29f51ff1b7cf\") " pod="openshift-marketplace/community-operators-2vz7r" Nov 28 14:03:58 crc kubenswrapper[4857]: I1128 14:03:58.739543 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lvjsb\" (UniqueName: \"kubernetes.io/projected/baf85960-79a2-4349-9eeb-29f51ff1b7cf-kube-api-access-lvjsb\") pod \"community-operators-2vz7r\" (UID: \"baf85960-79a2-4349-9eeb-29f51ff1b7cf\") " pod="openshift-marketplace/community-operators-2vz7r" Nov 28 14:03:58 crc kubenswrapper[4857]: I1128 14:03:58.740130 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/baf85960-79a2-4349-9eeb-29f51ff1b7cf-utilities\") pod \"community-operators-2vz7r\" (UID: \"baf85960-79a2-4349-9eeb-29f51ff1b7cf\") " pod="openshift-marketplace/community-operators-2vz7r" Nov 28 14:03:58 crc kubenswrapper[4857]: I1128 14:03:58.740286 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/baf85960-79a2-4349-9eeb-29f51ff1b7cf-catalog-content\") pod \"community-operators-2vz7r\" (UID: \"baf85960-79a2-4349-9eeb-29f51ff1b7cf\") " pod="openshift-marketplace/community-operators-2vz7r" Nov 28 14:03:58 crc kubenswrapper[4857]: I1128 14:03:58.772034 4857 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-lvjsb\" (UniqueName: \"kubernetes.io/projected/baf85960-79a2-4349-9eeb-29f51ff1b7cf-kube-api-access-lvjsb\") pod \"community-operators-2vz7r\" (UID: \"baf85960-79a2-4349-9eeb-29f51ff1b7cf\") " pod="openshift-marketplace/community-operators-2vz7r" Nov 28 14:03:58 crc kubenswrapper[4857]: I1128 14:03:58.855580 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2vz7r" Nov 28 14:03:59 crc kubenswrapper[4857]: I1128 14:03:59.337403 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2vz7r"] Nov 28 14:04:00 crc kubenswrapper[4857]: I1128 14:04:00.076681 4857 generic.go:334] "Generic (PLEG): container finished" podID="baf85960-79a2-4349-9eeb-29f51ff1b7cf" containerID="2c11d9126c0e1629a38c1d1e19ad0c917a4a07d9c3548096a8c01b98138c62c9" exitCode=0 Nov 28 14:04:00 crc kubenswrapper[4857]: I1128 14:04:00.076997 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2vz7r" event={"ID":"baf85960-79a2-4349-9eeb-29f51ff1b7cf","Type":"ContainerDied","Data":"2c11d9126c0e1629a38c1d1e19ad0c917a4a07d9c3548096a8c01b98138c62c9"} Nov 28 14:04:00 crc kubenswrapper[4857]: I1128 14:04:00.077026 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2vz7r" event={"ID":"baf85960-79a2-4349-9eeb-29f51ff1b7cf","Type":"ContainerStarted","Data":"f649c2329a0043e937e884475e7bf36bae86965fda01078c89d64ba41f042004"} Nov 28 14:04:00 crc kubenswrapper[4857]: I1128 14:04:00.079786 4857 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 14:04:02 crc kubenswrapper[4857]: I1128 14:04:02.092743 4857 generic.go:334] "Generic (PLEG): container finished" podID="baf85960-79a2-4349-9eeb-29f51ff1b7cf" containerID="bb6e9f745eeedc2a031f7d0479005fd7818836ad74f4c38d9335549178c3f201" exitCode=0 Nov 28 14:04:02 crc kubenswrapper[4857]: I1128 14:04:02.092864 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2vz7r" event={"ID":"baf85960-79a2-4349-9eeb-29f51ff1b7cf","Type":"ContainerDied","Data":"bb6e9f745eeedc2a031f7d0479005fd7818836ad74f4c38d9335549178c3f201"} Nov 28 14:04:03 crc kubenswrapper[4857]: I1128 14:04:03.101264 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2vz7r" event={"ID":"baf85960-79a2-4349-9eeb-29f51ff1b7cf","Type":"ContainerStarted","Data":"4ee93664805cd1c66adb9c987dedd234e248143c7c6ee35aa38a4d959d5963fc"} Nov 28 14:04:03 crc kubenswrapper[4857]: I1128 14:04:03.127343 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-2vz7r" podStartSLOduration=2.579860207 podStartE2EDuration="5.127322135s" podCreationTimestamp="2025-11-28 14:03:58 +0000 UTC" firstStartedPulling="2025-11-28 14:04:00.079317608 +0000 UTC m=+2090.203259065" lastFinishedPulling="2025-11-28 14:04:02.626779546 +0000 UTC m=+2092.750720993" observedRunningTime="2025-11-28 14:04:03.11860728 +0000 UTC m=+2093.242548717" watchObservedRunningTime="2025-11-28 14:04:03.127322135 +0000 UTC m=+2093.251263572" Nov 28 14:04:08 crc kubenswrapper[4857]: I1128 14:04:08.867018 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-2vz7r" Nov 28 14:04:08 crc kubenswrapper[4857]: I1128 14:04:08.867405 4857 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-2vz7r" Nov 28 14:04:08 crc kubenswrapper[4857]: I1128 14:04:08.907101 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-2vz7r" Nov 28 14:04:09 crc kubenswrapper[4857]: I1128 14:04:09.185123 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-2vz7r" Nov 28 14:04:09 crc kubenswrapper[4857]: I1128 14:04:09.237091 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2vz7r"] Nov 28 14:04:11 crc kubenswrapper[4857]: I1128 14:04:11.157972 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-2vz7r" podUID="baf85960-79a2-4349-9eeb-29f51ff1b7cf" containerName="registry-server" containerID="cri-o://4ee93664805cd1c66adb9c987dedd234e248143c7c6ee35aa38a4d959d5963fc" gracePeriod=2 Nov 28 14:04:11 crc kubenswrapper[4857]: I1128 14:04:11.308577 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 14:04:11 crc kubenswrapper[4857]: I1128 14:04:11.308645 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 14:04:12 crc kubenswrapper[4857]: I1128 14:04:12.012634 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2vz7r" Nov 28 14:04:12 crc kubenswrapper[4857]: I1128 14:04:12.145637 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/baf85960-79a2-4349-9eeb-29f51ff1b7cf-catalog-content\") pod \"baf85960-79a2-4349-9eeb-29f51ff1b7cf\" (UID: \"baf85960-79a2-4349-9eeb-29f51ff1b7cf\") " Nov 28 14:04:12 crc kubenswrapper[4857]: I1128 14:04:12.145699 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/baf85960-79a2-4349-9eeb-29f51ff1b7cf-utilities\") pod \"baf85960-79a2-4349-9eeb-29f51ff1b7cf\" (UID: \"baf85960-79a2-4349-9eeb-29f51ff1b7cf\") " Nov 28 14:04:12 crc kubenswrapper[4857]: I1128 14:04:12.145743 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lvjsb\" (UniqueName: \"kubernetes.io/projected/baf85960-79a2-4349-9eeb-29f51ff1b7cf-kube-api-access-lvjsb\") pod \"baf85960-79a2-4349-9eeb-29f51ff1b7cf\" (UID: \"baf85960-79a2-4349-9eeb-29f51ff1b7cf\") " Nov 28 14:04:12 crc kubenswrapper[4857]: I1128 14:04:12.148148 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/baf85960-79a2-4349-9eeb-29f51ff1b7cf-utilities" (OuterVolumeSpecName: "utilities") pod "baf85960-79a2-4349-9eeb-29f51ff1b7cf" (UID: "baf85960-79a2-4349-9eeb-29f51ff1b7cf"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:04:12 crc kubenswrapper[4857]: I1128 14:04:12.152786 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/baf85960-79a2-4349-9eeb-29f51ff1b7cf-kube-api-access-lvjsb" (OuterVolumeSpecName: "kube-api-access-lvjsb") pod "baf85960-79a2-4349-9eeb-29f51ff1b7cf" (UID: "baf85960-79a2-4349-9eeb-29f51ff1b7cf"). InnerVolumeSpecName "kube-api-access-lvjsb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:04:12 crc kubenswrapper[4857]: I1128 14:04:12.166779 4857 generic.go:334] "Generic (PLEG): container finished" podID="baf85960-79a2-4349-9eeb-29f51ff1b7cf" containerID="4ee93664805cd1c66adb9c987dedd234e248143c7c6ee35aa38a4d959d5963fc" exitCode=0 Nov 28 14:04:12 crc kubenswrapper[4857]: I1128 14:04:12.166836 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2vz7r" event={"ID":"baf85960-79a2-4349-9eeb-29f51ff1b7cf","Type":"ContainerDied","Data":"4ee93664805cd1c66adb9c987dedd234e248143c7c6ee35aa38a4d959d5963fc"} Nov 28 14:04:12 crc kubenswrapper[4857]: I1128 14:04:12.166895 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2vz7r" event={"ID":"baf85960-79a2-4349-9eeb-29f51ff1b7cf","Type":"ContainerDied","Data":"f649c2329a0043e937e884475e7bf36bae86965fda01078c89d64ba41f042004"} Nov 28 14:04:12 crc kubenswrapper[4857]: I1128 14:04:12.166964 4857 scope.go:117] "RemoveContainer" containerID="4ee93664805cd1c66adb9c987dedd234e248143c7c6ee35aa38a4d959d5963fc" Nov 28 14:04:12 crc kubenswrapper[4857]: I1128 14:04:12.167915 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2vz7r" Nov 28 14:04:12 crc kubenswrapper[4857]: I1128 14:04:12.186397 4857 scope.go:117] "RemoveContainer" containerID="bb6e9f745eeedc2a031f7d0479005fd7818836ad74f4c38d9335549178c3f201" Nov 28 14:04:12 crc kubenswrapper[4857]: I1128 14:04:12.204437 4857 scope.go:117] "RemoveContainer" containerID="2c11d9126c0e1629a38c1d1e19ad0c917a4a07d9c3548096a8c01b98138c62c9" Nov 28 14:04:12 crc kubenswrapper[4857]: I1128 14:04:12.247482 4857 scope.go:117] "RemoveContainer" containerID="4ee93664805cd1c66adb9c987dedd234e248143c7c6ee35aa38a4d959d5963fc" Nov 28 14:04:12 crc kubenswrapper[4857]: I1128 14:04:12.248692 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/baf85960-79a2-4349-9eeb-29f51ff1b7cf-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 14:04:12 crc kubenswrapper[4857]: I1128 14:04:12.248729 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lvjsb\" (UniqueName: \"kubernetes.io/projected/baf85960-79a2-4349-9eeb-29f51ff1b7cf-kube-api-access-lvjsb\") on node \"crc\" DevicePath \"\"" Nov 28 14:04:12 crc kubenswrapper[4857]: E1128 14:04:12.249377 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ee93664805cd1c66adb9c987dedd234e248143c7c6ee35aa38a4d959d5963fc\": container with ID starting with 4ee93664805cd1c66adb9c987dedd234e248143c7c6ee35aa38a4d959d5963fc not found: ID does not exist" containerID="4ee93664805cd1c66adb9c987dedd234e248143c7c6ee35aa38a4d959d5963fc" Nov 28 14:04:12 crc kubenswrapper[4857]: I1128 14:04:12.249623 4857 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"4ee93664805cd1c66adb9c987dedd234e248143c7c6ee35aa38a4d959d5963fc"} err="failed to get container status \"4ee93664805cd1c66adb9c987dedd234e248143c7c6ee35aa38a4d959d5963fc\": rpc error: code = NotFound desc = could not find container \"4ee93664805cd1c66adb9c987dedd234e248143c7c6ee35aa38a4d959d5963fc\": container with ID starting with 4ee93664805cd1c66adb9c987dedd234e248143c7c6ee35aa38a4d959d5963fc not found: ID does not exist" Nov 28 14:04:12 crc kubenswrapper[4857]: I1128 14:04:12.249660 4857 scope.go:117] "RemoveContainer" containerID="bb6e9f745eeedc2a031f7d0479005fd7818836ad74f4c38d9335549178c3f201" Nov 28 14:04:12 crc kubenswrapper[4857]: E1128 14:04:12.251058 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bb6e9f745eeedc2a031f7d0479005fd7818836ad74f4c38d9335549178c3f201\": container with ID starting with bb6e9f745eeedc2a031f7d0479005fd7818836ad74f4c38d9335549178c3f201 not found: ID does not exist" containerID="bb6e9f745eeedc2a031f7d0479005fd7818836ad74f4c38d9335549178c3f201" Nov 28 14:04:12 crc kubenswrapper[4857]: I1128 14:04:12.251229 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb6e9f745eeedc2a031f7d0479005fd7818836ad74f4c38d9335549178c3f201"} err="failed to get container status \"bb6e9f745eeedc2a031f7d0479005fd7818836ad74f4c38d9335549178c3f201\": rpc error: code = NotFound desc = could not find container \"bb6e9f745eeedc2a031f7d0479005fd7818836ad74f4c38d9335549178c3f201\": container with ID starting with bb6e9f745eeedc2a031f7d0479005fd7818836ad74f4c38d9335549178c3f201 not found: ID does not exist" Nov 28 14:04:12 crc kubenswrapper[4857]: I1128 14:04:12.251322 4857 scope.go:117] "RemoveContainer" containerID="2c11d9126c0e1629a38c1d1e19ad0c917a4a07d9c3548096a8c01b98138c62c9" Nov 28 14:04:12 crc kubenswrapper[4857]: E1128 14:04:12.251659 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2c11d9126c0e1629a38c1d1e19ad0c917a4a07d9c3548096a8c01b98138c62c9\": container with ID starting with 2c11d9126c0e1629a38c1d1e19ad0c917a4a07d9c3548096a8c01b98138c62c9 not found: ID does not exist" containerID="2c11d9126c0e1629a38c1d1e19ad0c917a4a07d9c3548096a8c01b98138c62c9" Nov 28 14:04:12 crc kubenswrapper[4857]: I1128 14:04:12.251699 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2c11d9126c0e1629a38c1d1e19ad0c917a4a07d9c3548096a8c01b98138c62c9"} err="failed to get container status \"2c11d9126c0e1629a38c1d1e19ad0c917a4a07d9c3548096a8c01b98138c62c9\": rpc error: code = NotFound desc = could not find container \"2c11d9126c0e1629a38c1d1e19ad0c917a4a07d9c3548096a8c01b98138c62c9\": container with ID starting with 2c11d9126c0e1629a38c1d1e19ad0c917a4a07d9c3548096a8c01b98138c62c9 not found: ID does not exist" Nov 28 14:04:12 crc kubenswrapper[4857]: I1128 14:04:12.360602 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/baf85960-79a2-4349-9eeb-29f51ff1b7cf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "baf85960-79a2-4349-9eeb-29f51ff1b7cf" (UID: "baf85960-79a2-4349-9eeb-29f51ff1b7cf"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:04:12 crc kubenswrapper[4857]: I1128 14:04:12.451907 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/baf85960-79a2-4349-9eeb-29f51ff1b7cf-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 14:04:12 crc kubenswrapper[4857]: I1128 14:04:12.507054 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2vz7r"] Nov 28 14:04:12 crc kubenswrapper[4857]: I1128 14:04:12.513366 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-2vz7r"] Nov 28 14:04:14 crc kubenswrapper[4857]: I1128 14:04:14.239005 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="baf85960-79a2-4349-9eeb-29f51ff1b7cf" path="/var/lib/kubelet/pods/baf85960-79a2-4349-9eeb-29f51ff1b7cf/volumes" Nov 28 14:04:16 crc kubenswrapper[4857]: I1128 14:04:16.007077 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-4nvsb"] Nov 28 14:04:16 crc kubenswrapper[4857]: E1128 14:04:16.007698 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="baf85960-79a2-4349-9eeb-29f51ff1b7cf" containerName="extract-utilities" Nov 28 14:04:16 crc kubenswrapper[4857]: I1128 14:04:16.007711 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="baf85960-79a2-4349-9eeb-29f51ff1b7cf" containerName="extract-utilities" Nov 28 14:04:16 crc kubenswrapper[4857]: E1128 14:04:16.007729 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="baf85960-79a2-4349-9eeb-29f51ff1b7cf" containerName="extract-content" Nov 28 14:04:16 crc kubenswrapper[4857]: I1128 14:04:16.007735 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="baf85960-79a2-4349-9eeb-29f51ff1b7cf" containerName="extract-content" Nov 28 14:04:16 crc kubenswrapper[4857]: E1128 14:04:16.007750 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="baf85960-79a2-4349-9eeb-29f51ff1b7cf" containerName="registry-server" Nov 28 14:04:16 crc kubenswrapper[4857]: I1128 14:04:16.007757 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="baf85960-79a2-4349-9eeb-29f51ff1b7cf" containerName="registry-server" Nov 28 14:04:16 crc kubenswrapper[4857]: I1128 14:04:16.007899 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="baf85960-79a2-4349-9eeb-29f51ff1b7cf" containerName="registry-server" Nov 28 14:04:16 crc kubenswrapper[4857]: I1128 14:04:16.009493 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4nvsb" Nov 28 14:04:16 crc kubenswrapper[4857]: I1128 14:04:16.013549 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4nvsb"] Nov 28 14:04:16 crc kubenswrapper[4857]: I1128 14:04:16.110546 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bde13430-6750-4d82-89b1-b817b3278def-utilities\") pod \"redhat-operators-4nvsb\" (UID: \"bde13430-6750-4d82-89b1-b817b3278def\") " pod="openshift-marketplace/redhat-operators-4nvsb" Nov 28 14:04:16 crc kubenswrapper[4857]: I1128 14:04:16.110613 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bde13430-6750-4d82-89b1-b817b3278def-catalog-content\") pod \"redhat-operators-4nvsb\" (UID: \"bde13430-6750-4d82-89b1-b817b3278def\") " pod="openshift-marketplace/redhat-operators-4nvsb" Nov 28 14:04:16 crc kubenswrapper[4857]: I1128 14:04:16.110638 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-drn9q\" (UniqueName: \"kubernetes.io/projected/bde13430-6750-4d82-89b1-b817b3278def-kube-api-access-drn9q\") pod \"redhat-operators-4nvsb\" (UID: \"bde13430-6750-4d82-89b1-b817b3278def\") " pod="openshift-marketplace/redhat-operators-4nvsb" Nov 28 14:04:16 crc kubenswrapper[4857]: I1128 14:04:16.212280 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bde13430-6750-4d82-89b1-b817b3278def-utilities\") pod \"redhat-operators-4nvsb\" (UID: \"bde13430-6750-4d82-89b1-b817b3278def\") " pod="openshift-marketplace/redhat-operators-4nvsb" Nov 28 14:04:16 crc kubenswrapper[4857]: I1128 14:04:16.212567 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bde13430-6750-4d82-89b1-b817b3278def-catalog-content\") pod \"redhat-operators-4nvsb\" (UID: \"bde13430-6750-4d82-89b1-b817b3278def\") " pod="openshift-marketplace/redhat-operators-4nvsb" Nov 28 14:04:16 crc kubenswrapper[4857]: I1128 14:04:16.212638 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-drn9q\" (UniqueName: \"kubernetes.io/projected/bde13430-6750-4d82-89b1-b817b3278def-kube-api-access-drn9q\") pod \"redhat-operators-4nvsb\" (UID: \"bde13430-6750-4d82-89b1-b817b3278def\") " pod="openshift-marketplace/redhat-operators-4nvsb" Nov 28 14:04:16 crc kubenswrapper[4857]: I1128 14:04:16.212877 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bde13430-6750-4d82-89b1-b817b3278def-utilities\") pod \"redhat-operators-4nvsb\" (UID: \"bde13430-6750-4d82-89b1-b817b3278def\") " pod="openshift-marketplace/redhat-operators-4nvsb" Nov 28 14:04:16 crc kubenswrapper[4857]: I1128 14:04:16.213167 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bde13430-6750-4d82-89b1-b817b3278def-catalog-content\") pod \"redhat-operators-4nvsb\" (UID: \"bde13430-6750-4d82-89b1-b817b3278def\") " pod="openshift-marketplace/redhat-operators-4nvsb" Nov 28 14:04:16 crc kubenswrapper[4857]: I1128 14:04:16.244081 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-drn9q\" (UniqueName: \"kubernetes.io/projected/bde13430-6750-4d82-89b1-b817b3278def-kube-api-access-drn9q\") pod \"redhat-operators-4nvsb\" (UID: \"bde13430-6750-4d82-89b1-b817b3278def\") " pod="openshift-marketplace/redhat-operators-4nvsb" Nov 28 14:04:16 crc kubenswrapper[4857]: I1128 14:04:16.368201 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4nvsb" Nov 28 14:04:16 crc kubenswrapper[4857]: I1128 14:04:16.810696 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4nvsb"] Nov 28 14:04:17 crc kubenswrapper[4857]: I1128 14:04:17.202141 4857 generic.go:334] "Generic (PLEG): container finished" podID="bde13430-6750-4d82-89b1-b817b3278def" containerID="6b80a41a345867f30fc880028c653fcc69a5f2cade4dd15eba18bfea26010c33" exitCode=0 Nov 28 14:04:17 crc kubenswrapper[4857]: I1128 14:04:17.202187 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4nvsb" event={"ID":"bde13430-6750-4d82-89b1-b817b3278def","Type":"ContainerDied","Data":"6b80a41a345867f30fc880028c653fcc69a5f2cade4dd15eba18bfea26010c33"} Nov 28 14:04:17 crc kubenswrapper[4857]: I1128 14:04:17.202212 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4nvsb" event={"ID":"bde13430-6750-4d82-89b1-b817b3278def","Type":"ContainerStarted","Data":"b62f0ab1a0c01f02fd41bbfb92997590ba1f471ce1fd5d0ef3e79d479ea9940d"} Nov 28 14:04:18 crc kubenswrapper[4857]: I1128 14:04:18.214140 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4nvsb" event={"ID":"bde13430-6750-4d82-89b1-b817b3278def","Type":"ContainerStarted","Data":"e0af244979e8ca2a7ddc953e6aec170e010875d2c8b2069e49a7fc8229367f36"} Nov 28 14:04:19 crc kubenswrapper[4857]: I1128 14:04:19.226387 4857 generic.go:334] "Generic (PLEG): container finished" podID="bde13430-6750-4d82-89b1-b817b3278def" containerID="e0af244979e8ca2a7ddc953e6aec170e010875d2c8b2069e49a7fc8229367f36" exitCode=0 Nov 28 14:04:19 crc kubenswrapper[4857]: I1128 14:04:19.226495 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4nvsb" event={"ID":"bde13430-6750-4d82-89b1-b817b3278def","Type":"ContainerDied","Data":"e0af244979e8ca2a7ddc953e6aec170e010875d2c8b2069e49a7fc8229367f36"} Nov 28 14:04:20 crc kubenswrapper[4857]: I1128 14:04:20.238873 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4nvsb" event={"ID":"bde13430-6750-4d82-89b1-b817b3278def","Type":"ContainerStarted","Data":"7e85daf9331fb8b336984cc95f504ae684732d7bdc6f538a98af7fa25fd16f9b"} Nov 28 14:04:20 crc kubenswrapper[4857]: I1128 14:04:20.255908 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-4nvsb" podStartSLOduration=2.7238781 podStartE2EDuration="5.255887982s" podCreationTimestamp="2025-11-28 14:04:15 +0000 UTC" firstStartedPulling="2025-11-28 14:04:17.204863604 +0000 UTC m=+2107.328805041" lastFinishedPulling="2025-11-28 14:04:19.736873486 +0000 UTC m=+2109.860814923" observedRunningTime="2025-11-28 14:04:20.255419189 +0000 UTC m=+2110.379360636" watchObservedRunningTime="2025-11-28 14:04:20.255887982 +0000 UTC m=+2110.379829419" Nov 28 14:04:26 crc kubenswrapper[4857]: I1128 14:04:26.369441 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-4nvsb" Nov 28 
14:04:26 crc kubenswrapper[4857]: I1128 14:04:26.370131 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-4nvsb" Nov 28 14:04:26 crc kubenswrapper[4857]: I1128 14:04:26.418156 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-4nvsb" Nov 28 14:04:27 crc kubenswrapper[4857]: I1128 14:04:27.342411 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-4nvsb" Nov 28 14:04:27 crc kubenswrapper[4857]: I1128 14:04:27.393533 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4nvsb"] Nov 28 14:04:29 crc kubenswrapper[4857]: I1128 14:04:29.307922 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-4nvsb" podUID="bde13430-6750-4d82-89b1-b817b3278def" containerName="registry-server" containerID="cri-o://7e85daf9331fb8b336984cc95f504ae684732d7bdc6f538a98af7fa25fd16f9b" gracePeriod=2 Nov 28 14:04:31 crc kubenswrapper[4857]: I1128 14:04:31.328487 4857 generic.go:334] "Generic (PLEG): container finished" podID="bde13430-6750-4d82-89b1-b817b3278def" containerID="7e85daf9331fb8b336984cc95f504ae684732d7bdc6f538a98af7fa25fd16f9b" exitCode=0 Nov 28 14:04:31 crc kubenswrapper[4857]: I1128 14:04:31.328620 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4nvsb" event={"ID":"bde13430-6750-4d82-89b1-b817b3278def","Type":"ContainerDied","Data":"7e85daf9331fb8b336984cc95f504ae684732d7bdc6f538a98af7fa25fd16f9b"} Nov 28 14:04:31 crc kubenswrapper[4857]: I1128 14:04:31.724238 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4nvsb" Nov 28 14:04:31 crc kubenswrapper[4857]: I1128 14:04:31.762160 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bde13430-6750-4d82-89b1-b817b3278def-catalog-content\") pod \"bde13430-6750-4d82-89b1-b817b3278def\" (UID: \"bde13430-6750-4d82-89b1-b817b3278def\") " Nov 28 14:04:31 crc kubenswrapper[4857]: I1128 14:04:31.762214 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bde13430-6750-4d82-89b1-b817b3278def-utilities\") pod \"bde13430-6750-4d82-89b1-b817b3278def\" (UID: \"bde13430-6750-4d82-89b1-b817b3278def\") " Nov 28 14:04:31 crc kubenswrapper[4857]: I1128 14:04:31.762256 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-drn9q\" (UniqueName: \"kubernetes.io/projected/bde13430-6750-4d82-89b1-b817b3278def-kube-api-access-drn9q\") pod \"bde13430-6750-4d82-89b1-b817b3278def\" (UID: \"bde13430-6750-4d82-89b1-b817b3278def\") " Nov 28 14:04:31 crc kubenswrapper[4857]: I1128 14:04:31.763008 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bde13430-6750-4d82-89b1-b817b3278def-utilities" (OuterVolumeSpecName: "utilities") pod "bde13430-6750-4d82-89b1-b817b3278def" (UID: "bde13430-6750-4d82-89b1-b817b3278def"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:04:31 crc kubenswrapper[4857]: I1128 14:04:31.770398 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bde13430-6750-4d82-89b1-b817b3278def-kube-api-access-drn9q" (OuterVolumeSpecName: "kube-api-access-drn9q") pod "bde13430-6750-4d82-89b1-b817b3278def" (UID: "bde13430-6750-4d82-89b1-b817b3278def"). InnerVolumeSpecName "kube-api-access-drn9q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:04:31 crc kubenswrapper[4857]: I1128 14:04:31.863625 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bde13430-6750-4d82-89b1-b817b3278def-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 14:04:31 crc kubenswrapper[4857]: I1128 14:04:31.863660 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-drn9q\" (UniqueName: \"kubernetes.io/projected/bde13430-6750-4d82-89b1-b817b3278def-kube-api-access-drn9q\") on node \"crc\" DevicePath \"\"" Nov 28 14:04:31 crc kubenswrapper[4857]: I1128 14:04:31.909753 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bde13430-6750-4d82-89b1-b817b3278def-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bde13430-6750-4d82-89b1-b817b3278def" (UID: "bde13430-6750-4d82-89b1-b817b3278def"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:04:31 crc kubenswrapper[4857]: I1128 14:04:31.964527 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bde13430-6750-4d82-89b1-b817b3278def-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 14:04:32 crc kubenswrapper[4857]: I1128 14:04:32.338074 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4nvsb" event={"ID":"bde13430-6750-4d82-89b1-b817b3278def","Type":"ContainerDied","Data":"b62f0ab1a0c01f02fd41bbfb92997590ba1f471ce1fd5d0ef3e79d479ea9940d"} Nov 28 14:04:32 crc kubenswrapper[4857]: I1128 14:04:32.338131 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4nvsb" Nov 28 14:04:32 crc kubenswrapper[4857]: I1128 14:04:32.338135 4857 scope.go:117] "RemoveContainer" containerID="7e85daf9331fb8b336984cc95f504ae684732d7bdc6f538a98af7fa25fd16f9b" Nov 28 14:04:32 crc kubenswrapper[4857]: I1128 14:04:32.362429 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4nvsb"] Nov 28 14:04:32 crc kubenswrapper[4857]: I1128 14:04:32.368457 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-4nvsb"] Nov 28 14:04:32 crc kubenswrapper[4857]: I1128 14:04:32.376466 4857 scope.go:117] "RemoveContainer" containerID="e0af244979e8ca2a7ddc953e6aec170e010875d2c8b2069e49a7fc8229367f36" Nov 28 14:04:32 crc kubenswrapper[4857]: I1128 14:04:32.397507 4857 scope.go:117] "RemoveContainer" containerID="6b80a41a345867f30fc880028c653fcc69a5f2cade4dd15eba18bfea26010c33" Nov 28 14:04:34 crc kubenswrapper[4857]: I1128 14:04:34.237364 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bde13430-6750-4d82-89b1-b817b3278def" path="/var/lib/kubelet/pods/bde13430-6750-4d82-89b1-b817b3278def/volumes" Nov 28 14:04:35 crc kubenswrapper[4857]: I1128 14:04:35.173166 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-xdkqd"] Nov 28 14:04:35 crc kubenswrapper[4857]: E1128 14:04:35.173466 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bde13430-6750-4d82-89b1-b817b3278def" containerName="registry-server" Nov 28 14:04:35 crc kubenswrapper[4857]: I1128 14:04:35.173478 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="bde13430-6750-4d82-89b1-b817b3278def" containerName="registry-server" Nov 28 14:04:35 crc kubenswrapper[4857]: E1128 14:04:35.173493 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bde13430-6750-4d82-89b1-b817b3278def" containerName="extract-content" Nov 28 14:04:35 crc kubenswrapper[4857]: I1128 14:04:35.173499 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="bde13430-6750-4d82-89b1-b817b3278def" containerName="extract-content" Nov 28 14:04:35 crc kubenswrapper[4857]: E1128 14:04:35.173511 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bde13430-6750-4d82-89b1-b817b3278def" containerName="extract-utilities" Nov 28 14:04:35 crc kubenswrapper[4857]: I1128 14:04:35.173517 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="bde13430-6750-4d82-89b1-b817b3278def" containerName="extract-utilities" Nov 28 14:04:35 crc kubenswrapper[4857]: I1128 14:04:35.173687 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="bde13430-6750-4d82-89b1-b817b3278def" containerName="registry-server" Nov 28 14:04:35 crc kubenswrapper[4857]: I1128 14:04:35.174693 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xdkqd" Nov 28 14:04:35 crc kubenswrapper[4857]: I1128 14:04:35.187739 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xdkqd"] Nov 28 14:04:35 crc kubenswrapper[4857]: I1128 14:04:35.305322 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jxxlc\" (UniqueName: \"kubernetes.io/projected/f95124f2-19c7-439a-99c9-2524a610f66a-kube-api-access-jxxlc\") pod \"redhat-marketplace-xdkqd\" (UID: \"f95124f2-19c7-439a-99c9-2524a610f66a\") " pod="openshift-marketplace/redhat-marketplace-xdkqd" Nov 28 14:04:35 crc kubenswrapper[4857]: I1128 14:04:35.305552 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f95124f2-19c7-439a-99c9-2524a610f66a-catalog-content\") pod \"redhat-marketplace-xdkqd\" (UID: \"f95124f2-19c7-439a-99c9-2524a610f66a\") " pod="openshift-marketplace/redhat-marketplace-xdkqd" Nov 28 14:04:35 crc kubenswrapper[4857]: I1128 14:04:35.305685 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f95124f2-19c7-439a-99c9-2524a610f66a-utilities\") pod \"redhat-marketplace-xdkqd\" (UID: \"f95124f2-19c7-439a-99c9-2524a610f66a\") " pod="openshift-marketplace/redhat-marketplace-xdkqd" Nov 28 14:04:35 crc kubenswrapper[4857]: I1128 14:04:35.406544 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f95124f2-19c7-439a-99c9-2524a610f66a-catalog-content\") pod \"redhat-marketplace-xdkqd\" (UID: \"f95124f2-19c7-439a-99c9-2524a610f66a\") " pod="openshift-marketplace/redhat-marketplace-xdkqd" Nov 28 14:04:35 crc kubenswrapper[4857]: I1128 14:04:35.406642 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f95124f2-19c7-439a-99c9-2524a610f66a-utilities\") pod \"redhat-marketplace-xdkqd\" (UID: \"f95124f2-19c7-439a-99c9-2524a610f66a\") " pod="openshift-marketplace/redhat-marketplace-xdkqd" Nov 28 14:04:35 crc kubenswrapper[4857]: I1128 14:04:35.406687 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jxxlc\" (UniqueName: \"kubernetes.io/projected/f95124f2-19c7-439a-99c9-2524a610f66a-kube-api-access-jxxlc\") pod \"redhat-marketplace-xdkqd\" (UID: \"f95124f2-19c7-439a-99c9-2524a610f66a\") " pod="openshift-marketplace/redhat-marketplace-xdkqd" Nov 28 14:04:35 crc kubenswrapper[4857]: I1128 14:04:35.407300 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f95124f2-19c7-439a-99c9-2524a610f66a-catalog-content\") pod \"redhat-marketplace-xdkqd\" (UID: \"f95124f2-19c7-439a-99c9-2524a610f66a\") " pod="openshift-marketplace/redhat-marketplace-xdkqd" Nov 28 14:04:35 crc kubenswrapper[4857]: I1128 14:04:35.407334 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f95124f2-19c7-439a-99c9-2524a610f66a-utilities\") pod \"redhat-marketplace-xdkqd\" (UID: \"f95124f2-19c7-439a-99c9-2524a610f66a\") " pod="openshift-marketplace/redhat-marketplace-xdkqd" Nov 28 14:04:35 crc kubenswrapper[4857]: I1128 14:04:35.426915 4857 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-jxxlc\" (UniqueName: \"kubernetes.io/projected/f95124f2-19c7-439a-99c9-2524a610f66a-kube-api-access-jxxlc\") pod \"redhat-marketplace-xdkqd\" (UID: \"f95124f2-19c7-439a-99c9-2524a610f66a\") " pod="openshift-marketplace/redhat-marketplace-xdkqd" Nov 28 14:04:35 crc kubenswrapper[4857]: I1128 14:04:35.495572 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xdkqd" Nov 28 14:04:35 crc kubenswrapper[4857]: I1128 14:04:35.930618 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xdkqd"] Nov 28 14:04:36 crc kubenswrapper[4857]: E1128 14:04:36.231232 4857 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf95124f2_19c7_439a_99c9_2524a610f66a.slice/crio-e538165d6a07f6b0b9d36974b9c2ae2c78cca45d6e54d78f802ec4db5461e523.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf95124f2_19c7_439a_99c9_2524a610f66a.slice/crio-conmon-e538165d6a07f6b0b9d36974b9c2ae2c78cca45d6e54d78f802ec4db5461e523.scope\": RecentStats: unable to find data in memory cache]" Nov 28 14:04:36 crc kubenswrapper[4857]: I1128 14:04:36.371086 4857 generic.go:334] "Generic (PLEG): container finished" podID="f95124f2-19c7-439a-99c9-2524a610f66a" containerID="e538165d6a07f6b0b9d36974b9c2ae2c78cca45d6e54d78f802ec4db5461e523" exitCode=0 Nov 28 14:04:36 crc kubenswrapper[4857]: I1128 14:04:36.371353 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xdkqd" event={"ID":"f95124f2-19c7-439a-99c9-2524a610f66a","Type":"ContainerDied","Data":"e538165d6a07f6b0b9d36974b9c2ae2c78cca45d6e54d78f802ec4db5461e523"} Nov 28 14:04:36 crc kubenswrapper[4857]: I1128 14:04:36.373965 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xdkqd" event={"ID":"f95124f2-19c7-439a-99c9-2524a610f66a","Type":"ContainerStarted","Data":"a1eab6b0c679d86d85fc0462659064bb7c0e93351b2f51474c08a16cbf129b2a"} Nov 28 14:04:38 crc kubenswrapper[4857]: I1128 14:04:38.390205 4857 generic.go:334] "Generic (PLEG): container finished" podID="f95124f2-19c7-439a-99c9-2524a610f66a" containerID="38f8c61e18867ded470b3df853a14806570c61c601562664959212ad26a104cd" exitCode=0 Nov 28 14:04:38 crc kubenswrapper[4857]: I1128 14:04:38.390325 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xdkqd" event={"ID":"f95124f2-19c7-439a-99c9-2524a610f66a","Type":"ContainerDied","Data":"38f8c61e18867ded470b3df853a14806570c61c601562664959212ad26a104cd"} Nov 28 14:04:39 crc kubenswrapper[4857]: I1128 14:04:39.401533 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xdkqd" event={"ID":"f95124f2-19c7-439a-99c9-2524a610f66a","Type":"ContainerStarted","Data":"32e2e63b3458ccf74fc79fd9ea203437c9f078c36506001157dd86b9b967a4c8"} Nov 28 14:04:39 crc kubenswrapper[4857]: I1128 14:04:39.422679 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-xdkqd" podStartSLOduration=1.7727414929999998 podStartE2EDuration="4.422659766s" podCreationTimestamp="2025-11-28 14:04:35 +0000 UTC" firstStartedPulling="2025-11-28 14:04:36.374442406 +0000 UTC m=+2126.498383843" lastFinishedPulling="2025-11-28 14:04:39.024360679 
+0000 UTC m=+2129.148302116" observedRunningTime="2025-11-28 14:04:39.41907402 +0000 UTC m=+2129.543015477" watchObservedRunningTime="2025-11-28 14:04:39.422659766 +0000 UTC m=+2129.546601203" Nov 28 14:04:41 crc kubenswrapper[4857]: I1128 14:04:41.308584 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 14:04:41 crc kubenswrapper[4857]: I1128 14:04:41.308702 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 14:04:41 crc kubenswrapper[4857]: I1128 14:04:41.308785 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 14:04:41 crc kubenswrapper[4857]: I1128 14:04:41.309707 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3516092786f533a199a3e095718056c7b89b4e92fc20c4820d74a02d53e0de82"} pod="openshift-machine-config-operator/machine-config-daemon-dshsf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 14:04:41 crc kubenswrapper[4857]: I1128 14:04:41.309820 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" containerID="cri-o://3516092786f533a199a3e095718056c7b89b4e92fc20c4820d74a02d53e0de82" gracePeriod=600 Nov 28 14:04:42 crc kubenswrapper[4857]: I1128 14:04:42.428900 4857 generic.go:334] "Generic (PLEG): container finished" podID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerID="3516092786f533a199a3e095718056c7b89b4e92fc20c4820d74a02d53e0de82" exitCode=0 Nov 28 14:04:42 crc kubenswrapper[4857]: I1128 14:04:42.428981 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerDied","Data":"3516092786f533a199a3e095718056c7b89b4e92fc20c4820d74a02d53e0de82"} Nov 28 14:04:42 crc kubenswrapper[4857]: I1128 14:04:42.429345 4857 scope.go:117] "RemoveContainer" containerID="6bdae42f0e762dbf0605c629cfcd74b51248b4c9c90654ba4b8431e4a0c5c123" Nov 28 14:04:43 crc kubenswrapper[4857]: I1128 14:04:43.441050 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerStarted","Data":"746c210a1bd90a856e577cc52c82e3eb0d8c3877e02c738daba7fed28ad73c0f"} Nov 28 14:04:45 crc kubenswrapper[4857]: I1128 14:04:45.496192 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-xdkqd" Nov 28 14:04:45 crc kubenswrapper[4857]: I1128 14:04:45.496765 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-xdkqd" Nov 28 14:04:45 crc kubenswrapper[4857]: I1128 14:04:45.556477 4857 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-xdkqd" Nov 28 14:04:46 crc kubenswrapper[4857]: I1128 14:04:46.513517 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-xdkqd" Nov 28 14:04:49 crc kubenswrapper[4857]: I1128 14:04:49.154590 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xdkqd"] Nov 28 14:04:49 crc kubenswrapper[4857]: I1128 14:04:49.155008 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-xdkqd" podUID="f95124f2-19c7-439a-99c9-2524a610f66a" containerName="registry-server" containerID="cri-o://32e2e63b3458ccf74fc79fd9ea203437c9f078c36506001157dd86b9b967a4c8" gracePeriod=2 Nov 28 14:04:49 crc kubenswrapper[4857]: I1128 14:04:49.498171 4857 generic.go:334] "Generic (PLEG): container finished" podID="f95124f2-19c7-439a-99c9-2524a610f66a" containerID="32e2e63b3458ccf74fc79fd9ea203437c9f078c36506001157dd86b9b967a4c8" exitCode=0 Nov 28 14:04:49 crc kubenswrapper[4857]: I1128 14:04:49.498241 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xdkqd" event={"ID":"f95124f2-19c7-439a-99c9-2524a610f66a","Type":"ContainerDied","Data":"32e2e63b3458ccf74fc79fd9ea203437c9f078c36506001157dd86b9b967a4c8"} Nov 28 14:04:50 crc kubenswrapper[4857]: I1128 14:04:50.064252 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xdkqd" Nov 28 14:04:50 crc kubenswrapper[4857]: I1128 14:04:50.232702 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jxxlc\" (UniqueName: \"kubernetes.io/projected/f95124f2-19c7-439a-99c9-2524a610f66a-kube-api-access-jxxlc\") pod \"f95124f2-19c7-439a-99c9-2524a610f66a\" (UID: \"f95124f2-19c7-439a-99c9-2524a610f66a\") " Nov 28 14:04:50 crc kubenswrapper[4857]: I1128 14:04:50.232878 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f95124f2-19c7-439a-99c9-2524a610f66a-utilities\") pod \"f95124f2-19c7-439a-99c9-2524a610f66a\" (UID: \"f95124f2-19c7-439a-99c9-2524a610f66a\") " Nov 28 14:04:50 crc kubenswrapper[4857]: I1128 14:04:50.232922 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f95124f2-19c7-439a-99c9-2524a610f66a-catalog-content\") pod \"f95124f2-19c7-439a-99c9-2524a610f66a\" (UID: \"f95124f2-19c7-439a-99c9-2524a610f66a\") " Nov 28 14:04:50 crc kubenswrapper[4857]: I1128 14:04:50.233817 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f95124f2-19c7-439a-99c9-2524a610f66a-utilities" (OuterVolumeSpecName: "utilities") pod "f95124f2-19c7-439a-99c9-2524a610f66a" (UID: "f95124f2-19c7-439a-99c9-2524a610f66a"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:04:50 crc kubenswrapper[4857]: I1128 14:04:50.234393 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f95124f2-19c7-439a-99c9-2524a610f66a-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 14:04:50 crc kubenswrapper[4857]: I1128 14:04:50.249571 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f95124f2-19c7-439a-99c9-2524a610f66a-kube-api-access-jxxlc" (OuterVolumeSpecName: "kube-api-access-jxxlc") pod "f95124f2-19c7-439a-99c9-2524a610f66a" (UID: "f95124f2-19c7-439a-99c9-2524a610f66a"). InnerVolumeSpecName "kube-api-access-jxxlc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:04:50 crc kubenswrapper[4857]: I1128 14:04:50.260223 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f95124f2-19c7-439a-99c9-2524a610f66a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f95124f2-19c7-439a-99c9-2524a610f66a" (UID: "f95124f2-19c7-439a-99c9-2524a610f66a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:04:50 crc kubenswrapper[4857]: I1128 14:04:50.336058 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f95124f2-19c7-439a-99c9-2524a610f66a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 14:04:50 crc kubenswrapper[4857]: I1128 14:04:50.336098 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jxxlc\" (UniqueName: \"kubernetes.io/projected/f95124f2-19c7-439a-99c9-2524a610f66a-kube-api-access-jxxlc\") on node \"crc\" DevicePath \"\"" Nov 28 14:04:50 crc kubenswrapper[4857]: I1128 14:04:50.507467 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xdkqd" event={"ID":"f95124f2-19c7-439a-99c9-2524a610f66a","Type":"ContainerDied","Data":"a1eab6b0c679d86d85fc0462659064bb7c0e93351b2f51474c08a16cbf129b2a"} Nov 28 14:04:50 crc kubenswrapper[4857]: I1128 14:04:50.507521 4857 scope.go:117] "RemoveContainer" containerID="32e2e63b3458ccf74fc79fd9ea203437c9f078c36506001157dd86b9b967a4c8" Nov 28 14:04:50 crc kubenswrapper[4857]: I1128 14:04:50.507797 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xdkqd" Nov 28 14:04:50 crc kubenswrapper[4857]: I1128 14:04:50.542672 4857 scope.go:117] "RemoveContainer" containerID="38f8c61e18867ded470b3df853a14806570c61c601562664959212ad26a104cd" Nov 28 14:04:50 crc kubenswrapper[4857]: I1128 14:04:50.545107 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xdkqd"] Nov 28 14:04:50 crc kubenswrapper[4857]: I1128 14:04:50.551120 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-xdkqd"] Nov 28 14:04:50 crc kubenswrapper[4857]: I1128 14:04:50.582225 4857 scope.go:117] "RemoveContainer" containerID="e538165d6a07f6b0b9d36974b9c2ae2c78cca45d6e54d78f802ec4db5461e523" Nov 28 14:04:52 crc kubenswrapper[4857]: I1128 14:04:52.239458 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f95124f2-19c7-439a-99c9-2524a610f66a" path="/var/lib/kubelet/pods/f95124f2-19c7-439a-99c9-2524a610f66a/volumes" Nov 28 14:05:19 crc kubenswrapper[4857]: I1128 14:05:19.551640 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-fc4h4"] Nov 28 14:05:19 crc kubenswrapper[4857]: E1128 14:05:19.552548 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f95124f2-19c7-439a-99c9-2524a610f66a" containerName="extract-content" Nov 28 14:05:19 crc kubenswrapper[4857]: I1128 14:05:19.552561 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f95124f2-19c7-439a-99c9-2524a610f66a" containerName="extract-content" Nov 28 14:05:19 crc kubenswrapper[4857]: E1128 14:05:19.552581 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f95124f2-19c7-439a-99c9-2524a610f66a" containerName="extract-utilities" Nov 28 14:05:19 crc kubenswrapper[4857]: I1128 14:05:19.552587 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f95124f2-19c7-439a-99c9-2524a610f66a" containerName="extract-utilities" Nov 28 14:05:19 crc kubenswrapper[4857]: E1128 14:05:19.552608 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f95124f2-19c7-439a-99c9-2524a610f66a" containerName="registry-server" Nov 28 14:05:19 crc kubenswrapper[4857]: I1128 14:05:19.552613 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f95124f2-19c7-439a-99c9-2524a610f66a" containerName="registry-server" Nov 28 14:05:19 crc kubenswrapper[4857]: I1128 14:05:19.552742 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f95124f2-19c7-439a-99c9-2524a610f66a" containerName="registry-server" Nov 28 14:05:19 crc kubenswrapper[4857]: I1128 14:05:19.554463 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-fc4h4" Nov 28 14:05:19 crc kubenswrapper[4857]: I1128 14:05:19.569134 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fc4h4"] Nov 28 14:05:19 crc kubenswrapper[4857]: I1128 14:05:19.662742 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s4kzs\" (UniqueName: \"kubernetes.io/projected/553f5b57-9784-4877-8606-218ffae72289-kube-api-access-s4kzs\") pod \"certified-operators-fc4h4\" (UID: \"553f5b57-9784-4877-8606-218ffae72289\") " pod="openshift-marketplace/certified-operators-fc4h4" Nov 28 14:05:19 crc kubenswrapper[4857]: I1128 14:05:19.662801 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/553f5b57-9784-4877-8606-218ffae72289-catalog-content\") pod \"certified-operators-fc4h4\" (UID: \"553f5b57-9784-4877-8606-218ffae72289\") " pod="openshift-marketplace/certified-operators-fc4h4" Nov 28 14:05:19 crc kubenswrapper[4857]: I1128 14:05:19.662878 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/553f5b57-9784-4877-8606-218ffae72289-utilities\") pod \"certified-operators-fc4h4\" (UID: \"553f5b57-9784-4877-8606-218ffae72289\") " pod="openshift-marketplace/certified-operators-fc4h4" Nov 28 14:05:19 crc kubenswrapper[4857]: I1128 14:05:19.764176 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s4kzs\" (UniqueName: \"kubernetes.io/projected/553f5b57-9784-4877-8606-218ffae72289-kube-api-access-s4kzs\") pod \"certified-operators-fc4h4\" (UID: \"553f5b57-9784-4877-8606-218ffae72289\") " pod="openshift-marketplace/certified-operators-fc4h4" Nov 28 14:05:19 crc kubenswrapper[4857]: I1128 14:05:19.764248 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/553f5b57-9784-4877-8606-218ffae72289-catalog-content\") pod \"certified-operators-fc4h4\" (UID: \"553f5b57-9784-4877-8606-218ffae72289\") " pod="openshift-marketplace/certified-operators-fc4h4" Nov 28 14:05:19 crc kubenswrapper[4857]: I1128 14:05:19.764310 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/553f5b57-9784-4877-8606-218ffae72289-utilities\") pod \"certified-operators-fc4h4\" (UID: \"553f5b57-9784-4877-8606-218ffae72289\") " pod="openshift-marketplace/certified-operators-fc4h4" Nov 28 14:05:19 crc kubenswrapper[4857]: I1128 14:05:19.764812 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/553f5b57-9784-4877-8606-218ffae72289-catalog-content\") pod \"certified-operators-fc4h4\" (UID: \"553f5b57-9784-4877-8606-218ffae72289\") " pod="openshift-marketplace/certified-operators-fc4h4" Nov 28 14:05:19 crc kubenswrapper[4857]: I1128 14:05:19.764835 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/553f5b57-9784-4877-8606-218ffae72289-utilities\") pod \"certified-operators-fc4h4\" (UID: \"553f5b57-9784-4877-8606-218ffae72289\") " pod="openshift-marketplace/certified-operators-fc4h4" Nov 28 14:05:19 crc kubenswrapper[4857]: I1128 14:05:19.784738 4857 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-s4kzs\" (UniqueName: \"kubernetes.io/projected/553f5b57-9784-4877-8606-218ffae72289-kube-api-access-s4kzs\") pod \"certified-operators-fc4h4\" (UID: \"553f5b57-9784-4877-8606-218ffae72289\") " pod="openshift-marketplace/certified-operators-fc4h4" Nov 28 14:05:19 crc kubenswrapper[4857]: I1128 14:05:19.872922 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fc4h4" Nov 28 14:05:20 crc kubenswrapper[4857]: I1128 14:05:20.346362 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fc4h4"] Nov 28 14:05:20 crc kubenswrapper[4857]: I1128 14:05:20.725380 4857 generic.go:334] "Generic (PLEG): container finished" podID="553f5b57-9784-4877-8606-218ffae72289" containerID="694a9c5487c3155e77469c45da3c302c90ceba1d4864a8909f073569d344b3c3" exitCode=0 Nov 28 14:05:20 crc kubenswrapper[4857]: I1128 14:05:20.725480 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fc4h4" event={"ID":"553f5b57-9784-4877-8606-218ffae72289","Type":"ContainerDied","Data":"694a9c5487c3155e77469c45da3c302c90ceba1d4864a8909f073569d344b3c3"} Nov 28 14:05:20 crc kubenswrapper[4857]: I1128 14:05:20.725517 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fc4h4" event={"ID":"553f5b57-9784-4877-8606-218ffae72289","Type":"ContainerStarted","Data":"48548998f882e68624157f5b4ab93236f6abb6c6546de534f57a456b5251ce23"} Nov 28 14:05:21 crc kubenswrapper[4857]: I1128 14:05:21.738889 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fc4h4" event={"ID":"553f5b57-9784-4877-8606-218ffae72289","Type":"ContainerStarted","Data":"0237e1443a19956daecbbe4ca200ffde1331239cf62c4bb7b33cd6fca231c3a7"} Nov 28 14:05:22 crc kubenswrapper[4857]: I1128 14:05:22.747754 4857 generic.go:334] "Generic (PLEG): container finished" podID="553f5b57-9784-4877-8606-218ffae72289" containerID="0237e1443a19956daecbbe4ca200ffde1331239cf62c4bb7b33cd6fca231c3a7" exitCode=0 Nov 28 14:05:22 crc kubenswrapper[4857]: I1128 14:05:22.747825 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fc4h4" event={"ID":"553f5b57-9784-4877-8606-218ffae72289","Type":"ContainerDied","Data":"0237e1443a19956daecbbe4ca200ffde1331239cf62c4bb7b33cd6fca231c3a7"} Nov 28 14:05:24 crc kubenswrapper[4857]: I1128 14:05:24.766350 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fc4h4" event={"ID":"553f5b57-9784-4877-8606-218ffae72289","Type":"ContainerStarted","Data":"855ba26901fe031cc7792304533cd535e1444d52868df2e4143d78615be40273"} Nov 28 14:05:24 crc kubenswrapper[4857]: I1128 14:05:24.787010 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-fc4h4" podStartSLOduration=2.75686762 podStartE2EDuration="5.786991263s" podCreationTimestamp="2025-11-28 14:05:19 +0000 UTC" firstStartedPulling="2025-11-28 14:05:20.727454302 +0000 UTC m=+2170.851395739" lastFinishedPulling="2025-11-28 14:05:23.757577945 +0000 UTC m=+2173.881519382" observedRunningTime="2025-11-28 14:05:24.781202709 +0000 UTC m=+2174.905144156" watchObservedRunningTime="2025-11-28 14:05:24.786991263 +0000 UTC m=+2174.910932700" Nov 28 14:05:29 crc kubenswrapper[4857]: I1128 14:05:29.873344 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/certified-operators-fc4h4" Nov 28 14:05:29 crc kubenswrapper[4857]: I1128 14:05:29.874054 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-fc4h4" Nov 28 14:05:29 crc kubenswrapper[4857]: I1128 14:05:29.918701 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-fc4h4" Nov 28 14:05:30 crc kubenswrapper[4857]: I1128 14:05:30.896986 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-fc4h4" Nov 28 14:05:30 crc kubenswrapper[4857]: I1128 14:05:30.939062 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fc4h4"] Nov 28 14:05:32 crc kubenswrapper[4857]: I1128 14:05:32.848557 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-fc4h4" podUID="553f5b57-9784-4877-8606-218ffae72289" containerName="registry-server" containerID="cri-o://855ba26901fe031cc7792304533cd535e1444d52868df2e4143d78615be40273" gracePeriod=2 Nov 28 14:05:34 crc kubenswrapper[4857]: I1128 14:05:34.868756 4857 generic.go:334] "Generic (PLEG): container finished" podID="553f5b57-9784-4877-8606-218ffae72289" containerID="855ba26901fe031cc7792304533cd535e1444d52868df2e4143d78615be40273" exitCode=0 Nov 28 14:05:34 crc kubenswrapper[4857]: I1128 14:05:34.868864 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fc4h4" event={"ID":"553f5b57-9784-4877-8606-218ffae72289","Type":"ContainerDied","Data":"855ba26901fe031cc7792304533cd535e1444d52868df2e4143d78615be40273"} Nov 28 14:05:35 crc kubenswrapper[4857]: I1128 14:05:35.072490 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-fc4h4" Nov 28 14:05:35 crc kubenswrapper[4857]: I1128 14:05:35.135356 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/553f5b57-9784-4877-8606-218ffae72289-utilities\") pod \"553f5b57-9784-4877-8606-218ffae72289\" (UID: \"553f5b57-9784-4877-8606-218ffae72289\") " Nov 28 14:05:35 crc kubenswrapper[4857]: I1128 14:05:35.135411 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4kzs\" (UniqueName: \"kubernetes.io/projected/553f5b57-9784-4877-8606-218ffae72289-kube-api-access-s4kzs\") pod \"553f5b57-9784-4877-8606-218ffae72289\" (UID: \"553f5b57-9784-4877-8606-218ffae72289\") " Nov 28 14:05:35 crc kubenswrapper[4857]: I1128 14:05:35.135448 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/553f5b57-9784-4877-8606-218ffae72289-catalog-content\") pod \"553f5b57-9784-4877-8606-218ffae72289\" (UID: \"553f5b57-9784-4877-8606-218ffae72289\") " Nov 28 14:05:35 crc kubenswrapper[4857]: I1128 14:05:35.136444 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/553f5b57-9784-4877-8606-218ffae72289-utilities" (OuterVolumeSpecName: "utilities") pod "553f5b57-9784-4877-8606-218ffae72289" (UID: "553f5b57-9784-4877-8606-218ffae72289"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:05:35 crc kubenswrapper[4857]: I1128 14:05:35.141124 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/553f5b57-9784-4877-8606-218ffae72289-kube-api-access-s4kzs" (OuterVolumeSpecName: "kube-api-access-s4kzs") pod "553f5b57-9784-4877-8606-218ffae72289" (UID: "553f5b57-9784-4877-8606-218ffae72289"). InnerVolumeSpecName "kube-api-access-s4kzs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:05:35 crc kubenswrapper[4857]: I1128 14:05:35.190099 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/553f5b57-9784-4877-8606-218ffae72289-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "553f5b57-9784-4877-8606-218ffae72289" (UID: "553f5b57-9784-4877-8606-218ffae72289"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:05:35 crc kubenswrapper[4857]: I1128 14:05:35.236634 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/553f5b57-9784-4877-8606-218ffae72289-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 14:05:35 crc kubenswrapper[4857]: I1128 14:05:35.236675 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4kzs\" (UniqueName: \"kubernetes.io/projected/553f5b57-9784-4877-8606-218ffae72289-kube-api-access-s4kzs\") on node \"crc\" DevicePath \"\"" Nov 28 14:05:35 crc kubenswrapper[4857]: I1128 14:05:35.236685 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/553f5b57-9784-4877-8606-218ffae72289-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 14:05:35 crc kubenswrapper[4857]: I1128 14:05:35.878264 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fc4h4" event={"ID":"553f5b57-9784-4877-8606-218ffae72289","Type":"ContainerDied","Data":"48548998f882e68624157f5b4ab93236f6abb6c6546de534f57a456b5251ce23"} Nov 28 14:05:35 crc kubenswrapper[4857]: I1128 14:05:35.878335 4857 scope.go:117] "RemoveContainer" containerID="855ba26901fe031cc7792304533cd535e1444d52868df2e4143d78615be40273" Nov 28 14:05:35 crc kubenswrapper[4857]: I1128 14:05:35.878373 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-fc4h4" Nov 28 14:05:35 crc kubenswrapper[4857]: I1128 14:05:35.906079 4857 scope.go:117] "RemoveContainer" containerID="0237e1443a19956daecbbe4ca200ffde1331239cf62c4bb7b33cd6fca231c3a7" Nov 28 14:05:35 crc kubenswrapper[4857]: I1128 14:05:35.922022 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-fc4h4"] Nov 28 14:05:35 crc kubenswrapper[4857]: I1128 14:05:35.943664 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-fc4h4"] Nov 28 14:05:35 crc kubenswrapper[4857]: I1128 14:05:35.953695 4857 scope.go:117] "RemoveContainer" containerID="694a9c5487c3155e77469c45da3c302c90ceba1d4864a8909f073569d344b3c3" Nov 28 14:05:36 crc kubenswrapper[4857]: I1128 14:05:36.237394 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="553f5b57-9784-4877-8606-218ffae72289" path="/var/lib/kubelet/pods/553f5b57-9784-4877-8606-218ffae72289/volumes" Nov 28 14:07:11 crc kubenswrapper[4857]: I1128 14:07:11.308891 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 14:07:11 crc kubenswrapper[4857]: I1128 14:07:11.309634 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 14:07:41 crc kubenswrapper[4857]: I1128 14:07:41.308893 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 14:07:41 crc kubenswrapper[4857]: I1128 14:07:41.309431 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 14:08:11 crc kubenswrapper[4857]: I1128 14:08:11.308154 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 14:08:11 crc kubenswrapper[4857]: I1128 14:08:11.308757 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 14:08:11 crc kubenswrapper[4857]: I1128 14:08:11.308803 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 14:08:11 crc kubenswrapper[4857]: I1128 14:08:11.309542 4857 
kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"746c210a1bd90a856e577cc52c82e3eb0d8c3877e02c738daba7fed28ad73c0f"} pod="openshift-machine-config-operator/machine-config-daemon-dshsf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 14:08:11 crc kubenswrapper[4857]: I1128 14:08:11.309600 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" containerID="cri-o://746c210a1bd90a856e577cc52c82e3eb0d8c3877e02c738daba7fed28ad73c0f" gracePeriod=600 Nov 28 14:08:11 crc kubenswrapper[4857]: E1128 14:08:11.450584 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:08:12 crc kubenswrapper[4857]: I1128 14:08:12.061705 4857 generic.go:334] "Generic (PLEG): container finished" podID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerID="746c210a1bd90a856e577cc52c82e3eb0d8c3877e02c738daba7fed28ad73c0f" exitCode=0 Nov 28 14:08:12 crc kubenswrapper[4857]: I1128 14:08:12.061753 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerDied","Data":"746c210a1bd90a856e577cc52c82e3eb0d8c3877e02c738daba7fed28ad73c0f"} Nov 28 14:08:12 crc kubenswrapper[4857]: I1128 14:08:12.062058 4857 scope.go:117] "RemoveContainer" containerID="3516092786f533a199a3e095718056c7b89b4e92fc20c4820d74a02d53e0de82" Nov 28 14:08:12 crc kubenswrapper[4857]: I1128 14:08:12.062756 4857 scope.go:117] "RemoveContainer" containerID="746c210a1bd90a856e577cc52c82e3eb0d8c3877e02c738daba7fed28ad73c0f" Nov 28 14:08:12 crc kubenswrapper[4857]: E1128 14:08:12.063065 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:08:26 crc kubenswrapper[4857]: I1128 14:08:26.228978 4857 scope.go:117] "RemoveContainer" containerID="746c210a1bd90a856e577cc52c82e3eb0d8c3877e02c738daba7fed28ad73c0f" Nov 28 14:08:26 crc kubenswrapper[4857]: E1128 14:08:26.229757 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:08:40 crc kubenswrapper[4857]: I1128 14:08:40.233303 4857 scope.go:117] "RemoveContainer" containerID="746c210a1bd90a856e577cc52c82e3eb0d8c3877e02c738daba7fed28ad73c0f" Nov 28 
Nov 28 14:08:40 crc kubenswrapper[4857]: E1128 14:08:40.234006 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
[the same "RemoveContainer" / "Error syncing pod, skipping" CrashLoopBackOff pair repeats verbatim at 14:08:51, 14:09:03, 14:09:15, 14:09:30, 14:09:44, 14:09:58, 14:10:13, 14:10:28, 14:10:40, 14:10:54, 14:11:09, 14:11:21, 14:11:34, 14:11:46, 14:11:58, 14:12:12, 14:12:25, 14:12:38, 14:12:50 and 14:13:04 while the 5m0s back-off elapses]
Nov 28 14:13:16 crc kubenswrapper[4857]: I1128 14:13:16.228781 4857 scope.go:117] "RemoveContainer" containerID="746c210a1bd90a856e577cc52c82e3eb0d8c3877e02c738daba7fed28ad73c0f"
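[annotation] Between the death at 14:08:12 and 14:13:16 the pod worker wakes repeatedly but refuses to restart the container: CrashLoopBackOff roughly doubles the restart delay from 10s up to the 5m0s cap quoted in every error above, and only once the window has elapsed (~5m04s here) does the restart at 14:13:16-14:13:17 go through. A sketch of that schedule, using the documented default base and cap rather than kubelet internals:

package main

import (
	"fmt"
	"time"
)

func main() {
	delay := 10 * time.Second       // documented initial back-off
	const ceiling = 5 * time.Minute // the "back-off 5m0s" in the errors above
	for crash := 1; crash <= 7; crash++ {
		fmt.Printf("after crash %d: wait %v before restarting\n", crash, delay)
		delay *= 2
		if delay > ceiling {
			delay = ceiling
		}
	}
	// Prints 10s, 20s, 40s, 1m20s, 2m40s, 5m0s, 5m0s: this container had
	// crashed often enough to sit at the cap, hence the ~5-minute gap.
}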
Nov 28 14:13:17 crc kubenswrapper[4857]: I1128 14:13:17.020362 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerStarted","Data":"c21b36b6844e784455ff90551304b9ff88a395836b3cec4816bcfecc913def32"}
Nov 28 14:14:42 crc kubenswrapper[4857]: I1128 14:14:42.531812 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-8kp74"]
Nov 28 14:14:42 crc kubenswrapper[4857]: E1128 14:14:42.535325 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="553f5b57-9784-4877-8606-218ffae72289" containerName="extract-content"
Nov 28 14:14:42 crc kubenswrapper[4857]: I1128 14:14:42.535402 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="553f5b57-9784-4877-8606-218ffae72289" containerName="extract-content"
Nov 28 14:14:42 crc kubenswrapper[4857]: E1128 14:14:42.535461 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="553f5b57-9784-4877-8606-218ffae72289" containerName="registry-server"
Nov 28 14:14:42 crc kubenswrapper[4857]: I1128 14:14:42.535472 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="553f5b57-9784-4877-8606-218ffae72289" containerName="registry-server"
Nov 28 14:14:42 crc kubenswrapper[4857]: E1128 14:14:42.535493 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="553f5b57-9784-4877-8606-218ffae72289" containerName="extract-utilities"
Nov 28 14:14:42 crc kubenswrapper[4857]: I1128 14:14:42.535502 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="553f5b57-9784-4877-8606-218ffae72289" containerName="extract-utilities"
Nov 28 14:14:42 crc kubenswrapper[4857]: I1128 14:14:42.535916 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="553f5b57-9784-4877-8606-218ffae72289" containerName="registry-server"
Nov 28 14:14:42 crc kubenswrapper[4857]: I1128 14:14:42.538407 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8kp74"
Nov 28 14:14:42 crc kubenswrapper[4857]: I1128 14:14:42.542751 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8kp74"]
Nov 28 14:14:42 crc kubenswrapper[4857]: I1128 14:14:42.641718 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8wl4f\" (UniqueName: \"kubernetes.io/projected/325e7763-4ad9-4f7f-b412-5aabe64133cb-kube-api-access-8wl4f\") pod \"redhat-operators-8kp74\" (UID: \"325e7763-4ad9-4f7f-b412-5aabe64133cb\") " pod="openshift-marketplace/redhat-operators-8kp74"
Nov 28 14:14:42 crc kubenswrapper[4857]: I1128 14:14:42.641804 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/325e7763-4ad9-4f7f-b412-5aabe64133cb-catalog-content\") pod \"redhat-operators-8kp74\" (UID: \"325e7763-4ad9-4f7f-b412-5aabe64133cb\") " pod="openshift-marketplace/redhat-operators-8kp74"
Nov 28 14:14:42 crc kubenswrapper[4857]: I1128 14:14:42.641873 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/325e7763-4ad9-4f7f-b412-5aabe64133cb-utilities\") pod \"redhat-operators-8kp74\" (UID: \"325e7763-4ad9-4f7f-b412-5aabe64133cb\") " pod="openshift-marketplace/redhat-operators-8kp74"
Nov 28 14:14:42 crc kubenswrapper[4857]: I1128 14:14:42.743035 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/325e7763-4ad9-4f7f-b412-5aabe64133cb-utilities\") pod \"redhat-operators-8kp74\" (UID: \"325e7763-4ad9-4f7f-b412-5aabe64133cb\") " pod="openshift-marketplace/redhat-operators-8kp74"
Nov 28 14:14:42 crc kubenswrapper[4857]: I1128 14:14:42.743112 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8wl4f\" (UniqueName: \"kubernetes.io/projected/325e7763-4ad9-4f7f-b412-5aabe64133cb-kube-api-access-8wl4f\") pod \"redhat-operators-8kp74\" (UID: \"325e7763-4ad9-4f7f-b412-5aabe64133cb\") " pod="openshift-marketplace/redhat-operators-8kp74"
Nov 28 14:14:42 crc kubenswrapper[4857]: I1128 14:14:42.743159 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/325e7763-4ad9-4f7f-b412-5aabe64133cb-catalog-content\") pod \"redhat-operators-8kp74\" (UID: \"325e7763-4ad9-4f7f-b412-5aabe64133cb\") " pod="openshift-marketplace/redhat-operators-8kp74"
Nov 28 14:14:42 crc kubenswrapper[4857]: I1128 14:14:42.743704 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/325e7763-4ad9-4f7f-b412-5aabe64133cb-utilities\") pod \"redhat-operators-8kp74\" (UID: \"325e7763-4ad9-4f7f-b412-5aabe64133cb\") " pod="openshift-marketplace/redhat-operators-8kp74"
Nov 28 14:14:42 crc kubenswrapper[4857]: I1128 14:14:42.743714 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/325e7763-4ad9-4f7f-b412-5aabe64133cb-catalog-content\") pod \"redhat-operators-8kp74\" (UID: \"325e7763-4ad9-4f7f-b412-5aabe64133cb\") " pod="openshift-marketplace/redhat-operators-8kp74"
Nov 28 14:14:42 crc kubenswrapper[4857]: I1128 14:14:42.763797 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8wl4f\" (UniqueName: \"kubernetes.io/projected/325e7763-4ad9-4f7f-b412-5aabe64133cb-kube-api-access-8wl4f\") pod \"redhat-operators-8kp74\" (UID: \"325e7763-4ad9-4f7f-b412-5aabe64133cb\") " pod="openshift-marketplace/redhat-operators-8kp74"
Nov 28 14:14:42 crc kubenswrapper[4857]: I1128 14:14:42.868150 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8kp74"
Nov 28 14:14:43 crc kubenswrapper[4857]: I1128 14:14:43.293789 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8kp74"]
Nov 28 14:14:43 crc kubenswrapper[4857]: I1128 14:14:43.672765 4857 generic.go:334] "Generic (PLEG): container finished" podID="325e7763-4ad9-4f7f-b412-5aabe64133cb" containerID="1a4ceeedb15e7385ff04f6c1fdee60f3788dec14069a7a24742908d1c5d1a948" exitCode=0
Nov 28 14:14:43 crc kubenswrapper[4857]: I1128 14:14:43.672867 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8kp74" event={"ID":"325e7763-4ad9-4f7f-b412-5aabe64133cb","Type":"ContainerDied","Data":"1a4ceeedb15e7385ff04f6c1fdee60f3788dec14069a7a24742908d1c5d1a948"}
Nov 28 14:14:43 crc kubenswrapper[4857]: I1128 14:14:43.673088 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8kp74" event={"ID":"325e7763-4ad9-4f7f-b412-5aabe64133cb","Type":"ContainerStarted","Data":"e9a9780583c6b7bba1adb749c7733f9f020a816303b1c231d54ab63012f0ca7b"}
Nov 28 14:14:43 crc kubenswrapper[4857]: I1128 14:14:43.674462 4857 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 28 14:14:45 crc kubenswrapper[4857]: I1128 14:14:45.690860 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8kp74" event={"ID":"325e7763-4ad9-4f7f-b412-5aabe64133cb","Type":"ContainerStarted","Data":"05b2d297798bb8a563c6694377dc17254895f9c352cf3ebd457f7805c910a358"}
Nov 28 14:14:46 crc kubenswrapper[4857]: I1128 14:14:46.700025 4857 generic.go:334] "Generic (PLEG): container finished" podID="325e7763-4ad9-4f7f-b412-5aabe64133cb" containerID="05b2d297798bb8a563c6694377dc17254895f9c352cf3ebd457f7805c910a358" exitCode=0
Nov 28 14:14:46 crc kubenswrapper[4857]: I1128 14:14:46.700106 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8kp74" event={"ID":"325e7763-4ad9-4f7f-b412-5aabe64133cb","Type":"ContainerDied","Data":"05b2d297798bb8a563c6694377dc17254895f9c352cf3ebd457f7805c910a358"}
Nov 28 14:14:47 crc kubenswrapper[4857]: I1128 14:14:47.712046 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8kp74" event={"ID":"325e7763-4ad9-4f7f-b412-5aabe64133cb","Type":"ContainerStarted","Data":"5f70d87eecea3ece818ba3a12b67795f513aa1c81330c1235e13dbcc2a5f460f"}
Nov 28 14:14:47 crc kubenswrapper[4857]: I1128 14:14:47.730476 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-8kp74" podStartSLOduration=2.234166423 podStartE2EDuration="5.730461386s" podCreationTimestamp="2025-11-28 14:14:42 +0000 UTC" firstStartedPulling="2025-11-28 14:14:43.674207663 +0000 UTC m=+2733.798149100" lastFinishedPulling="2025-11-28 14:14:47.170502626 +0000 UTC m=+2737.294444063" observedRunningTime="2025-11-28 14:14:47.726728577 +0000 UTC m=+2737.850670014" watchObservedRunningTime="2025-11-28 14:14:47.730461386 +0000 UTC m=+2737.854402823"
Nov 28 14:14:52 crc kubenswrapper[4857]: I1128 14:14:52.869289 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-8kp74"
Nov 28 14:14:52 crc kubenswrapper[4857]: I1128 14:14:52.870025 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-8kp74"
Nov 28 14:14:52 crc kubenswrapper[4857]: I1128 14:14:52.919412 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-8kp74"
Nov 28 14:14:53 crc kubenswrapper[4857]: I1128 14:14:53.790690 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-8kp74"
Nov 28 14:14:53 crc kubenswrapper[4857]: I1128 14:14:53.838904 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8kp74"]
Nov 28 14:14:55 crc kubenswrapper[4857]: I1128 14:14:55.766063 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-8kp74" podUID="325e7763-4ad9-4f7f-b412-5aabe64133cb" containerName="registry-server" containerID="cri-o://5f70d87eecea3ece818ba3a12b67795f513aa1c81330c1235e13dbcc2a5f460f" gracePeriod=2
Nov 28 14:14:57 crc kubenswrapper[4857]: I1128 14:14:57.783455 4857 generic.go:334] "Generic (PLEG): container finished" podID="325e7763-4ad9-4f7f-b412-5aabe64133cb" containerID="5f70d87eecea3ece818ba3a12b67795f513aa1c81330c1235e13dbcc2a5f460f" exitCode=0
Nov 28 14:14:57 crc kubenswrapper[4857]: I1128 14:14:57.783529 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8kp74" event={"ID":"325e7763-4ad9-4f7f-b412-5aabe64133cb","Type":"ContainerDied","Data":"5f70d87eecea3ece818ba3a12b67795f513aa1c81330c1235e13dbcc2a5f460f"}
Nov 28 14:14:58 crc kubenswrapper[4857]: I1128 14:14:58.072011 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8kp74"
Nov 28 14:14:58 crc kubenswrapper[4857]: I1128 14:14:58.142667 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/325e7763-4ad9-4f7f-b412-5aabe64133cb-utilities\") pod \"325e7763-4ad9-4f7f-b412-5aabe64133cb\" (UID: \"325e7763-4ad9-4f7f-b412-5aabe64133cb\") "
Nov 28 14:14:58 crc kubenswrapper[4857]: I1128 14:14:58.142726 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8wl4f\" (UniqueName: \"kubernetes.io/projected/325e7763-4ad9-4f7f-b412-5aabe64133cb-kube-api-access-8wl4f\") pod \"325e7763-4ad9-4f7f-b412-5aabe64133cb\" (UID: \"325e7763-4ad9-4f7f-b412-5aabe64133cb\") "
Nov 28 14:14:58 crc kubenswrapper[4857]: I1128 14:14:58.142813 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/325e7763-4ad9-4f7f-b412-5aabe64133cb-catalog-content\") pod \"325e7763-4ad9-4f7f-b412-5aabe64133cb\" (UID: \"325e7763-4ad9-4f7f-b412-5aabe64133cb\") "
Nov 28 14:14:58 crc kubenswrapper[4857]: I1128 14:14:58.144182 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/325e7763-4ad9-4f7f-b412-5aabe64133cb-utilities" (OuterVolumeSpecName: "utilities") pod "325e7763-4ad9-4f7f-b412-5aabe64133cb" (UID: "325e7763-4ad9-4f7f-b412-5aabe64133cb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 14:14:58 crc kubenswrapper[4857]: I1128 14:14:58.153148 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/325e7763-4ad9-4f7f-b412-5aabe64133cb-kube-api-access-8wl4f" (OuterVolumeSpecName: "kube-api-access-8wl4f") pod "325e7763-4ad9-4f7f-b412-5aabe64133cb" (UID: "325e7763-4ad9-4f7f-b412-5aabe64133cb"). InnerVolumeSpecName "kube-api-access-8wl4f". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 14:14:58 crc kubenswrapper[4857]: I1128 14:14:58.244142 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/325e7763-4ad9-4f7f-b412-5aabe64133cb-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 14:14:58 crc kubenswrapper[4857]: I1128 14:14:58.244193 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8wl4f\" (UniqueName: \"kubernetes.io/projected/325e7763-4ad9-4f7f-b412-5aabe64133cb-kube-api-access-8wl4f\") on node \"crc\" DevicePath \"\""
Nov 28 14:14:58 crc kubenswrapper[4857]: I1128 14:14:58.252756 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/325e7763-4ad9-4f7f-b412-5aabe64133cb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "325e7763-4ad9-4f7f-b412-5aabe64133cb" (UID: "325e7763-4ad9-4f7f-b412-5aabe64133cb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 14:14:58 crc kubenswrapper[4857]: I1128 14:14:58.346481 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/325e7763-4ad9-4f7f-b412-5aabe64133cb-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 14:14:58 crc kubenswrapper[4857]: I1128 14:14:58.792065 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8kp74" event={"ID":"325e7763-4ad9-4f7f-b412-5aabe64133cb","Type":"ContainerDied","Data":"e9a9780583c6b7bba1adb749c7733f9f020a816303b1c231d54ab63012f0ca7b"}
Nov 28 14:14:58 crc kubenswrapper[4857]: I1128 14:14:58.792095 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8kp74"
Nov 28 14:14:58 crc kubenswrapper[4857]: I1128 14:14:58.792121 4857 scope.go:117] "RemoveContainer" containerID="5f70d87eecea3ece818ba3a12b67795f513aa1c81330c1235e13dbcc2a5f460f"
Nov 28 14:14:58 crc kubenswrapper[4857]: I1128 14:14:58.818622 4857 scope.go:117] "RemoveContainer" containerID="05b2d297798bb8a563c6694377dc17254895f9c352cf3ebd457f7805c910a358"
Nov 28 14:14:58 crc kubenswrapper[4857]: I1128 14:14:58.830277 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8kp74"]
Nov 28 14:14:58 crc kubenswrapper[4857]: I1128 14:14:58.836566 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-8kp74"]
Nov 28 14:14:58 crc kubenswrapper[4857]: I1128 14:14:58.862193 4857 scope.go:117] "RemoveContainer" containerID="1a4ceeedb15e7385ff04f6c1fdee60f3788dec14069a7a24742908d1c5d1a948"
Nov 28 14:15:00 crc kubenswrapper[4857]: I1128 14:15:00.147669 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405655-fr6rn"]
Nov 28 14:15:00 crc kubenswrapper[4857]: E1128 14:15:00.148373 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="325e7763-4ad9-4f7f-b412-5aabe64133cb" containerName="extract-utilities"
Nov 28 14:15:00 crc kubenswrapper[4857]: I1128 14:15:00.148388 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="325e7763-4ad9-4f7f-b412-5aabe64133cb" containerName="extract-utilities"
Nov 28 14:15:00 crc kubenswrapper[4857]: E1128 14:15:00.148405 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="325e7763-4ad9-4f7f-b412-5aabe64133cb" containerName="registry-server"
Nov 28 14:15:00 crc kubenswrapper[4857]: I1128 14:15:00.148411 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="325e7763-4ad9-4f7f-b412-5aabe64133cb" containerName="registry-server"
Nov 28 14:15:00 crc kubenswrapper[4857]: E1128 14:15:00.148431 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="325e7763-4ad9-4f7f-b412-5aabe64133cb" containerName="extract-content"
Nov 28 14:15:00 crc kubenswrapper[4857]: I1128 14:15:00.148436 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="325e7763-4ad9-4f7f-b412-5aabe64133cb" containerName="extract-content"
Nov 28 14:15:00 crc kubenswrapper[4857]: I1128 14:15:00.148571 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="325e7763-4ad9-4f7f-b412-5aabe64133cb" containerName="registry-server"
Nov 28 14:15:00 crc kubenswrapper[4857]: I1128 14:15:00.149129 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405655-fr6rn"
Nov 28 14:15:00 crc kubenswrapper[4857]: I1128 14:15:00.150766 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 28 14:15:00 crc kubenswrapper[4857]: I1128 14:15:00.151334 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 28 14:15:00 crc kubenswrapper[4857]: I1128 14:15:00.161500 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405655-fr6rn"]
Nov 28 14:15:00 crc kubenswrapper[4857]: I1128 14:15:00.237574 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="325e7763-4ad9-4f7f-b412-5aabe64133cb" path="/var/lib/kubelet/pods/325e7763-4ad9-4f7f-b412-5aabe64133cb/volumes"
Nov 28 14:15:00 crc kubenswrapper[4857]: I1128 14:15:00.271894 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b1ba1836-332a-4710-a660-78728a3e65cf-secret-volume\") pod \"collect-profiles-29405655-fr6rn\" (UID: \"b1ba1836-332a-4710-a660-78728a3e65cf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405655-fr6rn"
Nov 28 14:15:00 crc kubenswrapper[4857]: I1128 14:15:00.272052 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4mgv5\" (UniqueName: \"kubernetes.io/projected/b1ba1836-332a-4710-a660-78728a3e65cf-kube-api-access-4mgv5\") pod \"collect-profiles-29405655-fr6rn\" (UID: \"b1ba1836-332a-4710-a660-78728a3e65cf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405655-fr6rn"
Nov 28 14:15:00 crc kubenswrapper[4857]: I1128 14:15:00.272086 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b1ba1836-332a-4710-a660-78728a3e65cf-config-volume\") pod \"collect-profiles-29405655-fr6rn\" (UID: \"b1ba1836-332a-4710-a660-78728a3e65cf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405655-fr6rn"
Nov 28 14:15:00 crc kubenswrapper[4857]: I1128 14:15:00.373039 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4mgv5\" (UniqueName: \"kubernetes.io/projected/b1ba1836-332a-4710-a660-78728a3e65cf-kube-api-access-4mgv5\") pod \"collect-profiles-29405655-fr6rn\" (UID: \"b1ba1836-332a-4710-a660-78728a3e65cf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405655-fr6rn"
Nov 28 14:15:00 crc kubenswrapper[4857]: I1128 14:15:00.373119 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b1ba1836-332a-4710-a660-78728a3e65cf-config-volume\") pod \"collect-profiles-29405655-fr6rn\" (UID: \"b1ba1836-332a-4710-a660-78728a3e65cf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405655-fr6rn"
Nov 28 14:15:00 crc kubenswrapper[4857]: I1128 14:15:00.374022 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b1ba1836-332a-4710-a660-78728a3e65cf-config-volume\") pod \"collect-profiles-29405655-fr6rn\" (UID: \"b1ba1836-332a-4710-a660-78728a3e65cf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405655-fr6rn"
Nov 28 14:15:00 crc kubenswrapper[4857]: I1128 14:15:00.374106 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b1ba1836-332a-4710-a660-78728a3e65cf-secret-volume\") pod \"collect-profiles-29405655-fr6rn\" (UID: \"b1ba1836-332a-4710-a660-78728a3e65cf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405655-fr6rn"
Nov 28 14:15:00 crc kubenswrapper[4857]: I1128 14:15:00.378130 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b1ba1836-332a-4710-a660-78728a3e65cf-secret-volume\") pod \"collect-profiles-29405655-fr6rn\" (UID: \"b1ba1836-332a-4710-a660-78728a3e65cf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405655-fr6rn"
Nov 28 14:15:00 crc kubenswrapper[4857]: I1128 14:15:00.388240 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4mgv5\" (UniqueName: \"kubernetes.io/projected/b1ba1836-332a-4710-a660-78728a3e65cf-kube-api-access-4mgv5\") pod \"collect-profiles-29405655-fr6rn\" (UID: \"b1ba1836-332a-4710-a660-78728a3e65cf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405655-fr6rn"
Nov 28 14:15:00 crc kubenswrapper[4857]: I1128 14:15:00.496280 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405655-fr6rn"
Nov 28 14:15:00 crc kubenswrapper[4857]: I1128 14:15:00.905050 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405655-fr6rn"]
Nov 28 14:15:00 crc kubenswrapper[4857]: I1128 14:15:00.960590 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-mmrht"]
Nov 28 14:15:00 crc kubenswrapper[4857]: I1128 14:15:00.964204 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mmrht"
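[annotation] The numeric Job suffix in collect-profiles-29405655-fr6rn is not random: the CronJob controller names Jobs after the scheduled time in minutes since the Unix epoch, and decoding 29405655 lands exactly on this pod's 14:15:00 creation time (the older collect-profiles-29405610-z8jl4 deleted below is the run from 45 minutes earlier). A quick check:

package main

import (
	"fmt"
	"time"
)

func main() {
	const suffix = 29405655                    // from collect-profiles-29405655-fr6rn
	fmt.Println(time.Unix(suffix*60, 0).UTC()) // 2025-11-28 14:15:00 +0000 UTC
}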
Nov 28 14:15:00 crc kubenswrapper[4857]: I1128 14:15:00.983809 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mmrht"]
Nov 28 14:15:01 crc kubenswrapper[4857]: I1128 14:15:01.084164 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9cmhx\" (UniqueName: \"kubernetes.io/projected/bdbf5ae7-5b76-4174-843a-e36841abb42b-kube-api-access-9cmhx\") pod \"community-operators-mmrht\" (UID: \"bdbf5ae7-5b76-4174-843a-e36841abb42b\") " pod="openshift-marketplace/community-operators-mmrht"
Nov 28 14:15:01 crc kubenswrapper[4857]: I1128 14:15:01.084543 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bdbf5ae7-5b76-4174-843a-e36841abb42b-utilities\") pod \"community-operators-mmrht\" (UID: \"bdbf5ae7-5b76-4174-843a-e36841abb42b\") " pod="openshift-marketplace/community-operators-mmrht"
Nov 28 14:15:01 crc kubenswrapper[4857]: I1128 14:15:01.084603 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bdbf5ae7-5b76-4174-843a-e36841abb42b-catalog-content\") pod \"community-operators-mmrht\" (UID: \"bdbf5ae7-5b76-4174-843a-e36841abb42b\") " pod="openshift-marketplace/community-operators-mmrht"
Nov 28 14:15:01 crc kubenswrapper[4857]: I1128 14:15:01.186245 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bdbf5ae7-5b76-4174-843a-e36841abb42b-utilities\") pod \"community-operators-mmrht\" (UID: \"bdbf5ae7-5b76-4174-843a-e36841abb42b\") " pod="openshift-marketplace/community-operators-mmrht"
Nov 28 14:15:01 crc kubenswrapper[4857]: I1128 14:15:01.186314 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bdbf5ae7-5b76-4174-843a-e36841abb42b-catalog-content\") pod \"community-operators-mmrht\" (UID: \"bdbf5ae7-5b76-4174-843a-e36841abb42b\") " pod="openshift-marketplace/community-operators-mmrht"
Nov 28 14:15:01 crc kubenswrapper[4857]: I1128 14:15:01.186384 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9cmhx\" (UniqueName: \"kubernetes.io/projected/bdbf5ae7-5b76-4174-843a-e36841abb42b-kube-api-access-9cmhx\") pod \"community-operators-mmrht\" (UID: \"bdbf5ae7-5b76-4174-843a-e36841abb42b\") " pod="openshift-marketplace/community-operators-mmrht"
Nov 28 14:15:01 crc kubenswrapper[4857]: I1128 14:15:01.186769 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bdbf5ae7-5b76-4174-843a-e36841abb42b-utilities\") pod \"community-operators-mmrht\" (UID: \"bdbf5ae7-5b76-4174-843a-e36841abb42b\") " pod="openshift-marketplace/community-operators-mmrht"
Nov 28 14:15:01 crc kubenswrapper[4857]: I1128 14:15:01.186912 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bdbf5ae7-5b76-4174-843a-e36841abb42b-catalog-content\") pod \"community-operators-mmrht\" (UID: \"bdbf5ae7-5b76-4174-843a-e36841abb42b\") " pod="openshift-marketplace/community-operators-mmrht"
Nov 28 14:15:01 crc kubenswrapper[4857]: I1128 14:15:01.207660 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9cmhx\" (UniqueName: \"kubernetes.io/projected/bdbf5ae7-5b76-4174-843a-e36841abb42b-kube-api-access-9cmhx\") pod \"community-operators-mmrht\" (UID: \"bdbf5ae7-5b76-4174-843a-e36841abb42b\") " pod="openshift-marketplace/community-operators-mmrht"
Nov 28 14:15:01 crc kubenswrapper[4857]: I1128 14:15:01.288242 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mmrht"
Nov 28 14:15:01 crc kubenswrapper[4857]: I1128 14:15:01.607649 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mmrht"]
Nov 28 14:15:01 crc kubenswrapper[4857]: I1128 14:15:01.832884 4857 generic.go:334] "Generic (PLEG): container finished" podID="b1ba1836-332a-4710-a660-78728a3e65cf" containerID="53cd5ea547fbbeb5cb167ac1f7a22e7ec9fac0e5e593f4377123f66e74ced88f" exitCode=0
Nov 28 14:15:01 crc kubenswrapper[4857]: I1128 14:15:01.832976 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405655-fr6rn" event={"ID":"b1ba1836-332a-4710-a660-78728a3e65cf","Type":"ContainerDied","Data":"53cd5ea547fbbeb5cb167ac1f7a22e7ec9fac0e5e593f4377123f66e74ced88f"}
Nov 28 14:15:01 crc kubenswrapper[4857]: I1128 14:15:01.833020 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405655-fr6rn" event={"ID":"b1ba1836-332a-4710-a660-78728a3e65cf","Type":"ContainerStarted","Data":"5ea7505696159962c0facbe3cc6febd804c62340d7099a5a87614f95c8e8cee0"}
Nov 28 14:15:01 crc kubenswrapper[4857]: I1128 14:15:01.835559 4857 generic.go:334] "Generic (PLEG): container finished" podID="bdbf5ae7-5b76-4174-843a-e36841abb42b" containerID="1669fd7d3e7382c6e81444f0c20b246b803b1051e81cbf18ee4f315f86edaec8" exitCode=0
Nov 28 14:15:01 crc kubenswrapper[4857]: I1128 14:15:01.835603 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mmrht" event={"ID":"bdbf5ae7-5b76-4174-843a-e36841abb42b","Type":"ContainerDied","Data":"1669fd7d3e7382c6e81444f0c20b246b803b1051e81cbf18ee4f315f86edaec8"}
Nov 28 14:15:01 crc kubenswrapper[4857]: I1128 14:15:01.835630 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mmrht" event={"ID":"bdbf5ae7-5b76-4174-843a-e36841abb42b","Type":"ContainerStarted","Data":"7e74ccd802ddb4399d2b218cffffd2e9e86709957c7d691e1287a8027b99d7f3"}
Nov 28 14:15:03 crc kubenswrapper[4857]: I1128 14:15:03.148809 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405655-fr6rn"
Nov 28 14:15:03 crc kubenswrapper[4857]: I1128 14:15:03.214791 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4mgv5\" (UniqueName: \"kubernetes.io/projected/b1ba1836-332a-4710-a660-78728a3e65cf-kube-api-access-4mgv5\") pod \"b1ba1836-332a-4710-a660-78728a3e65cf\" (UID: \"b1ba1836-332a-4710-a660-78728a3e65cf\") "
Nov 28 14:15:03 crc kubenswrapper[4857]: I1128 14:15:03.214960 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b1ba1836-332a-4710-a660-78728a3e65cf-secret-volume\") pod \"b1ba1836-332a-4710-a660-78728a3e65cf\" (UID: \"b1ba1836-332a-4710-a660-78728a3e65cf\") "
Nov 28 14:15:03 crc kubenswrapper[4857]: I1128 14:15:03.215022 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b1ba1836-332a-4710-a660-78728a3e65cf-config-volume\") pod \"b1ba1836-332a-4710-a660-78728a3e65cf\" (UID: \"b1ba1836-332a-4710-a660-78728a3e65cf\") "
Nov 28 14:15:03 crc kubenswrapper[4857]: I1128 14:15:03.215800 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b1ba1836-332a-4710-a660-78728a3e65cf-config-volume" (OuterVolumeSpecName: "config-volume") pod "b1ba1836-332a-4710-a660-78728a3e65cf" (UID: "b1ba1836-332a-4710-a660-78728a3e65cf"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 14:15:03 crc kubenswrapper[4857]: I1128 14:15:03.223091 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1ba1836-332a-4710-a660-78728a3e65cf-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "b1ba1836-332a-4710-a660-78728a3e65cf" (UID: "b1ba1836-332a-4710-a660-78728a3e65cf"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 14:15:03 crc kubenswrapper[4857]: I1128 14:15:03.223214 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b1ba1836-332a-4710-a660-78728a3e65cf-kube-api-access-4mgv5" (OuterVolumeSpecName: "kube-api-access-4mgv5") pod "b1ba1836-332a-4710-a660-78728a3e65cf" (UID: "b1ba1836-332a-4710-a660-78728a3e65cf"). InnerVolumeSpecName "kube-api-access-4mgv5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 14:15:03 crc kubenswrapper[4857]: I1128 14:15:03.316703 4857 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b1ba1836-332a-4710-a660-78728a3e65cf-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 28 14:15:03 crc kubenswrapper[4857]: I1128 14:15:03.317086 4857 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b1ba1836-332a-4710-a660-78728a3e65cf-config-volume\") on node \"crc\" DevicePath \"\""
Nov 28 14:15:03 crc kubenswrapper[4857]: I1128 14:15:03.317097 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4mgv5\" (UniqueName: \"kubernetes.io/projected/b1ba1836-332a-4710-a660-78728a3e65cf-kube-api-access-4mgv5\") on node \"crc\" DevicePath \"\""
Nov 28 14:15:03 crc kubenswrapper[4857]: I1128 14:15:03.849801 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405655-fr6rn" event={"ID":"b1ba1836-332a-4710-a660-78728a3e65cf","Type":"ContainerDied","Data":"5ea7505696159962c0facbe3cc6febd804c62340d7099a5a87614f95c8e8cee0"}
Nov 28 14:15:03 crc kubenswrapper[4857]: I1128 14:15:03.849864 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5ea7505696159962c0facbe3cc6febd804c62340d7099a5a87614f95c8e8cee0"
Nov 28 14:15:03 crc kubenswrapper[4857]: I1128 14:15:03.849982 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405655-fr6rn"
Nov 28 14:15:04 crc kubenswrapper[4857]: I1128 14:15:04.221506 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405610-z8jl4"]
Nov 28 14:15:04 crc kubenswrapper[4857]: I1128 14:15:04.225979 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405610-z8jl4"]
Nov 28 14:15:04 crc kubenswrapper[4857]: I1128 14:15:04.239500 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49fd1de8-3f87-4d33-b209-df83dd1096a8" path="/var/lib/kubelet/pods/49fd1de8-3f87-4d33-b209-df83dd1096a8/volumes"
Nov 28 14:15:04 crc kubenswrapper[4857]: I1128 14:15:04.858572 4857 generic.go:334] "Generic (PLEG): container finished" podID="bdbf5ae7-5b76-4174-843a-e36841abb42b" containerID="668e722f720c8e95879f2de8c1ef5ead70ded6a99f7a05ab9a4845cead6fcf97" exitCode=0
Nov 28 14:15:04 crc kubenswrapper[4857]: I1128 14:15:04.858669 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mmrht" event={"ID":"bdbf5ae7-5b76-4174-843a-e36841abb42b","Type":"ContainerDied","Data":"668e722f720c8e95879f2de8c1ef5ead70ded6a99f7a05ab9a4845cead6fcf97"}
Nov 28 14:15:05 crc kubenswrapper[4857]: I1128 14:15:05.867911 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mmrht" event={"ID":"bdbf5ae7-5b76-4174-843a-e36841abb42b","Type":"ContainerStarted","Data":"2f6037fb8f5ec5b6a65e137d98c817b874292603aefdbcff5076149a7253e94a"}
Nov 28 14:15:05 crc kubenswrapper[4857]: I1128 14:15:05.895241 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-mmrht" podStartSLOduration=2.389506429 podStartE2EDuration="5.895219293s" podCreationTimestamp="2025-11-28 14:15:00 +0000 UTC" firstStartedPulling="2025-11-28 14:15:01.836906045 +0000 UTC m=+2751.960847472" lastFinishedPulling="2025-11-28 14:15:05.342618909 +0000 UTC m=+2755.466560336" observedRunningTime="2025-11-28 14:15:05.890202879 +0000 UTC m=+2756.014144336" watchObservedRunningTime="2025-11-28 14:15:05.895219293 +0000 UTC m=+2756.019160740"
Nov 28 14:15:11 crc kubenswrapper[4857]: I1128 14:15:11.288532 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-mmrht"
Nov 28 14:15:11 crc kubenswrapper[4857]: I1128 14:15:11.289685 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-mmrht"
Nov 28 14:15:11 crc kubenswrapper[4857]: I1128 14:15:11.333592 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-mmrht"
Nov 28 14:15:11 crc kubenswrapper[4857]: I1128 14:15:11.948666 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-mmrht"
Nov 28 14:15:11 crc kubenswrapper[4857]: I1128 14:15:11.998438 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mmrht"]
Nov 28 14:15:13 crc kubenswrapper[4857]: I1128 14:15:13.960753 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-mmrht" podUID="bdbf5ae7-5b76-4174-843a-e36841abb42b" containerName="registry-server" containerID="cri-o://2f6037fb8f5ec5b6a65e137d98c817b874292603aefdbcff5076149a7253e94a" gracePeriod=2
Nov 28 14:15:15 crc kubenswrapper[4857]: I1128 14:15:15.520638 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mmrht"
Nov 28 14:15:15 crc kubenswrapper[4857]: I1128 14:15:15.699810 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bdbf5ae7-5b76-4174-843a-e36841abb42b-catalog-content\") pod \"bdbf5ae7-5b76-4174-843a-e36841abb42b\" (UID: \"bdbf5ae7-5b76-4174-843a-e36841abb42b\") "
Nov 28 14:15:15 crc kubenswrapper[4857]: I1128 14:15:15.700254 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9cmhx\" (UniqueName: \"kubernetes.io/projected/bdbf5ae7-5b76-4174-843a-e36841abb42b-kube-api-access-9cmhx\") pod \"bdbf5ae7-5b76-4174-843a-e36841abb42b\" (UID: \"bdbf5ae7-5b76-4174-843a-e36841abb42b\") "
Nov 28 14:15:15 crc kubenswrapper[4857]: I1128 14:15:15.700295 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bdbf5ae7-5b76-4174-843a-e36841abb42b-utilities\") pod \"bdbf5ae7-5b76-4174-843a-e36841abb42b\" (UID: \"bdbf5ae7-5b76-4174-843a-e36841abb42b\") "
Nov 28 14:15:15 crc kubenswrapper[4857]: I1128 14:15:15.701492 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bdbf5ae7-5b76-4174-843a-e36841abb42b-utilities" (OuterVolumeSpecName: "utilities") pod "bdbf5ae7-5b76-4174-843a-e36841abb42b" (UID: "bdbf5ae7-5b76-4174-843a-e36841abb42b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
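[annotation] The probe ordering above (startup "unhealthy" then "started", and only afterwards readiness "" then "ready") is the standard startup-probe gate: while a startupProbe is configured and has not yet succeeded, the kubelet withholds liveness and readiness results, which is why readiness first reports an empty status. A hypothetical probe pair that would produce this shape, written against the k8s.io/api types (the actual catalog-source pod spec may differ):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Both probes run the registry's gRPC health check; only the cadence differs.
	check := corev1.ProbeHandler{
		Exec: &corev1.ExecAction{Command: []string{"grpc_health_probe", "-addr=:50051"}},
	}
	startup := corev1.Probe{ProbeHandler: check, PeriodSeconds: 1, FailureThreshold: 10}
	readiness := corev1.Probe{ProbeHandler: check, PeriodSeconds: 10}
	// Readiness (and liveness) are only evaluated once startup has succeeded.
	fmt.Println(startup.FailureThreshold, readiness.PeriodSeconds)
}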
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:15:15 crc kubenswrapper[4857]: I1128 14:15:15.702511 4857 scope.go:117] "RemoveContainer" containerID="039646d2cc0856a7cd5b98a3c85cd50c130f5b53d297446b8ca635aa7ff46c20" Nov 28 14:15:15 crc kubenswrapper[4857]: I1128 14:15:15.706528 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bdbf5ae7-5b76-4174-843a-e36841abb42b-kube-api-access-9cmhx" (OuterVolumeSpecName: "kube-api-access-9cmhx") pod "bdbf5ae7-5b76-4174-843a-e36841abb42b" (UID: "bdbf5ae7-5b76-4174-843a-e36841abb42b"). InnerVolumeSpecName "kube-api-access-9cmhx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:15:15 crc kubenswrapper[4857]: I1128 14:15:15.750046 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bdbf5ae7-5b76-4174-843a-e36841abb42b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bdbf5ae7-5b76-4174-843a-e36841abb42b" (UID: "bdbf5ae7-5b76-4174-843a-e36841abb42b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:15:15 crc kubenswrapper[4857]: I1128 14:15:15.802420 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bdbf5ae7-5b76-4174-843a-e36841abb42b-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 14:15:15 crc kubenswrapper[4857]: I1128 14:15:15.802450 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bdbf5ae7-5b76-4174-843a-e36841abb42b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 14:15:15 crc kubenswrapper[4857]: I1128 14:15:15.802476 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9cmhx\" (UniqueName: \"kubernetes.io/projected/bdbf5ae7-5b76-4174-843a-e36841abb42b-kube-api-access-9cmhx\") on node \"crc\" DevicePath \"\"" Nov 28 14:15:15 crc kubenswrapper[4857]: I1128 14:15:15.973301 4857 generic.go:334] "Generic (PLEG): container finished" podID="bdbf5ae7-5b76-4174-843a-e36841abb42b" containerID="2f6037fb8f5ec5b6a65e137d98c817b874292603aefdbcff5076149a7253e94a" exitCode=0 Nov 28 14:15:15 crc kubenswrapper[4857]: I1128 14:15:15.973356 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mmrht" event={"ID":"bdbf5ae7-5b76-4174-843a-e36841abb42b","Type":"ContainerDied","Data":"2f6037fb8f5ec5b6a65e137d98c817b874292603aefdbcff5076149a7253e94a"} Nov 28 14:15:15 crc kubenswrapper[4857]: I1128 14:15:15.973384 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mmrht" event={"ID":"bdbf5ae7-5b76-4174-843a-e36841abb42b","Type":"ContainerDied","Data":"7e74ccd802ddb4399d2b218cffffd2e9e86709957c7d691e1287a8027b99d7f3"} Nov 28 14:15:15 crc kubenswrapper[4857]: I1128 14:15:15.973407 4857 scope.go:117] "RemoveContainer" containerID="2f6037fb8f5ec5b6a65e137d98c817b874292603aefdbcff5076149a7253e94a" Nov 28 14:15:15 crc kubenswrapper[4857]: I1128 14:15:15.973532 4857 util.go:48] "No ready sandbox for pod can be found. 
Nov 28 14:15:15 crc kubenswrapper[4857]: I1128 14:15:15.994096 4857 scope.go:117] "RemoveContainer" containerID="668e722f720c8e95879f2de8c1ef5ead70ded6a99f7a05ab9a4845cead6fcf97"
Nov 28 14:15:16 crc kubenswrapper[4857]: I1128 14:15:16.004160 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mmrht"]
Nov 28 14:15:16 crc kubenswrapper[4857]: I1128 14:15:16.010854 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-mmrht"]
Nov 28 14:15:16 crc kubenswrapper[4857]: I1128 14:15:16.028482 4857 scope.go:117] "RemoveContainer" containerID="1669fd7d3e7382c6e81444f0c20b246b803b1051e81cbf18ee4f315f86edaec8"
Nov 28 14:15:16 crc kubenswrapper[4857]: I1128 14:15:16.041821 4857 scope.go:117] "RemoveContainer" containerID="2f6037fb8f5ec5b6a65e137d98c817b874292603aefdbcff5076149a7253e94a"
Nov 28 14:15:16 crc kubenswrapper[4857]: E1128 14:15:16.042179 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2f6037fb8f5ec5b6a65e137d98c817b874292603aefdbcff5076149a7253e94a\": container with ID starting with 2f6037fb8f5ec5b6a65e137d98c817b874292603aefdbcff5076149a7253e94a not found: ID does not exist" containerID="2f6037fb8f5ec5b6a65e137d98c817b874292603aefdbcff5076149a7253e94a"
Nov 28 14:15:16 crc kubenswrapper[4857]: I1128 14:15:16.042220 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f6037fb8f5ec5b6a65e137d98c817b874292603aefdbcff5076149a7253e94a"} err="failed to get container status \"2f6037fb8f5ec5b6a65e137d98c817b874292603aefdbcff5076149a7253e94a\": rpc error: code = NotFound desc = could not find container \"2f6037fb8f5ec5b6a65e137d98c817b874292603aefdbcff5076149a7253e94a\": container with ID starting with 2f6037fb8f5ec5b6a65e137d98c817b874292603aefdbcff5076149a7253e94a not found: ID does not exist"
Nov 28 14:15:16 crc kubenswrapper[4857]: I1128 14:15:16.042246 4857 scope.go:117] "RemoveContainer" containerID="668e722f720c8e95879f2de8c1ef5ead70ded6a99f7a05ab9a4845cead6fcf97"
Nov 28 14:15:16 crc kubenswrapper[4857]: E1128 14:15:16.042507 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"668e722f720c8e95879f2de8c1ef5ead70ded6a99f7a05ab9a4845cead6fcf97\": container with ID starting with 668e722f720c8e95879f2de8c1ef5ead70ded6a99f7a05ab9a4845cead6fcf97 not found: ID does not exist" containerID="668e722f720c8e95879f2de8c1ef5ead70ded6a99f7a05ab9a4845cead6fcf97"
Nov 28 14:15:16 crc kubenswrapper[4857]: I1128 14:15:16.042536 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"668e722f720c8e95879f2de8c1ef5ead70ded6a99f7a05ab9a4845cead6fcf97"} err="failed to get container status \"668e722f720c8e95879f2de8c1ef5ead70ded6a99f7a05ab9a4845cead6fcf97\": rpc error: code = NotFound desc = could not find container \"668e722f720c8e95879f2de8c1ef5ead70ded6a99f7a05ab9a4845cead6fcf97\": container with ID starting with 668e722f720c8e95879f2de8c1ef5ead70ded6a99f7a05ab9a4845cead6fcf97 not found: ID does not exist"
Nov 28 14:15:16 crc kubenswrapper[4857]: I1128 14:15:16.042553 4857 scope.go:117] "RemoveContainer" containerID="1669fd7d3e7382c6e81444f0c20b246b803b1051e81cbf18ee4f315f86edaec8"
Nov 28 14:15:16 crc kubenswrapper[4857]: E1128 14:15:16.042800 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1669fd7d3e7382c6e81444f0c20b246b803b1051e81cbf18ee4f315f86edaec8\": container with ID starting with 1669fd7d3e7382c6e81444f0c20b246b803b1051e81cbf18ee4f315f86edaec8 not found: ID does not exist" containerID="1669fd7d3e7382c6e81444f0c20b246b803b1051e81cbf18ee4f315f86edaec8"
Nov 28 14:15:16 crc kubenswrapper[4857]: I1128 14:15:16.042839 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1669fd7d3e7382c6e81444f0c20b246b803b1051e81cbf18ee4f315f86edaec8"} err="failed to get container status \"1669fd7d3e7382c6e81444f0c20b246b803b1051e81cbf18ee4f315f86edaec8\": rpc error: code = NotFound desc = could not find container \"1669fd7d3e7382c6e81444f0c20b246b803b1051e81cbf18ee4f315f86edaec8\": container with ID starting with 1669fd7d3e7382c6e81444f0c20b246b803b1051e81cbf18ee4f315f86edaec8 not found: ID does not exist"
Nov 28 14:15:16 crc kubenswrapper[4857]: I1128 14:15:16.237436 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bdbf5ae7-5b76-4174-843a-e36841abb42b" path="/var/lib/kubelet/pods/bdbf5ae7-5b76-4174-843a-e36841abb42b/volumes"
Nov 28 14:15:25 crc kubenswrapper[4857]: I1128 14:15:25.160179 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-5gwh6"]
Nov 28 14:15:25 crc kubenswrapper[4857]: E1128 14:15:25.161065 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bdbf5ae7-5b76-4174-843a-e36841abb42b" containerName="extract-content"
Nov 28 14:15:25 crc kubenswrapper[4857]: I1128 14:15:25.161085 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="bdbf5ae7-5b76-4174-843a-e36841abb42b" containerName="extract-content"
Nov 28 14:15:25 crc kubenswrapper[4857]: E1128 14:15:25.161107 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bdbf5ae7-5b76-4174-843a-e36841abb42b" containerName="extract-utilities"
Nov 28 14:15:25 crc kubenswrapper[4857]: I1128 14:15:25.161116 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="bdbf5ae7-5b76-4174-843a-e36841abb42b" containerName="extract-utilities"
Nov 28 14:15:25 crc kubenswrapper[4857]: E1128 14:15:25.161128 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1ba1836-332a-4710-a660-78728a3e65cf" containerName="collect-profiles"
Nov 28 14:15:25 crc kubenswrapper[4857]: I1128 14:15:25.161137 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1ba1836-332a-4710-a660-78728a3e65cf" containerName="collect-profiles"
Nov 28 14:15:25 crc kubenswrapper[4857]: E1128 14:15:25.161156 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bdbf5ae7-5b76-4174-843a-e36841abb42b" containerName="registry-server"
Nov 28 14:15:25 crc kubenswrapper[4857]: I1128 14:15:25.161164 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="bdbf5ae7-5b76-4174-843a-e36841abb42b" containerName="registry-server"
Nov 28 14:15:25 crc kubenswrapper[4857]: I1128 14:15:25.161332 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="bdbf5ae7-5b76-4174-843a-e36841abb42b" containerName="registry-server"
Nov 28 14:15:25 crc kubenswrapper[4857]: I1128 14:15:25.161357 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1ba1836-332a-4710-a660-78728a3e65cf" containerName="collect-profiles"
Nov 28 14:15:25 crc kubenswrapper[4857]: I1128 14:15:25.162600 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5gwh6"
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5gwh6" Nov 28 14:15:25 crc kubenswrapper[4857]: I1128 14:15:25.178359 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5gwh6"] Nov 28 14:15:25 crc kubenswrapper[4857]: I1128 14:15:25.327991 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1201928e-1381-41e6-8dd4-9d55efce2c3c-utilities\") pod \"redhat-marketplace-5gwh6\" (UID: \"1201928e-1381-41e6-8dd4-9d55efce2c3c\") " pod="openshift-marketplace/redhat-marketplace-5gwh6" Nov 28 14:15:25 crc kubenswrapper[4857]: I1128 14:15:25.328296 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xfs5j\" (UniqueName: \"kubernetes.io/projected/1201928e-1381-41e6-8dd4-9d55efce2c3c-kube-api-access-xfs5j\") pod \"redhat-marketplace-5gwh6\" (UID: \"1201928e-1381-41e6-8dd4-9d55efce2c3c\") " pod="openshift-marketplace/redhat-marketplace-5gwh6" Nov 28 14:15:25 crc kubenswrapper[4857]: I1128 14:15:25.328550 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1201928e-1381-41e6-8dd4-9d55efce2c3c-catalog-content\") pod \"redhat-marketplace-5gwh6\" (UID: \"1201928e-1381-41e6-8dd4-9d55efce2c3c\") " pod="openshift-marketplace/redhat-marketplace-5gwh6" Nov 28 14:15:25 crc kubenswrapper[4857]: I1128 14:15:25.430453 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1201928e-1381-41e6-8dd4-9d55efce2c3c-utilities\") pod \"redhat-marketplace-5gwh6\" (UID: \"1201928e-1381-41e6-8dd4-9d55efce2c3c\") " pod="openshift-marketplace/redhat-marketplace-5gwh6" Nov 28 14:15:25 crc kubenswrapper[4857]: I1128 14:15:25.430523 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xfs5j\" (UniqueName: \"kubernetes.io/projected/1201928e-1381-41e6-8dd4-9d55efce2c3c-kube-api-access-xfs5j\") pod \"redhat-marketplace-5gwh6\" (UID: \"1201928e-1381-41e6-8dd4-9d55efce2c3c\") " pod="openshift-marketplace/redhat-marketplace-5gwh6" Nov 28 14:15:25 crc kubenswrapper[4857]: I1128 14:15:25.430600 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1201928e-1381-41e6-8dd4-9d55efce2c3c-catalog-content\") pod \"redhat-marketplace-5gwh6\" (UID: \"1201928e-1381-41e6-8dd4-9d55efce2c3c\") " pod="openshift-marketplace/redhat-marketplace-5gwh6" Nov 28 14:15:25 crc kubenswrapper[4857]: I1128 14:15:25.431023 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1201928e-1381-41e6-8dd4-9d55efce2c3c-utilities\") pod \"redhat-marketplace-5gwh6\" (UID: \"1201928e-1381-41e6-8dd4-9d55efce2c3c\") " pod="openshift-marketplace/redhat-marketplace-5gwh6" Nov 28 14:15:25 crc kubenswrapper[4857]: I1128 14:15:25.431058 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1201928e-1381-41e6-8dd4-9d55efce2c3c-catalog-content\") pod \"redhat-marketplace-5gwh6\" (UID: \"1201928e-1381-41e6-8dd4-9d55efce2c3c\") " pod="openshift-marketplace/redhat-marketplace-5gwh6" Nov 28 14:15:25 crc kubenswrapper[4857]: I1128 14:15:25.450979 4857 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-xfs5j\" (UniqueName: \"kubernetes.io/projected/1201928e-1381-41e6-8dd4-9d55efce2c3c-kube-api-access-xfs5j\") pod \"redhat-marketplace-5gwh6\" (UID: \"1201928e-1381-41e6-8dd4-9d55efce2c3c\") " pod="openshift-marketplace/redhat-marketplace-5gwh6" Nov 28 14:15:25 crc kubenswrapper[4857]: I1128 14:15:25.484740 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5gwh6" Nov 28 14:15:25 crc kubenswrapper[4857]: I1128 14:15:25.916304 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5gwh6"] Nov 28 14:15:26 crc kubenswrapper[4857]: I1128 14:15:26.053418 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5gwh6" event={"ID":"1201928e-1381-41e6-8dd4-9d55efce2c3c","Type":"ContainerStarted","Data":"0237452119e8df7962acd9e97458bdd3657a3c83fae3cd65e018ff5cf4db3778"} Nov 28 14:15:27 crc kubenswrapper[4857]: I1128 14:15:27.063221 4857 generic.go:334] "Generic (PLEG): container finished" podID="1201928e-1381-41e6-8dd4-9d55efce2c3c" containerID="5e9f2681c3f99d3dc48c1a4669821029b95ea9849044d08af3b17a3c1e5b688c" exitCode=0 Nov 28 14:15:27 crc kubenswrapper[4857]: I1128 14:15:27.064423 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5gwh6" event={"ID":"1201928e-1381-41e6-8dd4-9d55efce2c3c","Type":"ContainerDied","Data":"5e9f2681c3f99d3dc48c1a4669821029b95ea9849044d08af3b17a3c1e5b688c"} Nov 28 14:15:28 crc kubenswrapper[4857]: I1128 14:15:28.073485 4857 generic.go:334] "Generic (PLEG): container finished" podID="1201928e-1381-41e6-8dd4-9d55efce2c3c" containerID="496cfbcd85903fd16594e0dc75a97bd53dd94655ef368b9a32fab0a1dd852774" exitCode=0 Nov 28 14:15:28 crc kubenswrapper[4857]: I1128 14:15:28.073545 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5gwh6" event={"ID":"1201928e-1381-41e6-8dd4-9d55efce2c3c","Type":"ContainerDied","Data":"496cfbcd85903fd16594e0dc75a97bd53dd94655ef368b9a32fab0a1dd852774"} Nov 28 14:15:29 crc kubenswrapper[4857]: I1128 14:15:29.083982 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5gwh6" event={"ID":"1201928e-1381-41e6-8dd4-9d55efce2c3c","Type":"ContainerStarted","Data":"b2f7b7b56c6bef8f9d53206a5cfe1c85b5c46d36eb8ec67aeee7ca85b286222a"} Nov 28 14:15:29 crc kubenswrapper[4857]: I1128 14:15:29.104631 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-5gwh6" podStartSLOduration=2.445974169 podStartE2EDuration="4.104616633s" podCreationTimestamp="2025-11-28 14:15:25 +0000 UTC" firstStartedPulling="2025-11-28 14:15:27.066918301 +0000 UTC m=+2777.190859738" lastFinishedPulling="2025-11-28 14:15:28.725560765 +0000 UTC m=+2778.849502202" observedRunningTime="2025-11-28 14:15:29.100300148 +0000 UTC m=+2779.224241585" watchObservedRunningTime="2025-11-28 14:15:29.104616633 +0000 UTC m=+2779.228558060" Nov 28 14:15:35 crc kubenswrapper[4857]: I1128 14:15:35.485290 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-5gwh6" Nov 28 14:15:35 crc kubenswrapper[4857]: I1128 14:15:35.485923 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-5gwh6" Nov 28 14:15:35 crc kubenswrapper[4857]: I1128 14:15:35.526488 4857 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-5gwh6" Nov 28 14:15:36 crc kubenswrapper[4857]: I1128 14:15:36.173860 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-5gwh6" Nov 28 14:15:36 crc kubenswrapper[4857]: I1128 14:15:36.224497 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5gwh6"] Nov 28 14:15:38 crc kubenswrapper[4857]: I1128 14:15:38.146066 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-5gwh6" podUID="1201928e-1381-41e6-8dd4-9d55efce2c3c" containerName="registry-server" containerID="cri-o://b2f7b7b56c6bef8f9d53206a5cfe1c85b5c46d36eb8ec67aeee7ca85b286222a" gracePeriod=2 Nov 28 14:15:39 crc kubenswrapper[4857]: I1128 14:15:39.050348 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5gwh6" Nov 28 14:15:39 crc kubenswrapper[4857]: I1128 14:15:39.128332 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1201928e-1381-41e6-8dd4-9d55efce2c3c-catalog-content\") pod \"1201928e-1381-41e6-8dd4-9d55efce2c3c\" (UID: \"1201928e-1381-41e6-8dd4-9d55efce2c3c\") " Nov 28 14:15:39 crc kubenswrapper[4857]: I1128 14:15:39.149770 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1201928e-1381-41e6-8dd4-9d55efce2c3c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1201928e-1381-41e6-8dd4-9d55efce2c3c" (UID: "1201928e-1381-41e6-8dd4-9d55efce2c3c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:15:39 crc kubenswrapper[4857]: I1128 14:15:39.153753 4857 generic.go:334] "Generic (PLEG): container finished" podID="1201928e-1381-41e6-8dd4-9d55efce2c3c" containerID="b2f7b7b56c6bef8f9d53206a5cfe1c85b5c46d36eb8ec67aeee7ca85b286222a" exitCode=0 Nov 28 14:15:39 crc kubenswrapper[4857]: I1128 14:15:39.153800 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5gwh6" event={"ID":"1201928e-1381-41e6-8dd4-9d55efce2c3c","Type":"ContainerDied","Data":"b2f7b7b56c6bef8f9d53206a5cfe1c85b5c46d36eb8ec67aeee7ca85b286222a"} Nov 28 14:15:39 crc kubenswrapper[4857]: I1128 14:15:39.153827 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5gwh6" event={"ID":"1201928e-1381-41e6-8dd4-9d55efce2c3c","Type":"ContainerDied","Data":"0237452119e8df7962acd9e97458bdd3657a3c83fae3cd65e018ff5cf4db3778"} Nov 28 14:15:39 crc kubenswrapper[4857]: I1128 14:15:39.153844 4857 scope.go:117] "RemoveContainer" containerID="b2f7b7b56c6bef8f9d53206a5cfe1c85b5c46d36eb8ec67aeee7ca85b286222a" Nov 28 14:15:39 crc kubenswrapper[4857]: I1128 14:15:39.153899 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5gwh6" Nov 28 14:15:39 crc kubenswrapper[4857]: I1128 14:15:39.169286 4857 scope.go:117] "RemoveContainer" containerID="496cfbcd85903fd16594e0dc75a97bd53dd94655ef368b9a32fab0a1dd852774" Nov 28 14:15:39 crc kubenswrapper[4857]: I1128 14:15:39.185295 4857 scope.go:117] "RemoveContainer" containerID="5e9f2681c3f99d3dc48c1a4669821029b95ea9849044d08af3b17a3c1e5b688c" Nov 28 14:15:39 crc kubenswrapper[4857]: I1128 14:15:39.210291 4857 scope.go:117] "RemoveContainer" containerID="b2f7b7b56c6bef8f9d53206a5cfe1c85b5c46d36eb8ec67aeee7ca85b286222a" Nov 28 14:15:39 crc kubenswrapper[4857]: E1128 14:15:39.212166 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b2f7b7b56c6bef8f9d53206a5cfe1c85b5c46d36eb8ec67aeee7ca85b286222a\": container with ID starting with b2f7b7b56c6bef8f9d53206a5cfe1c85b5c46d36eb8ec67aeee7ca85b286222a not found: ID does not exist" containerID="b2f7b7b56c6bef8f9d53206a5cfe1c85b5c46d36eb8ec67aeee7ca85b286222a" Nov 28 14:15:39 crc kubenswrapper[4857]: I1128 14:15:39.212290 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b2f7b7b56c6bef8f9d53206a5cfe1c85b5c46d36eb8ec67aeee7ca85b286222a"} err="failed to get container status \"b2f7b7b56c6bef8f9d53206a5cfe1c85b5c46d36eb8ec67aeee7ca85b286222a\": rpc error: code = NotFound desc = could not find container \"b2f7b7b56c6bef8f9d53206a5cfe1c85b5c46d36eb8ec67aeee7ca85b286222a\": container with ID starting with b2f7b7b56c6bef8f9d53206a5cfe1c85b5c46d36eb8ec67aeee7ca85b286222a not found: ID does not exist" Nov 28 14:15:39 crc kubenswrapper[4857]: I1128 14:15:39.212374 4857 scope.go:117] "RemoveContainer" containerID="496cfbcd85903fd16594e0dc75a97bd53dd94655ef368b9a32fab0a1dd852774" Nov 28 14:15:39 crc kubenswrapper[4857]: E1128 14:15:39.213096 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"496cfbcd85903fd16594e0dc75a97bd53dd94655ef368b9a32fab0a1dd852774\": container with ID starting with 496cfbcd85903fd16594e0dc75a97bd53dd94655ef368b9a32fab0a1dd852774 not found: ID does not exist" containerID="496cfbcd85903fd16594e0dc75a97bd53dd94655ef368b9a32fab0a1dd852774" Nov 28 14:15:39 crc kubenswrapper[4857]: I1128 14:15:39.213126 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"496cfbcd85903fd16594e0dc75a97bd53dd94655ef368b9a32fab0a1dd852774"} err="failed to get container status \"496cfbcd85903fd16594e0dc75a97bd53dd94655ef368b9a32fab0a1dd852774\": rpc error: code = NotFound desc = could not find container \"496cfbcd85903fd16594e0dc75a97bd53dd94655ef368b9a32fab0a1dd852774\": container with ID starting with 496cfbcd85903fd16594e0dc75a97bd53dd94655ef368b9a32fab0a1dd852774 not found: ID does not exist" Nov 28 14:15:39 crc kubenswrapper[4857]: I1128 14:15:39.213156 4857 scope.go:117] "RemoveContainer" containerID="5e9f2681c3f99d3dc48c1a4669821029b95ea9849044d08af3b17a3c1e5b688c" Nov 28 14:15:39 crc kubenswrapper[4857]: E1128 14:15:39.213623 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5e9f2681c3f99d3dc48c1a4669821029b95ea9849044d08af3b17a3c1e5b688c\": container with ID starting with 5e9f2681c3f99d3dc48c1a4669821029b95ea9849044d08af3b17a3c1e5b688c not found: ID does not exist" containerID="5e9f2681c3f99d3dc48c1a4669821029b95ea9849044d08af3b17a3c1e5b688c" 
Nov 28 14:15:39 crc kubenswrapper[4857]: I1128 14:15:39.213645 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5e9f2681c3f99d3dc48c1a4669821029b95ea9849044d08af3b17a3c1e5b688c"} err="failed to get container status \"5e9f2681c3f99d3dc48c1a4669821029b95ea9849044d08af3b17a3c1e5b688c\": rpc error: code = NotFound desc = could not find container \"5e9f2681c3f99d3dc48c1a4669821029b95ea9849044d08af3b17a3c1e5b688c\": container with ID starting with 5e9f2681c3f99d3dc48c1a4669821029b95ea9849044d08af3b17a3c1e5b688c not found: ID does not exist" Nov 28 14:15:39 crc kubenswrapper[4857]: I1128 14:15:39.229394 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xfs5j\" (UniqueName: \"kubernetes.io/projected/1201928e-1381-41e6-8dd4-9d55efce2c3c-kube-api-access-xfs5j\") pod \"1201928e-1381-41e6-8dd4-9d55efce2c3c\" (UID: \"1201928e-1381-41e6-8dd4-9d55efce2c3c\") " Nov 28 14:15:39 crc kubenswrapper[4857]: I1128 14:15:39.229477 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1201928e-1381-41e6-8dd4-9d55efce2c3c-utilities\") pod \"1201928e-1381-41e6-8dd4-9d55efce2c3c\" (UID: \"1201928e-1381-41e6-8dd4-9d55efce2c3c\") " Nov 28 14:15:39 crc kubenswrapper[4857]: I1128 14:15:39.229975 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1201928e-1381-41e6-8dd4-9d55efce2c3c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 14:15:39 crc kubenswrapper[4857]: I1128 14:15:39.230127 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1201928e-1381-41e6-8dd4-9d55efce2c3c-utilities" (OuterVolumeSpecName: "utilities") pod "1201928e-1381-41e6-8dd4-9d55efce2c3c" (UID: "1201928e-1381-41e6-8dd4-9d55efce2c3c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:15:39 crc kubenswrapper[4857]: I1128 14:15:39.236859 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1201928e-1381-41e6-8dd4-9d55efce2c3c-kube-api-access-xfs5j" (OuterVolumeSpecName: "kube-api-access-xfs5j") pod "1201928e-1381-41e6-8dd4-9d55efce2c3c" (UID: "1201928e-1381-41e6-8dd4-9d55efce2c3c"). InnerVolumeSpecName "kube-api-access-xfs5j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:15:39 crc kubenswrapper[4857]: I1128 14:15:39.331074 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xfs5j\" (UniqueName: \"kubernetes.io/projected/1201928e-1381-41e6-8dd4-9d55efce2c3c-kube-api-access-xfs5j\") on node \"crc\" DevicePath \"\"" Nov 28 14:15:39 crc kubenswrapper[4857]: I1128 14:15:39.331346 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1201928e-1381-41e6-8dd4-9d55efce2c3c-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 14:15:39 crc kubenswrapper[4857]: I1128 14:15:39.490940 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5gwh6"] Nov 28 14:15:39 crc kubenswrapper[4857]: I1128 14:15:39.497786 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-5gwh6"] Nov 28 14:15:40 crc kubenswrapper[4857]: I1128 14:15:40.238832 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1201928e-1381-41e6-8dd4-9d55efce2c3c" path="/var/lib/kubelet/pods/1201928e-1381-41e6-8dd4-9d55efce2c3c/volumes" Nov 28 14:15:41 crc kubenswrapper[4857]: I1128 14:15:41.308467 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 14:15:41 crc kubenswrapper[4857]: I1128 14:15:41.308524 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 14:16:11 crc kubenswrapper[4857]: I1128 14:16:11.308911 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 14:16:11 crc kubenswrapper[4857]: I1128 14:16:11.309745 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 14:16:41 crc kubenswrapper[4857]: I1128 14:16:41.308584 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 14:16:41 crc kubenswrapper[4857]: I1128 14:16:41.309397 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 14:16:41 crc kubenswrapper[4857]: I1128 14:16:41.309467 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" 
status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 14:16:41 crc kubenswrapper[4857]: I1128 14:16:41.310428 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c21b36b6844e784455ff90551304b9ff88a395836b3cec4816bcfecc913def32"} pod="openshift-machine-config-operator/machine-config-daemon-dshsf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 14:16:41 crc kubenswrapper[4857]: I1128 14:16:41.310552 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" containerID="cri-o://c21b36b6844e784455ff90551304b9ff88a395836b3cec4816bcfecc913def32" gracePeriod=600 Nov 28 14:16:41 crc kubenswrapper[4857]: I1128 14:16:41.643514 4857 generic.go:334] "Generic (PLEG): container finished" podID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerID="c21b36b6844e784455ff90551304b9ff88a395836b3cec4816bcfecc913def32" exitCode=0 Nov 28 14:16:41 crc kubenswrapper[4857]: I1128 14:16:41.643751 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerDied","Data":"c21b36b6844e784455ff90551304b9ff88a395836b3cec4816bcfecc913def32"} Nov 28 14:16:41 crc kubenswrapper[4857]: I1128 14:16:41.644266 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerStarted","Data":"e09fc0c531d9196e70777fd485fea89ca6b50c21e8037febc16f64c30d069101"} Nov 28 14:16:41 crc kubenswrapper[4857]: I1128 14:16:41.644356 4857 scope.go:117] "RemoveContainer" containerID="746c210a1bd90a856e577cc52c82e3eb0d8c3877e02c738daba7fed28ad73c0f" Nov 28 14:18:41 crc kubenswrapper[4857]: I1128 14:18:41.308499 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 14:18:41 crc kubenswrapper[4857]: I1128 14:18:41.309528 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 14:19:11 crc kubenswrapper[4857]: I1128 14:19:11.308491 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 14:19:11 crc kubenswrapper[4857]: I1128 14:19:11.309151 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 14:19:15 crc 
kubenswrapper[4857]: I1128 14:19:15.174943 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-99hjq"] Nov 28 14:19:15 crc kubenswrapper[4857]: E1128 14:19:15.176177 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1201928e-1381-41e6-8dd4-9d55efce2c3c" containerName="registry-server" Nov 28 14:19:15 crc kubenswrapper[4857]: I1128 14:19:15.176201 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="1201928e-1381-41e6-8dd4-9d55efce2c3c" containerName="registry-server" Nov 28 14:19:15 crc kubenswrapper[4857]: E1128 14:19:15.176254 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1201928e-1381-41e6-8dd4-9d55efce2c3c" containerName="extract-utilities" Nov 28 14:19:15 crc kubenswrapper[4857]: I1128 14:19:15.176267 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="1201928e-1381-41e6-8dd4-9d55efce2c3c" containerName="extract-utilities" Nov 28 14:19:15 crc kubenswrapper[4857]: E1128 14:19:15.176295 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1201928e-1381-41e6-8dd4-9d55efce2c3c" containerName="extract-content" Nov 28 14:19:15 crc kubenswrapper[4857]: I1128 14:19:15.176309 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="1201928e-1381-41e6-8dd4-9d55efce2c3c" containerName="extract-content" Nov 28 14:19:15 crc kubenswrapper[4857]: I1128 14:19:15.176604 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="1201928e-1381-41e6-8dd4-9d55efce2c3c" containerName="registry-server" Nov 28 14:19:15 crc kubenswrapper[4857]: I1128 14:19:15.178458 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-99hjq" Nov 28 14:19:15 crc kubenswrapper[4857]: I1128 14:19:15.207020 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2926817-d9c2-4a87-baa5-58b9483eef00-utilities\") pod \"certified-operators-99hjq\" (UID: \"a2926817-d9c2-4a87-baa5-58b9483eef00\") " pod="openshift-marketplace/certified-operators-99hjq" Nov 28 14:19:15 crc kubenswrapper[4857]: I1128 14:19:15.207081 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2926817-d9c2-4a87-baa5-58b9483eef00-catalog-content\") pod \"certified-operators-99hjq\" (UID: \"a2926817-d9c2-4a87-baa5-58b9483eef00\") " pod="openshift-marketplace/certified-operators-99hjq" Nov 28 14:19:15 crc kubenswrapper[4857]: I1128 14:19:15.207117 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bzwrf\" (UniqueName: \"kubernetes.io/projected/a2926817-d9c2-4a87-baa5-58b9483eef00-kube-api-access-bzwrf\") pod \"certified-operators-99hjq\" (UID: \"a2926817-d9c2-4a87-baa5-58b9483eef00\") " pod="openshift-marketplace/certified-operators-99hjq" Nov 28 14:19:15 crc kubenswrapper[4857]: I1128 14:19:15.221732 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-99hjq"] Nov 28 14:19:15 crc kubenswrapper[4857]: I1128 14:19:15.308924 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2926817-d9c2-4a87-baa5-58b9483eef00-utilities\") pod \"certified-operators-99hjq\" (UID: \"a2926817-d9c2-4a87-baa5-58b9483eef00\") " pod="openshift-marketplace/certified-operators-99hjq" Nov 28 
14:19:15 crc kubenswrapper[4857]: I1128 14:19:15.309004 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2926817-d9c2-4a87-baa5-58b9483eef00-catalog-content\") pod \"certified-operators-99hjq\" (UID: \"a2926817-d9c2-4a87-baa5-58b9483eef00\") " pod="openshift-marketplace/certified-operators-99hjq" Nov 28 14:19:15 crc kubenswrapper[4857]: I1128 14:19:15.309056 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bzwrf\" (UniqueName: \"kubernetes.io/projected/a2926817-d9c2-4a87-baa5-58b9483eef00-kube-api-access-bzwrf\") pod \"certified-operators-99hjq\" (UID: \"a2926817-d9c2-4a87-baa5-58b9483eef00\") " pod="openshift-marketplace/certified-operators-99hjq" Nov 28 14:19:15 crc kubenswrapper[4857]: I1128 14:19:15.309605 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2926817-d9c2-4a87-baa5-58b9483eef00-utilities\") pod \"certified-operators-99hjq\" (UID: \"a2926817-d9c2-4a87-baa5-58b9483eef00\") " pod="openshift-marketplace/certified-operators-99hjq" Nov 28 14:19:15 crc kubenswrapper[4857]: I1128 14:19:15.309648 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2926817-d9c2-4a87-baa5-58b9483eef00-catalog-content\") pod \"certified-operators-99hjq\" (UID: \"a2926817-d9c2-4a87-baa5-58b9483eef00\") " pod="openshift-marketplace/certified-operators-99hjq" Nov 28 14:19:15 crc kubenswrapper[4857]: I1128 14:19:15.332074 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bzwrf\" (UniqueName: \"kubernetes.io/projected/a2926817-d9c2-4a87-baa5-58b9483eef00-kube-api-access-bzwrf\") pod \"certified-operators-99hjq\" (UID: \"a2926817-d9c2-4a87-baa5-58b9483eef00\") " pod="openshift-marketplace/certified-operators-99hjq" Nov 28 14:19:15 crc kubenswrapper[4857]: I1128 14:19:15.529533 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-99hjq" Nov 28 14:19:16 crc kubenswrapper[4857]: I1128 14:19:16.004817 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-99hjq"] Nov 28 14:19:16 crc kubenswrapper[4857]: I1128 14:19:16.867625 4857 generic.go:334] "Generic (PLEG): container finished" podID="a2926817-d9c2-4a87-baa5-58b9483eef00" containerID="d0aada6ded56cc819700457b4529b04acd83023c08739be0c18b26e8ee97e82c" exitCode=0 Nov 28 14:19:16 crc kubenswrapper[4857]: I1128 14:19:16.867717 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-99hjq" event={"ID":"a2926817-d9c2-4a87-baa5-58b9483eef00","Type":"ContainerDied","Data":"d0aada6ded56cc819700457b4529b04acd83023c08739be0c18b26e8ee97e82c"} Nov 28 14:19:16 crc kubenswrapper[4857]: I1128 14:19:16.867932 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-99hjq" event={"ID":"a2926817-d9c2-4a87-baa5-58b9483eef00","Type":"ContainerStarted","Data":"dc033d1b7ca6357233ebe7e15ae6ace3ac652ce7fde4b6975128534c5825c73f"} Nov 28 14:19:22 crc kubenswrapper[4857]: I1128 14:19:22.914281 4857 generic.go:334] "Generic (PLEG): container finished" podID="a2926817-d9c2-4a87-baa5-58b9483eef00" containerID="511f6886b05c2b81becf86a1033fedfca4e8331057efe8de97beaacd9d2ab49f" exitCode=0 Nov 28 14:19:22 crc kubenswrapper[4857]: I1128 14:19:22.914409 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-99hjq" event={"ID":"a2926817-d9c2-4a87-baa5-58b9483eef00","Type":"ContainerDied","Data":"511f6886b05c2b81becf86a1033fedfca4e8331057efe8de97beaacd9d2ab49f"} Nov 28 14:19:23 crc kubenswrapper[4857]: I1128 14:19:23.924017 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-99hjq" event={"ID":"a2926817-d9c2-4a87-baa5-58b9483eef00","Type":"ContainerStarted","Data":"e831f67593a82b35cde5a17af70f5d95d91f218b1906e70a29f56c5b65588ae9"} Nov 28 14:19:23 crc kubenswrapper[4857]: I1128 14:19:23.943792 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-99hjq" podStartSLOduration=2.418144659 podStartE2EDuration="8.943774288s" podCreationTimestamp="2025-11-28 14:19:15 +0000 UTC" firstStartedPulling="2025-11-28 14:19:16.87208961 +0000 UTC m=+3006.996031077" lastFinishedPulling="2025-11-28 14:19:23.397719259 +0000 UTC m=+3013.521660706" observedRunningTime="2025-11-28 14:19:23.942052392 +0000 UTC m=+3014.065993849" watchObservedRunningTime="2025-11-28 14:19:23.943774288 +0000 UTC m=+3014.067715725" Nov 28 14:19:25 crc kubenswrapper[4857]: I1128 14:19:25.530455 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-99hjq" Nov 28 14:19:25 crc kubenswrapper[4857]: I1128 14:19:25.530529 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-99hjq" Nov 28 14:19:25 crc kubenswrapper[4857]: I1128 14:19:25.598563 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-99hjq" Nov 28 14:19:35 crc kubenswrapper[4857]: I1128 14:19:35.584593 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-99hjq" Nov 28 14:19:35 crc kubenswrapper[4857]: I1128 14:19:35.657474 4857 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openshift-marketplace/certified-operators-99hjq"] Nov 28 14:19:35 crc kubenswrapper[4857]: I1128 14:19:35.698909 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pbgdj"] Nov 28 14:19:35 crc kubenswrapper[4857]: I1128 14:19:35.699314 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-pbgdj" podUID="9efdbce5-c29e-4601-bc0e-0b65a8dd7899" containerName="registry-server" containerID="cri-o://6487608a645ff66f82c8a01b776ec303ecb5feaff63daa9ef1f9da55971ff37e" gracePeriod=2 Nov 28 14:19:36 crc kubenswrapper[4857]: I1128 14:19:36.026824 4857 generic.go:334] "Generic (PLEG): container finished" podID="9efdbce5-c29e-4601-bc0e-0b65a8dd7899" containerID="6487608a645ff66f82c8a01b776ec303ecb5feaff63daa9ef1f9da55971ff37e" exitCode=0 Nov 28 14:19:36 crc kubenswrapper[4857]: I1128 14:19:36.026998 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pbgdj" event={"ID":"9efdbce5-c29e-4601-bc0e-0b65a8dd7899","Type":"ContainerDied","Data":"6487608a645ff66f82c8a01b776ec303ecb5feaff63daa9ef1f9da55971ff37e"} Nov 28 14:19:36 crc kubenswrapper[4857]: I1128 14:19:36.112672 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pbgdj" Nov 28 14:19:36 crc kubenswrapper[4857]: I1128 14:19:36.223680 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9efdbce5-c29e-4601-bc0e-0b65a8dd7899-utilities\") pod \"9efdbce5-c29e-4601-bc0e-0b65a8dd7899\" (UID: \"9efdbce5-c29e-4601-bc0e-0b65a8dd7899\") " Nov 28 14:19:36 crc kubenswrapper[4857]: I1128 14:19:36.223740 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9efdbce5-c29e-4601-bc0e-0b65a8dd7899-catalog-content\") pod \"9efdbce5-c29e-4601-bc0e-0b65a8dd7899\" (UID: \"9efdbce5-c29e-4601-bc0e-0b65a8dd7899\") " Nov 28 14:19:36 crc kubenswrapper[4857]: I1128 14:19:36.223823 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-78jcg\" (UniqueName: \"kubernetes.io/projected/9efdbce5-c29e-4601-bc0e-0b65a8dd7899-kube-api-access-78jcg\") pod \"9efdbce5-c29e-4601-bc0e-0b65a8dd7899\" (UID: \"9efdbce5-c29e-4601-bc0e-0b65a8dd7899\") " Nov 28 14:19:36 crc kubenswrapper[4857]: I1128 14:19:36.224727 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9efdbce5-c29e-4601-bc0e-0b65a8dd7899-utilities" (OuterVolumeSpecName: "utilities") pod "9efdbce5-c29e-4601-bc0e-0b65a8dd7899" (UID: "9efdbce5-c29e-4601-bc0e-0b65a8dd7899"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:19:36 crc kubenswrapper[4857]: I1128 14:19:36.234651 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9efdbce5-c29e-4601-bc0e-0b65a8dd7899-kube-api-access-78jcg" (OuterVolumeSpecName: "kube-api-access-78jcg") pod "9efdbce5-c29e-4601-bc0e-0b65a8dd7899" (UID: "9efdbce5-c29e-4601-bc0e-0b65a8dd7899"). InnerVolumeSpecName "kube-api-access-78jcg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:19:36 crc kubenswrapper[4857]: I1128 14:19:36.280375 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9efdbce5-c29e-4601-bc0e-0b65a8dd7899-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9efdbce5-c29e-4601-bc0e-0b65a8dd7899" (UID: "9efdbce5-c29e-4601-bc0e-0b65a8dd7899"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:19:36 crc kubenswrapper[4857]: I1128 14:19:36.325489 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-78jcg\" (UniqueName: \"kubernetes.io/projected/9efdbce5-c29e-4601-bc0e-0b65a8dd7899-kube-api-access-78jcg\") on node \"crc\" DevicePath \"\"" Nov 28 14:19:36 crc kubenswrapper[4857]: I1128 14:19:36.325591 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9efdbce5-c29e-4601-bc0e-0b65a8dd7899-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 14:19:36 crc kubenswrapper[4857]: I1128 14:19:36.325692 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9efdbce5-c29e-4601-bc0e-0b65a8dd7899-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 14:19:37 crc kubenswrapper[4857]: I1128 14:19:37.036916 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pbgdj" event={"ID":"9efdbce5-c29e-4601-bc0e-0b65a8dd7899","Type":"ContainerDied","Data":"c3a984c02b345fb4e352a6adb6e9d5c7c73a15b8e5e73fbd487b57b984b41735"} Nov 28 14:19:37 crc kubenswrapper[4857]: I1128 14:19:37.036970 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pbgdj" Nov 28 14:19:37 crc kubenswrapper[4857]: I1128 14:19:37.036990 4857 scope.go:117] "RemoveContainer" containerID="6487608a645ff66f82c8a01b776ec303ecb5feaff63daa9ef1f9da55971ff37e" Nov 28 14:19:37 crc kubenswrapper[4857]: I1128 14:19:37.074643 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pbgdj"] Nov 28 14:19:37 crc kubenswrapper[4857]: I1128 14:19:37.078127 4857 scope.go:117] "RemoveContainer" containerID="5aad8171f8d7fe2c692ccb90553a51eb6751f9cf7b229c2c61f4e302d8906f6a" Nov 28 14:19:37 crc kubenswrapper[4857]: I1128 14:19:37.085453 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-pbgdj"] Nov 28 14:19:37 crc kubenswrapper[4857]: I1128 14:19:37.100257 4857 scope.go:117] "RemoveContainer" containerID="846a4d63c6dd4ec22c650ff501b64bd62fa270b278543476618ec41a7158afe2" Nov 28 14:19:38 crc kubenswrapper[4857]: I1128 14:19:38.237117 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9efdbce5-c29e-4601-bc0e-0b65a8dd7899" path="/var/lib/kubelet/pods/9efdbce5-c29e-4601-bc0e-0b65a8dd7899/volumes" Nov 28 14:19:41 crc kubenswrapper[4857]: I1128 14:19:41.309042 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 14:19:41 crc kubenswrapper[4857]: I1128 14:19:41.309320 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" 
containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 14:19:41 crc kubenswrapper[4857]: I1128 14:19:41.309363 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 14:19:41 crc kubenswrapper[4857]: I1128 14:19:41.309939 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e09fc0c531d9196e70777fd485fea89ca6b50c21e8037febc16f64c30d069101"} pod="openshift-machine-config-operator/machine-config-daemon-dshsf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 14:19:41 crc kubenswrapper[4857]: I1128 14:19:41.310008 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" containerID="cri-o://e09fc0c531d9196e70777fd485fea89ca6b50c21e8037febc16f64c30d069101" gracePeriod=600 Nov 28 14:19:41 crc kubenswrapper[4857]: E1128 14:19:41.442998 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:19:42 crc kubenswrapper[4857]: I1128 14:19:42.076978 4857 generic.go:334] "Generic (PLEG): container finished" podID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerID="e09fc0c531d9196e70777fd485fea89ca6b50c21e8037febc16f64c30d069101" exitCode=0 Nov 28 14:19:42 crc kubenswrapper[4857]: I1128 14:19:42.077025 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerDied","Data":"e09fc0c531d9196e70777fd485fea89ca6b50c21e8037febc16f64c30d069101"} Nov 28 14:19:42 crc kubenswrapper[4857]: I1128 14:19:42.077067 4857 scope.go:117] "RemoveContainer" containerID="c21b36b6844e784455ff90551304b9ff88a395836b3cec4816bcfecc913def32" Nov 28 14:19:42 crc kubenswrapper[4857]: I1128 14:19:42.077515 4857 scope.go:117] "RemoveContainer" containerID="e09fc0c531d9196e70777fd485fea89ca6b50c21e8037febc16f64c30d069101" Nov 28 14:19:42 crc kubenswrapper[4857]: E1128 14:19:42.077817 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:19:55 crc kubenswrapper[4857]: I1128 14:19:55.229876 4857 scope.go:117] "RemoveContainer" containerID="e09fc0c531d9196e70777fd485fea89ca6b50c21e8037febc16f64c30d069101" Nov 28 14:19:55 crc kubenswrapper[4857]: E1128 14:19:55.231159 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:20:08 crc kubenswrapper[4857]: I1128 14:20:08.229479 4857 scope.go:117] "RemoveContainer" containerID="e09fc0c531d9196e70777fd485fea89ca6b50c21e8037febc16f64c30d069101" Nov 28 14:20:08 crc kubenswrapper[4857]: E1128 14:20:08.230757 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:20:21 crc kubenswrapper[4857]: I1128 14:20:21.228654 4857 scope.go:117] "RemoveContainer" containerID="e09fc0c531d9196e70777fd485fea89ca6b50c21e8037febc16f64c30d069101" Nov 28 14:20:21 crc kubenswrapper[4857]: E1128 14:20:21.229534 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:20:33 crc kubenswrapper[4857]: I1128 14:20:33.228641 4857 scope.go:117] "RemoveContainer" containerID="e09fc0c531d9196e70777fd485fea89ca6b50c21e8037febc16f64c30d069101" Nov 28 14:20:33 crc kubenswrapper[4857]: E1128 14:20:33.229300 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:20:45 crc kubenswrapper[4857]: I1128 14:20:45.228555 4857 scope.go:117] "RemoveContainer" containerID="e09fc0c531d9196e70777fd485fea89ca6b50c21e8037febc16f64c30d069101" Nov 28 14:20:45 crc kubenswrapper[4857]: E1128 14:20:45.229663 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:20:59 crc kubenswrapper[4857]: I1128 14:20:59.229514 4857 scope.go:117] "RemoveContainer" containerID="e09fc0c531d9196e70777fd485fea89ca6b50c21e8037febc16f64c30d069101" Nov 28 14:20:59 crc kubenswrapper[4857]: E1128 14:20:59.230457 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:21:12 crc kubenswrapper[4857]: I1128 14:21:12.228608 4857 scope.go:117] "RemoveContainer" containerID="e09fc0c531d9196e70777fd485fea89ca6b50c21e8037febc16f64c30d069101" Nov 28 14:21:12 crc kubenswrapper[4857]: E1128 14:21:12.229669 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:21:25 crc kubenswrapper[4857]: I1128 14:21:25.228557 4857 scope.go:117] "RemoveContainer" containerID="e09fc0c531d9196e70777fd485fea89ca6b50c21e8037febc16f64c30d069101" Nov 28 14:21:25 crc kubenswrapper[4857]: E1128 14:21:25.229413 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:21:40 crc kubenswrapper[4857]: I1128 14:21:40.242273 4857 scope.go:117] "RemoveContainer" containerID="e09fc0c531d9196e70777fd485fea89ca6b50c21e8037febc16f64c30d069101" Nov 28 14:21:40 crc kubenswrapper[4857]: E1128 14:21:40.243515 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:21:54 crc kubenswrapper[4857]: I1128 14:21:54.229243 4857 scope.go:117] "RemoveContainer" containerID="e09fc0c531d9196e70777fd485fea89ca6b50c21e8037febc16f64c30d069101" Nov 28 14:21:54 crc kubenswrapper[4857]: E1128 14:21:54.230019 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:22:09 crc kubenswrapper[4857]: I1128 14:22:09.229460 4857 scope.go:117] "RemoveContainer" containerID="e09fc0c531d9196e70777fd485fea89ca6b50c21e8037febc16f64c30d069101" Nov 28 14:22:09 crc kubenswrapper[4857]: E1128 14:22:09.231319 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:22:21 crc kubenswrapper[4857]: I1128 14:22:21.228673 4857 
scope.go:117] "RemoveContainer" containerID="e09fc0c531d9196e70777fd485fea89ca6b50c21e8037febc16f64c30d069101" Nov 28 14:22:21 crc kubenswrapper[4857]: E1128 14:22:21.229293 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:22:35 crc kubenswrapper[4857]: I1128 14:22:35.229212 4857 scope.go:117] "RemoveContainer" containerID="e09fc0c531d9196e70777fd485fea89ca6b50c21e8037febc16f64c30d069101" Nov 28 14:22:35 crc kubenswrapper[4857]: E1128 14:22:35.230307 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:22:48 crc kubenswrapper[4857]: I1128 14:22:48.229424 4857 scope.go:117] "RemoveContainer" containerID="e09fc0c531d9196e70777fd485fea89ca6b50c21e8037febc16f64c30d069101" Nov 28 14:22:48 crc kubenswrapper[4857]: E1128 14:22:48.231493 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:22:59 crc kubenswrapper[4857]: I1128 14:22:59.231597 4857 scope.go:117] "RemoveContainer" containerID="e09fc0c531d9196e70777fd485fea89ca6b50c21e8037febc16f64c30d069101" Nov 28 14:22:59 crc kubenswrapper[4857]: E1128 14:22:59.233337 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:23:14 crc kubenswrapper[4857]: I1128 14:23:14.230072 4857 scope.go:117] "RemoveContainer" containerID="e09fc0c531d9196e70777fd485fea89ca6b50c21e8037febc16f64c30d069101" Nov 28 14:23:14 crc kubenswrapper[4857]: E1128 14:23:14.231304 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:23:26 crc kubenswrapper[4857]: I1128 14:23:26.229856 4857 scope.go:117] "RemoveContainer" containerID="e09fc0c531d9196e70777fd485fea89ca6b50c21e8037febc16f64c30d069101" Nov 28 14:23:26 crc kubenswrapper[4857]: E1128 14:23:26.230535 4857 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:23:38 crc kubenswrapper[4857]: I1128 14:23:38.229719 4857 scope.go:117] "RemoveContainer" containerID="e09fc0c531d9196e70777fd485fea89ca6b50c21e8037febc16f64c30d069101" Nov 28 14:23:38 crc kubenswrapper[4857]: E1128 14:23:38.230411 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:23:50 crc kubenswrapper[4857]: I1128 14:23:50.236260 4857 scope.go:117] "RemoveContainer" containerID="e09fc0c531d9196e70777fd485fea89ca6b50c21e8037febc16f64c30d069101" Nov 28 14:23:50 crc kubenswrapper[4857]: E1128 14:23:50.237502 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:24:01 crc kubenswrapper[4857]: I1128 14:24:01.229594 4857 scope.go:117] "RemoveContainer" containerID="e09fc0c531d9196e70777fd485fea89ca6b50c21e8037febc16f64c30d069101" Nov 28 14:24:01 crc kubenswrapper[4857]: E1128 14:24:01.230711 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:24:13 crc kubenswrapper[4857]: I1128 14:24:13.229097 4857 scope.go:117] "RemoveContainer" containerID="e09fc0c531d9196e70777fd485fea89ca6b50c21e8037febc16f64c30d069101" Nov 28 14:24:13 crc kubenswrapper[4857]: E1128 14:24:13.230018 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:24:25 crc kubenswrapper[4857]: I1128 14:24:25.229183 4857 scope.go:117] "RemoveContainer" containerID="e09fc0c531d9196e70777fd485fea89ca6b50c21e8037febc16f64c30d069101" Nov 28 14:24:25 crc kubenswrapper[4857]: E1128 14:24:25.230238 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:24:40 crc kubenswrapper[4857]: I1128 14:24:40.241052 4857 scope.go:117] "RemoveContainer" containerID="e09fc0c531d9196e70777fd485fea89ca6b50c21e8037febc16f64c30d069101" Nov 28 14:24:40 crc kubenswrapper[4857]: E1128 14:24:40.242332 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:24:53 crc kubenswrapper[4857]: I1128 14:24:53.229088 4857 scope.go:117] "RemoveContainer" containerID="e09fc0c531d9196e70777fd485fea89ca6b50c21e8037febc16f64c30d069101" Nov 28 14:24:53 crc kubenswrapper[4857]: I1128 14:24:53.743282 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerStarted","Data":"33884a03db4de99def0383bbdf2b341b2c68bab325d69eb6823cce32ba9ee316"} Nov 28 14:25:56 crc kubenswrapper[4857]: I1128 14:25:56.015450 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-drdx4"] Nov 28 14:25:56 crc kubenswrapper[4857]: E1128 14:25:56.016525 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9efdbce5-c29e-4601-bc0e-0b65a8dd7899" containerName="registry-server" Nov 28 14:25:56 crc kubenswrapper[4857]: I1128 14:25:56.016540 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="9efdbce5-c29e-4601-bc0e-0b65a8dd7899" containerName="registry-server" Nov 28 14:25:56 crc kubenswrapper[4857]: E1128 14:25:56.016567 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9efdbce5-c29e-4601-bc0e-0b65a8dd7899" containerName="extract-content" Nov 28 14:25:56 crc kubenswrapper[4857]: I1128 14:25:56.016576 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="9efdbce5-c29e-4601-bc0e-0b65a8dd7899" containerName="extract-content" Nov 28 14:25:56 crc kubenswrapper[4857]: E1128 14:25:56.016611 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9efdbce5-c29e-4601-bc0e-0b65a8dd7899" containerName="extract-utilities" Nov 28 14:25:56 crc kubenswrapper[4857]: I1128 14:25:56.016622 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="9efdbce5-c29e-4601-bc0e-0b65a8dd7899" containerName="extract-utilities" Nov 28 14:25:56 crc kubenswrapper[4857]: I1128 14:25:56.016855 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="9efdbce5-c29e-4601-bc0e-0b65a8dd7899" containerName="registry-server" Nov 28 14:25:56 crc kubenswrapper[4857]: I1128 14:25:56.018089 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-drdx4" Nov 28 14:25:56 crc kubenswrapper[4857]: I1128 14:25:56.038396 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-drdx4"] Nov 28 14:25:56 crc kubenswrapper[4857]: I1128 14:25:56.196352 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16784911-bfd0-460a-bd98-13d22b171400-utilities\") pod \"community-operators-drdx4\" (UID: \"16784911-bfd0-460a-bd98-13d22b171400\") " pod="openshift-marketplace/community-operators-drdx4" Nov 28 14:25:56 crc kubenswrapper[4857]: I1128 14:25:56.196414 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16784911-bfd0-460a-bd98-13d22b171400-catalog-content\") pod \"community-operators-drdx4\" (UID: \"16784911-bfd0-460a-bd98-13d22b171400\") " pod="openshift-marketplace/community-operators-drdx4" Nov 28 14:25:56 crc kubenswrapper[4857]: I1128 14:25:56.196462 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vc7pt\" (UniqueName: \"kubernetes.io/projected/16784911-bfd0-460a-bd98-13d22b171400-kube-api-access-vc7pt\") pod \"community-operators-drdx4\" (UID: \"16784911-bfd0-460a-bd98-13d22b171400\") " pod="openshift-marketplace/community-operators-drdx4" Nov 28 14:25:56 crc kubenswrapper[4857]: I1128 14:25:56.297725 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16784911-bfd0-460a-bd98-13d22b171400-utilities\") pod \"community-operators-drdx4\" (UID: \"16784911-bfd0-460a-bd98-13d22b171400\") " pod="openshift-marketplace/community-operators-drdx4" Nov 28 14:25:56 crc kubenswrapper[4857]: I1128 14:25:56.297783 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16784911-bfd0-460a-bd98-13d22b171400-catalog-content\") pod \"community-operators-drdx4\" (UID: \"16784911-bfd0-460a-bd98-13d22b171400\") " pod="openshift-marketplace/community-operators-drdx4" Nov 28 14:25:56 crc kubenswrapper[4857]: I1128 14:25:56.298085 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vc7pt\" (UniqueName: \"kubernetes.io/projected/16784911-bfd0-460a-bd98-13d22b171400-kube-api-access-vc7pt\") pod \"community-operators-drdx4\" (UID: \"16784911-bfd0-460a-bd98-13d22b171400\") " pod="openshift-marketplace/community-operators-drdx4" Nov 28 14:25:56 crc kubenswrapper[4857]: I1128 14:25:56.298307 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16784911-bfd0-460a-bd98-13d22b171400-catalog-content\") pod \"community-operators-drdx4\" (UID: \"16784911-bfd0-460a-bd98-13d22b171400\") " pod="openshift-marketplace/community-operators-drdx4" Nov 28 14:25:56 crc kubenswrapper[4857]: I1128 14:25:56.298355 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16784911-bfd0-460a-bd98-13d22b171400-utilities\") pod \"community-operators-drdx4\" (UID: \"16784911-bfd0-460a-bd98-13d22b171400\") " pod="openshift-marketplace/community-operators-drdx4" Nov 28 14:25:56 crc kubenswrapper[4857]: I1128 14:25:56.332983 4857 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-vc7pt\" (UniqueName: \"kubernetes.io/projected/16784911-bfd0-460a-bd98-13d22b171400-kube-api-access-vc7pt\") pod \"community-operators-drdx4\" (UID: \"16784911-bfd0-460a-bd98-13d22b171400\") " pod="openshift-marketplace/community-operators-drdx4" Nov 28 14:25:56 crc kubenswrapper[4857]: I1128 14:25:56.340633 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-drdx4" Nov 28 14:25:56 crc kubenswrapper[4857]: I1128 14:25:56.782270 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-drdx4"] Nov 28 14:25:57 crc kubenswrapper[4857]: I1128 14:25:57.351555 4857 generic.go:334] "Generic (PLEG): container finished" podID="16784911-bfd0-460a-bd98-13d22b171400" containerID="8f154ead416e872f5285b08e19ca082218c2de63e880aeff50e40026f0e2e13c" exitCode=0 Nov 28 14:25:57 crc kubenswrapper[4857]: I1128 14:25:57.351610 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-drdx4" event={"ID":"16784911-bfd0-460a-bd98-13d22b171400","Type":"ContainerDied","Data":"8f154ead416e872f5285b08e19ca082218c2de63e880aeff50e40026f0e2e13c"} Nov 28 14:25:57 crc kubenswrapper[4857]: I1128 14:25:57.351638 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-drdx4" event={"ID":"16784911-bfd0-460a-bd98-13d22b171400","Type":"ContainerStarted","Data":"525f0b04e19212a522c0cc37ea45a39dff72d4e1220499abe4440b108484818f"} Nov 28 14:25:57 crc kubenswrapper[4857]: I1128 14:25:57.357334 4857 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 14:25:59 crc kubenswrapper[4857]: I1128 14:25:59.009291 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-wzfgm"] Nov 28 14:25:59 crc kubenswrapper[4857]: I1128 14:25:59.016329 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wzfgm" Nov 28 14:25:59 crc kubenswrapper[4857]: I1128 14:25:59.030199 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wzfgm"] Nov 28 14:25:59 crc kubenswrapper[4857]: I1128 14:25:59.144907 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6be45124-40b7-44f3-9190-40017c783275-catalog-content\") pod \"redhat-marketplace-wzfgm\" (UID: \"6be45124-40b7-44f3-9190-40017c783275\") " pod="openshift-marketplace/redhat-marketplace-wzfgm" Nov 28 14:25:59 crc kubenswrapper[4857]: I1128 14:25:59.145002 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6be45124-40b7-44f3-9190-40017c783275-utilities\") pod \"redhat-marketplace-wzfgm\" (UID: \"6be45124-40b7-44f3-9190-40017c783275\") " pod="openshift-marketplace/redhat-marketplace-wzfgm" Nov 28 14:25:59 crc kubenswrapper[4857]: I1128 14:25:59.145091 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m5cwn\" (UniqueName: \"kubernetes.io/projected/6be45124-40b7-44f3-9190-40017c783275-kube-api-access-m5cwn\") pod \"redhat-marketplace-wzfgm\" (UID: \"6be45124-40b7-44f3-9190-40017c783275\") " pod="openshift-marketplace/redhat-marketplace-wzfgm" Nov 28 14:25:59 crc kubenswrapper[4857]: I1128 14:25:59.246899 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6be45124-40b7-44f3-9190-40017c783275-catalog-content\") pod \"redhat-marketplace-wzfgm\" (UID: \"6be45124-40b7-44f3-9190-40017c783275\") " pod="openshift-marketplace/redhat-marketplace-wzfgm" Nov 28 14:25:59 crc kubenswrapper[4857]: I1128 14:25:59.246982 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6be45124-40b7-44f3-9190-40017c783275-utilities\") pod \"redhat-marketplace-wzfgm\" (UID: \"6be45124-40b7-44f3-9190-40017c783275\") " pod="openshift-marketplace/redhat-marketplace-wzfgm" Nov 28 14:25:59 crc kubenswrapper[4857]: I1128 14:25:59.247038 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m5cwn\" (UniqueName: \"kubernetes.io/projected/6be45124-40b7-44f3-9190-40017c783275-kube-api-access-m5cwn\") pod \"redhat-marketplace-wzfgm\" (UID: \"6be45124-40b7-44f3-9190-40017c783275\") " pod="openshift-marketplace/redhat-marketplace-wzfgm" Nov 28 14:25:59 crc kubenswrapper[4857]: I1128 14:25:59.247614 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6be45124-40b7-44f3-9190-40017c783275-utilities\") pod \"redhat-marketplace-wzfgm\" (UID: \"6be45124-40b7-44f3-9190-40017c783275\") " pod="openshift-marketplace/redhat-marketplace-wzfgm" Nov 28 14:25:59 crc kubenswrapper[4857]: I1128 14:25:59.247614 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6be45124-40b7-44f3-9190-40017c783275-catalog-content\") pod \"redhat-marketplace-wzfgm\" (UID: \"6be45124-40b7-44f3-9190-40017c783275\") " pod="openshift-marketplace/redhat-marketplace-wzfgm" Nov 28 14:25:59 crc kubenswrapper[4857]: I1128 14:25:59.272899 4857 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-m5cwn\" (UniqueName: \"kubernetes.io/projected/6be45124-40b7-44f3-9190-40017c783275-kube-api-access-m5cwn\") pod \"redhat-marketplace-wzfgm\" (UID: \"6be45124-40b7-44f3-9190-40017c783275\") " pod="openshift-marketplace/redhat-marketplace-wzfgm" Nov 28 14:25:59 crc kubenswrapper[4857]: I1128 14:25:59.351632 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wzfgm" Nov 28 14:25:59 crc kubenswrapper[4857]: I1128 14:25:59.377051 4857 generic.go:334] "Generic (PLEG): container finished" podID="16784911-bfd0-460a-bd98-13d22b171400" containerID="3eca9ec448153d4b366e620a2dd7459c0c75d660b867fe383ef3bd29ba9fdc36" exitCode=0 Nov 28 14:25:59 crc kubenswrapper[4857]: I1128 14:25:59.377115 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-drdx4" event={"ID":"16784911-bfd0-460a-bd98-13d22b171400","Type":"ContainerDied","Data":"3eca9ec448153d4b366e620a2dd7459c0c75d660b867fe383ef3bd29ba9fdc36"} Nov 28 14:25:59 crc kubenswrapper[4857]: I1128 14:25:59.781812 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wzfgm"] Nov 28 14:26:00 crc kubenswrapper[4857]: I1128 14:26:00.384069 4857 generic.go:334] "Generic (PLEG): container finished" podID="6be45124-40b7-44f3-9190-40017c783275" containerID="022e4349fca9d7c03bf962cb60344fda882bc6d768197d9e398719ba8a9c6bf1" exitCode=0 Nov 28 14:26:00 crc kubenswrapper[4857]: I1128 14:26:00.384134 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wzfgm" event={"ID":"6be45124-40b7-44f3-9190-40017c783275","Type":"ContainerDied","Data":"022e4349fca9d7c03bf962cb60344fda882bc6d768197d9e398719ba8a9c6bf1"} Nov 28 14:26:00 crc kubenswrapper[4857]: I1128 14:26:00.384410 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wzfgm" event={"ID":"6be45124-40b7-44f3-9190-40017c783275","Type":"ContainerStarted","Data":"d0d723cc21d3546ec2440468cf80832524095f49c39e3527a5dbd30650a8d712"} Nov 28 14:26:00 crc kubenswrapper[4857]: I1128 14:26:00.389204 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-drdx4" event={"ID":"16784911-bfd0-460a-bd98-13d22b171400","Type":"ContainerStarted","Data":"2e13b57ffeda191940068589b8ba8d02f4b4f634396ed7c105cb23facab3dce3"} Nov 28 14:26:00 crc kubenswrapper[4857]: I1128 14:26:00.422373 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-drdx4" podStartSLOduration=2.8897057779999997 podStartE2EDuration="5.422355586s" podCreationTimestamp="2025-11-28 14:25:55 +0000 UTC" firstStartedPulling="2025-11-28 14:25:57.356064487 +0000 UTC m=+3407.480005954" lastFinishedPulling="2025-11-28 14:25:59.888714325 +0000 UTC m=+3410.012655762" observedRunningTime="2025-11-28 14:26:00.418221016 +0000 UTC m=+3410.542162473" watchObservedRunningTime="2025-11-28 14:26:00.422355586 +0000 UTC m=+3410.546297023" Nov 28 14:26:01 crc kubenswrapper[4857]: E1128 14:26:01.721368 4857 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6be45124_40b7_44f3_9190_40017c783275.slice/crio-conmon-114e95c1f78b56f958b11ae654b1efaded4e27a6aa1857a13e7f764b0f39cd3c.scope\": RecentStats: unable to find data in memory cache]" Nov 28 14:26:02 crc kubenswrapper[4857]: 
I1128 14:26:02.405457 4857 generic.go:334] "Generic (PLEG): container finished" podID="6be45124-40b7-44f3-9190-40017c783275" containerID="114e95c1f78b56f958b11ae654b1efaded4e27a6aa1857a13e7f764b0f39cd3c" exitCode=0 Nov 28 14:26:02 crc kubenswrapper[4857]: I1128 14:26:02.405542 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wzfgm" event={"ID":"6be45124-40b7-44f3-9190-40017c783275","Type":"ContainerDied","Data":"114e95c1f78b56f958b11ae654b1efaded4e27a6aa1857a13e7f764b0f39cd3c"} Nov 28 14:26:03 crc kubenswrapper[4857]: I1128 14:26:03.001868 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-gxskc"] Nov 28 14:26:03 crc kubenswrapper[4857]: I1128 14:26:03.003831 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gxskc" Nov 28 14:26:03 crc kubenswrapper[4857]: I1128 14:26:03.008195 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gxskc"] Nov 28 14:26:03 crc kubenswrapper[4857]: I1128 14:26:03.102227 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bfd9d30c-1946-4589-9f01-377ad0706a05-catalog-content\") pod \"redhat-operators-gxskc\" (UID: \"bfd9d30c-1946-4589-9f01-377ad0706a05\") " pod="openshift-marketplace/redhat-operators-gxskc" Nov 28 14:26:03 crc kubenswrapper[4857]: I1128 14:26:03.102306 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bfd9d30c-1946-4589-9f01-377ad0706a05-utilities\") pod \"redhat-operators-gxskc\" (UID: \"bfd9d30c-1946-4589-9f01-377ad0706a05\") " pod="openshift-marketplace/redhat-operators-gxskc" Nov 28 14:26:03 crc kubenswrapper[4857]: I1128 14:26:03.102342 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zkpvx\" (UniqueName: \"kubernetes.io/projected/bfd9d30c-1946-4589-9f01-377ad0706a05-kube-api-access-zkpvx\") pod \"redhat-operators-gxskc\" (UID: \"bfd9d30c-1946-4589-9f01-377ad0706a05\") " pod="openshift-marketplace/redhat-operators-gxskc" Nov 28 14:26:03 crc kubenswrapper[4857]: I1128 14:26:03.203616 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bfd9d30c-1946-4589-9f01-377ad0706a05-utilities\") pod \"redhat-operators-gxskc\" (UID: \"bfd9d30c-1946-4589-9f01-377ad0706a05\") " pod="openshift-marketplace/redhat-operators-gxskc" Nov 28 14:26:03 crc kubenswrapper[4857]: I1128 14:26:03.204023 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zkpvx\" (UniqueName: \"kubernetes.io/projected/bfd9d30c-1946-4589-9f01-377ad0706a05-kube-api-access-zkpvx\") pod \"redhat-operators-gxskc\" (UID: \"bfd9d30c-1946-4589-9f01-377ad0706a05\") " pod="openshift-marketplace/redhat-operators-gxskc" Nov 28 14:26:03 crc kubenswrapper[4857]: I1128 14:26:03.204055 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bfd9d30c-1946-4589-9f01-377ad0706a05-utilities\") pod \"redhat-operators-gxskc\" (UID: \"bfd9d30c-1946-4589-9f01-377ad0706a05\") " pod="openshift-marketplace/redhat-operators-gxskc" Nov 28 14:26:03 crc kubenswrapper[4857]: I1128 14:26:03.204137 4857 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bfd9d30c-1946-4589-9f01-377ad0706a05-catalog-content\") pod \"redhat-operators-gxskc\" (UID: \"bfd9d30c-1946-4589-9f01-377ad0706a05\") " pod="openshift-marketplace/redhat-operators-gxskc" Nov 28 14:26:03 crc kubenswrapper[4857]: I1128 14:26:03.204352 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bfd9d30c-1946-4589-9f01-377ad0706a05-catalog-content\") pod \"redhat-operators-gxskc\" (UID: \"bfd9d30c-1946-4589-9f01-377ad0706a05\") " pod="openshift-marketplace/redhat-operators-gxskc" Nov 28 14:26:03 crc kubenswrapper[4857]: I1128 14:26:03.230398 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zkpvx\" (UniqueName: \"kubernetes.io/projected/bfd9d30c-1946-4589-9f01-377ad0706a05-kube-api-access-zkpvx\") pod \"redhat-operators-gxskc\" (UID: \"bfd9d30c-1946-4589-9f01-377ad0706a05\") " pod="openshift-marketplace/redhat-operators-gxskc" Nov 28 14:26:03 crc kubenswrapper[4857]: I1128 14:26:03.326000 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gxskc" Nov 28 14:26:03 crc kubenswrapper[4857]: I1128 14:26:03.429234 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wzfgm" event={"ID":"6be45124-40b7-44f3-9190-40017c783275","Type":"ContainerStarted","Data":"b011eac07b28fed36aede5f3ecdcd343c8a8a64526b4d00e77b56bae2d5a29bc"} Nov 28 14:26:03 crc kubenswrapper[4857]: I1128 14:26:03.454055 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-wzfgm" podStartSLOduration=2.789197178 podStartE2EDuration="5.454039352s" podCreationTimestamp="2025-11-28 14:25:58 +0000 UTC" firstStartedPulling="2025-11-28 14:26:00.387317571 +0000 UTC m=+3410.511259008" lastFinishedPulling="2025-11-28 14:26:03.052159745 +0000 UTC m=+3413.176101182" observedRunningTime="2025-11-28 14:26:03.448999608 +0000 UTC m=+3413.572941045" watchObservedRunningTime="2025-11-28 14:26:03.454039352 +0000 UTC m=+3413.577980789" Nov 28 14:26:03 crc kubenswrapper[4857]: I1128 14:26:03.768450 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gxskc"] Nov 28 14:26:04 crc kubenswrapper[4857]: I1128 14:26:04.438618 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gxskc" event={"ID":"bfd9d30c-1946-4589-9f01-377ad0706a05","Type":"ContainerStarted","Data":"ae47426b328d261c5d01ac0e08a5d1f846b48c6735a117ed39e465daf5d3b9b2"} Nov 28 14:26:04 crc kubenswrapper[4857]: I1128 14:26:04.438675 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gxskc" event={"ID":"bfd9d30c-1946-4589-9f01-377ad0706a05","Type":"ContainerStarted","Data":"f878a64701ae12f7c0cc0daa7a8cd2b5bf9a1644c5acd7ac8558b3c1895fb734"} Nov 28 14:26:05 crc kubenswrapper[4857]: I1128 14:26:05.448620 4857 generic.go:334] "Generic (PLEG): container finished" podID="bfd9d30c-1946-4589-9f01-377ad0706a05" containerID="ae47426b328d261c5d01ac0e08a5d1f846b48c6735a117ed39e465daf5d3b9b2" exitCode=0 Nov 28 14:26:05 crc kubenswrapper[4857]: I1128 14:26:05.448808 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gxskc" 
event={"ID":"bfd9d30c-1946-4589-9f01-377ad0706a05","Type":"ContainerDied","Data":"ae47426b328d261c5d01ac0e08a5d1f846b48c6735a117ed39e465daf5d3b9b2"} Nov 28 14:26:06 crc kubenswrapper[4857]: I1128 14:26:06.341040 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-drdx4" Nov 28 14:26:06 crc kubenswrapper[4857]: I1128 14:26:06.341093 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-drdx4" Nov 28 14:26:06 crc kubenswrapper[4857]: I1128 14:26:06.379652 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-drdx4" Nov 28 14:26:06 crc kubenswrapper[4857]: I1128 14:26:06.507906 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-drdx4" Nov 28 14:26:07 crc kubenswrapper[4857]: I1128 14:26:07.187472 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-drdx4"] Nov 28 14:26:07 crc kubenswrapper[4857]: I1128 14:26:07.472853 4857 generic.go:334] "Generic (PLEG): container finished" podID="bfd9d30c-1946-4589-9f01-377ad0706a05" containerID="03fb5af52051c1b6b6ab969189ff48c2d9625fad382dd6e14459ffc2afa70eda" exitCode=0 Nov 28 14:26:07 crc kubenswrapper[4857]: I1128 14:26:07.472918 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gxskc" event={"ID":"bfd9d30c-1946-4589-9f01-377ad0706a05","Type":"ContainerDied","Data":"03fb5af52051c1b6b6ab969189ff48c2d9625fad382dd6e14459ffc2afa70eda"} Nov 28 14:26:08 crc kubenswrapper[4857]: I1128 14:26:08.485637 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-drdx4" podUID="16784911-bfd0-460a-bd98-13d22b171400" containerName="registry-server" containerID="cri-o://2e13b57ffeda191940068589b8ba8d02f4b4f634396ed7c105cb23facab3dce3" gracePeriod=2 Nov 28 14:26:08 crc kubenswrapper[4857]: I1128 14:26:08.909829 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-drdx4" Nov 28 14:26:08 crc kubenswrapper[4857]: I1128 14:26:08.939015 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16784911-bfd0-460a-bd98-13d22b171400-catalog-content\") pod \"16784911-bfd0-460a-bd98-13d22b171400\" (UID: \"16784911-bfd0-460a-bd98-13d22b171400\") " Nov 28 14:26:08 crc kubenswrapper[4857]: I1128 14:26:08.939081 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vc7pt\" (UniqueName: \"kubernetes.io/projected/16784911-bfd0-460a-bd98-13d22b171400-kube-api-access-vc7pt\") pod \"16784911-bfd0-460a-bd98-13d22b171400\" (UID: \"16784911-bfd0-460a-bd98-13d22b171400\") " Nov 28 14:26:08 crc kubenswrapper[4857]: I1128 14:26:08.939105 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16784911-bfd0-460a-bd98-13d22b171400-utilities\") pod \"16784911-bfd0-460a-bd98-13d22b171400\" (UID: \"16784911-bfd0-460a-bd98-13d22b171400\") " Nov 28 14:26:08 crc kubenswrapper[4857]: I1128 14:26:08.940163 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/16784911-bfd0-460a-bd98-13d22b171400-utilities" (OuterVolumeSpecName: "utilities") pod "16784911-bfd0-460a-bd98-13d22b171400" (UID: "16784911-bfd0-460a-bd98-13d22b171400"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:26:08 crc kubenswrapper[4857]: I1128 14:26:08.955895 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/16784911-bfd0-460a-bd98-13d22b171400-kube-api-access-vc7pt" (OuterVolumeSpecName: "kube-api-access-vc7pt") pod "16784911-bfd0-460a-bd98-13d22b171400" (UID: "16784911-bfd0-460a-bd98-13d22b171400"). InnerVolumeSpecName "kube-api-access-vc7pt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:26:09 crc kubenswrapper[4857]: I1128 14:26:09.007840 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/16784911-bfd0-460a-bd98-13d22b171400-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "16784911-bfd0-460a-bd98-13d22b171400" (UID: "16784911-bfd0-460a-bd98-13d22b171400"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:26:09 crc kubenswrapper[4857]: I1128 14:26:09.040379 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/16784911-bfd0-460a-bd98-13d22b171400-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 14:26:09 crc kubenswrapper[4857]: I1128 14:26:09.040411 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vc7pt\" (UniqueName: \"kubernetes.io/projected/16784911-bfd0-460a-bd98-13d22b171400-kube-api-access-vc7pt\") on node \"crc\" DevicePath \"\"" Nov 28 14:26:09 crc kubenswrapper[4857]: I1128 14:26:09.040422 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/16784911-bfd0-460a-bd98-13d22b171400-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 14:26:09 crc kubenswrapper[4857]: I1128 14:26:09.351789 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-wzfgm" Nov 28 14:26:09 crc kubenswrapper[4857]: I1128 14:26:09.352173 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-wzfgm" Nov 28 14:26:09 crc kubenswrapper[4857]: I1128 14:26:09.395026 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-wzfgm" Nov 28 14:26:09 crc kubenswrapper[4857]: I1128 14:26:09.505928 4857 generic.go:334] "Generic (PLEG): container finished" podID="16784911-bfd0-460a-bd98-13d22b171400" containerID="2e13b57ffeda191940068589b8ba8d02f4b4f634396ed7c105cb23facab3dce3" exitCode=0 Nov 28 14:26:09 crc kubenswrapper[4857]: I1128 14:26:09.506011 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-drdx4" event={"ID":"16784911-bfd0-460a-bd98-13d22b171400","Type":"ContainerDied","Data":"2e13b57ffeda191940068589b8ba8d02f4b4f634396ed7c105cb23facab3dce3"} Nov 28 14:26:09 crc kubenswrapper[4857]: I1128 14:26:09.506069 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-drdx4" event={"ID":"16784911-bfd0-460a-bd98-13d22b171400","Type":"ContainerDied","Data":"525f0b04e19212a522c0cc37ea45a39dff72d4e1220499abe4440b108484818f"} Nov 28 14:26:09 crc kubenswrapper[4857]: I1128 14:26:09.506099 4857 scope.go:117] "RemoveContainer" containerID="2e13b57ffeda191940068589b8ba8d02f4b4f634396ed7c105cb23facab3dce3" Nov 28 14:26:09 crc kubenswrapper[4857]: I1128 14:26:09.506311 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-drdx4" Nov 28 14:26:09 crc kubenswrapper[4857]: I1128 14:26:09.511974 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gxskc" event={"ID":"bfd9d30c-1946-4589-9f01-377ad0706a05","Type":"ContainerStarted","Data":"a1a1aa7aa8dc7dc6e02668e00d4ced13bb58569d58e72614d76287484656a76e"} Nov 28 14:26:09 crc kubenswrapper[4857]: I1128 14:26:09.536738 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-gxskc" podStartSLOduration=4.355092475 podStartE2EDuration="7.536694729s" podCreationTimestamp="2025-11-28 14:26:02 +0000 UTC" firstStartedPulling="2025-11-28 14:26:05.450105762 +0000 UTC m=+3415.574047199" lastFinishedPulling="2025-11-28 14:26:08.631708016 +0000 UTC m=+3418.755649453" observedRunningTime="2025-11-28 14:26:09.53523368 +0000 UTC m=+3419.659175157" watchObservedRunningTime="2025-11-28 14:26:09.536694729 +0000 UTC m=+3419.660636186" Nov 28 14:26:09 crc kubenswrapper[4857]: I1128 14:26:09.543203 4857 scope.go:117] "RemoveContainer" containerID="3eca9ec448153d4b366e620a2dd7459c0c75d660b867fe383ef3bd29ba9fdc36" Nov 28 14:26:09 crc kubenswrapper[4857]: I1128 14:26:09.559394 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-drdx4"] Nov 28 14:26:09 crc kubenswrapper[4857]: I1128 14:26:09.566074 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-drdx4"] Nov 28 14:26:09 crc kubenswrapper[4857]: I1128 14:26:09.578047 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-wzfgm" Nov 28 14:26:09 crc kubenswrapper[4857]: I1128 14:26:09.585895 4857 scope.go:117] "RemoveContainer" containerID="8f154ead416e872f5285b08e19ca082218c2de63e880aeff50e40026f0e2e13c" Nov 28 14:26:09 crc kubenswrapper[4857]: I1128 14:26:09.606244 4857 scope.go:117] "RemoveContainer" containerID="2e13b57ffeda191940068589b8ba8d02f4b4f634396ed7c105cb23facab3dce3" Nov 28 14:26:09 crc kubenswrapper[4857]: E1128 14:26:09.606744 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2e13b57ffeda191940068589b8ba8d02f4b4f634396ed7c105cb23facab3dce3\": container with ID starting with 2e13b57ffeda191940068589b8ba8d02f4b4f634396ed7c105cb23facab3dce3 not found: ID does not exist" containerID="2e13b57ffeda191940068589b8ba8d02f4b4f634396ed7c105cb23facab3dce3" Nov 28 14:26:09 crc kubenswrapper[4857]: I1128 14:26:09.606783 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2e13b57ffeda191940068589b8ba8d02f4b4f634396ed7c105cb23facab3dce3"} err="failed to get container status \"2e13b57ffeda191940068589b8ba8d02f4b4f634396ed7c105cb23facab3dce3\": rpc error: code = NotFound desc = could not find container \"2e13b57ffeda191940068589b8ba8d02f4b4f634396ed7c105cb23facab3dce3\": container with ID starting with 2e13b57ffeda191940068589b8ba8d02f4b4f634396ed7c105cb23facab3dce3 not found: ID does not exist" Nov 28 14:26:09 crc kubenswrapper[4857]: I1128 14:26:09.606812 4857 scope.go:117] "RemoveContainer" containerID="3eca9ec448153d4b366e620a2dd7459c0c75d660b867fe383ef3bd29ba9fdc36" Nov 28 14:26:09 crc kubenswrapper[4857]: E1128 14:26:09.607199 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"3eca9ec448153d4b366e620a2dd7459c0c75d660b867fe383ef3bd29ba9fdc36\": container with ID starting with 3eca9ec448153d4b366e620a2dd7459c0c75d660b867fe383ef3bd29ba9fdc36 not found: ID does not exist" containerID="3eca9ec448153d4b366e620a2dd7459c0c75d660b867fe383ef3bd29ba9fdc36" Nov 28 14:26:09 crc kubenswrapper[4857]: I1128 14:26:09.607229 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3eca9ec448153d4b366e620a2dd7459c0c75d660b867fe383ef3bd29ba9fdc36"} err="failed to get container status \"3eca9ec448153d4b366e620a2dd7459c0c75d660b867fe383ef3bd29ba9fdc36\": rpc error: code = NotFound desc = could not find container \"3eca9ec448153d4b366e620a2dd7459c0c75d660b867fe383ef3bd29ba9fdc36\": container with ID starting with 3eca9ec448153d4b366e620a2dd7459c0c75d660b867fe383ef3bd29ba9fdc36 not found: ID does not exist" Nov 28 14:26:09 crc kubenswrapper[4857]: I1128 14:26:09.607245 4857 scope.go:117] "RemoveContainer" containerID="8f154ead416e872f5285b08e19ca082218c2de63e880aeff50e40026f0e2e13c" Nov 28 14:26:09 crc kubenswrapper[4857]: E1128 14:26:09.607609 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8f154ead416e872f5285b08e19ca082218c2de63e880aeff50e40026f0e2e13c\": container with ID starting with 8f154ead416e872f5285b08e19ca082218c2de63e880aeff50e40026f0e2e13c not found: ID does not exist" containerID="8f154ead416e872f5285b08e19ca082218c2de63e880aeff50e40026f0e2e13c" Nov 28 14:26:09 crc kubenswrapper[4857]: I1128 14:26:09.607640 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f154ead416e872f5285b08e19ca082218c2de63e880aeff50e40026f0e2e13c"} err="failed to get container status \"8f154ead416e872f5285b08e19ca082218c2de63e880aeff50e40026f0e2e13c\": rpc error: code = NotFound desc = could not find container \"8f154ead416e872f5285b08e19ca082218c2de63e880aeff50e40026f0e2e13c\": container with ID starting with 8f154ead416e872f5285b08e19ca082218c2de63e880aeff50e40026f0e2e13c not found: ID does not exist" Nov 28 14:26:10 crc kubenswrapper[4857]: I1128 14:26:10.242074 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="16784911-bfd0-460a-bd98-13d22b171400" path="/var/lib/kubelet/pods/16784911-bfd0-460a-bd98-13d22b171400/volumes" Nov 28 14:26:12 crc kubenswrapper[4857]: I1128 14:26:12.186844 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wzfgm"] Nov 28 14:26:12 crc kubenswrapper[4857]: I1128 14:26:12.187515 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-wzfgm" podUID="6be45124-40b7-44f3-9190-40017c783275" containerName="registry-server" containerID="cri-o://b011eac07b28fed36aede5f3ecdcd343c8a8a64526b4d00e77b56bae2d5a29bc" gracePeriod=2 Nov 28 14:26:13 crc kubenswrapper[4857]: I1128 14:26:13.326738 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-gxskc" Nov 28 14:26:13 crc kubenswrapper[4857]: I1128 14:26:13.327033 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-gxskc" Nov 28 14:26:13 crc kubenswrapper[4857]: I1128 14:26:13.546439 4857 generic.go:334] "Generic (PLEG): container finished" podID="6be45124-40b7-44f3-9190-40017c783275" containerID="b011eac07b28fed36aede5f3ecdcd343c8a8a64526b4d00e77b56bae2d5a29bc" exitCode=0 Nov 28 14:26:13 crc 
kubenswrapper[4857]: I1128 14:26:13.546542 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wzfgm" event={"ID":"6be45124-40b7-44f3-9190-40017c783275","Type":"ContainerDied","Data":"b011eac07b28fed36aede5f3ecdcd343c8a8a64526b4d00e77b56bae2d5a29bc"} Nov 28 14:26:13 crc kubenswrapper[4857]: I1128 14:26:13.724345 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wzfgm" Nov 28 14:26:13 crc kubenswrapper[4857]: I1128 14:26:13.911542 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m5cwn\" (UniqueName: \"kubernetes.io/projected/6be45124-40b7-44f3-9190-40017c783275-kube-api-access-m5cwn\") pod \"6be45124-40b7-44f3-9190-40017c783275\" (UID: \"6be45124-40b7-44f3-9190-40017c783275\") " Nov 28 14:26:13 crc kubenswrapper[4857]: I1128 14:26:13.911674 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6be45124-40b7-44f3-9190-40017c783275-utilities\") pod \"6be45124-40b7-44f3-9190-40017c783275\" (UID: \"6be45124-40b7-44f3-9190-40017c783275\") " Nov 28 14:26:13 crc kubenswrapper[4857]: I1128 14:26:13.911729 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6be45124-40b7-44f3-9190-40017c783275-catalog-content\") pod \"6be45124-40b7-44f3-9190-40017c783275\" (UID: \"6be45124-40b7-44f3-9190-40017c783275\") " Nov 28 14:26:13 crc kubenswrapper[4857]: I1128 14:26:13.912514 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6be45124-40b7-44f3-9190-40017c783275-utilities" (OuterVolumeSpecName: "utilities") pod "6be45124-40b7-44f3-9190-40017c783275" (UID: "6be45124-40b7-44f3-9190-40017c783275"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:26:13 crc kubenswrapper[4857]: I1128 14:26:13.912782 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6be45124-40b7-44f3-9190-40017c783275-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 14:26:13 crc kubenswrapper[4857]: I1128 14:26:13.926254 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6be45124-40b7-44f3-9190-40017c783275-kube-api-access-m5cwn" (OuterVolumeSpecName: "kube-api-access-m5cwn") pod "6be45124-40b7-44f3-9190-40017c783275" (UID: "6be45124-40b7-44f3-9190-40017c783275"). InnerVolumeSpecName "kube-api-access-m5cwn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:26:13 crc kubenswrapper[4857]: I1128 14:26:13.929133 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6be45124-40b7-44f3-9190-40017c783275-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6be45124-40b7-44f3-9190-40017c783275" (UID: "6be45124-40b7-44f3-9190-40017c783275"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:26:14 crc kubenswrapper[4857]: I1128 14:26:14.013315 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6be45124-40b7-44f3-9190-40017c783275-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 14:26:14 crc kubenswrapper[4857]: I1128 14:26:14.013532 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m5cwn\" (UniqueName: \"kubernetes.io/projected/6be45124-40b7-44f3-9190-40017c783275-kube-api-access-m5cwn\") on node \"crc\" DevicePath \"\"" Nov 28 14:26:14 crc kubenswrapper[4857]: I1128 14:26:14.401201 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gxskc" podUID="bfd9d30c-1946-4589-9f01-377ad0706a05" containerName="registry-server" probeResult="failure" output=< Nov 28 14:26:14 crc kubenswrapper[4857]: timeout: failed to connect service ":50051" within 1s Nov 28 14:26:14 crc kubenswrapper[4857]: > Nov 28 14:26:14 crc kubenswrapper[4857]: I1128 14:26:14.560587 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wzfgm" event={"ID":"6be45124-40b7-44f3-9190-40017c783275","Type":"ContainerDied","Data":"d0d723cc21d3546ec2440468cf80832524095f49c39e3527a5dbd30650a8d712"} Nov 28 14:26:14 crc kubenswrapper[4857]: I1128 14:26:14.560656 4857 scope.go:117] "RemoveContainer" containerID="b011eac07b28fed36aede5f3ecdcd343c8a8a64526b4d00e77b56bae2d5a29bc" Nov 28 14:26:14 crc kubenswrapper[4857]: I1128 14:26:14.560857 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wzfgm" Nov 28 14:26:14 crc kubenswrapper[4857]: I1128 14:26:14.585219 4857 scope.go:117] "RemoveContainer" containerID="114e95c1f78b56f958b11ae654b1efaded4e27a6aa1857a13e7f764b0f39cd3c" Nov 28 14:26:14 crc kubenswrapper[4857]: I1128 14:26:14.608537 4857 scope.go:117] "RemoveContainer" containerID="022e4349fca9d7c03bf962cb60344fda882bc6d768197d9e398719ba8a9c6bf1" Nov 28 14:26:14 crc kubenswrapper[4857]: I1128 14:26:14.619652 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wzfgm"] Nov 28 14:26:14 crc kubenswrapper[4857]: I1128 14:26:14.631870 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-wzfgm"] Nov 28 14:26:16 crc kubenswrapper[4857]: I1128 14:26:16.237081 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6be45124-40b7-44f3-9190-40017c783275" path="/var/lib/kubelet/pods/6be45124-40b7-44f3-9190-40017c783275/volumes" Nov 28 14:26:23 crc kubenswrapper[4857]: I1128 14:26:23.397856 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-gxskc" Nov 28 14:26:23 crc kubenswrapper[4857]: I1128 14:26:23.475106 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-gxskc" Nov 28 14:26:23 crc kubenswrapper[4857]: I1128 14:26:23.647074 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gxskc"] Nov 28 14:26:24 crc kubenswrapper[4857]: I1128 14:26:24.660206 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-gxskc" podUID="bfd9d30c-1946-4589-9f01-377ad0706a05" containerName="registry-server" 
containerID="cri-o://a1a1aa7aa8dc7dc6e02668e00d4ced13bb58569d58e72614d76287484656a76e" gracePeriod=2 Nov 28 14:26:26 crc kubenswrapper[4857]: I1128 14:26:26.330074 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gxskc" Nov 28 14:26:26 crc kubenswrapper[4857]: I1128 14:26:26.515533 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkpvx\" (UniqueName: \"kubernetes.io/projected/bfd9d30c-1946-4589-9f01-377ad0706a05-kube-api-access-zkpvx\") pod \"bfd9d30c-1946-4589-9f01-377ad0706a05\" (UID: \"bfd9d30c-1946-4589-9f01-377ad0706a05\") " Nov 28 14:26:26 crc kubenswrapper[4857]: I1128 14:26:26.516038 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bfd9d30c-1946-4589-9f01-377ad0706a05-catalog-content\") pod \"bfd9d30c-1946-4589-9f01-377ad0706a05\" (UID: \"bfd9d30c-1946-4589-9f01-377ad0706a05\") " Nov 28 14:26:26 crc kubenswrapper[4857]: I1128 14:26:26.519034 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bfd9d30c-1946-4589-9f01-377ad0706a05-utilities\") pod \"bfd9d30c-1946-4589-9f01-377ad0706a05\" (UID: \"bfd9d30c-1946-4589-9f01-377ad0706a05\") " Nov 28 14:26:26 crc kubenswrapper[4857]: I1128 14:26:26.520688 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bfd9d30c-1946-4589-9f01-377ad0706a05-utilities" (OuterVolumeSpecName: "utilities") pod "bfd9d30c-1946-4589-9f01-377ad0706a05" (UID: "bfd9d30c-1946-4589-9f01-377ad0706a05"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:26:26 crc kubenswrapper[4857]: I1128 14:26:26.521552 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bfd9d30c-1946-4589-9f01-377ad0706a05-kube-api-access-zkpvx" (OuterVolumeSpecName: "kube-api-access-zkpvx") pod "bfd9d30c-1946-4589-9f01-377ad0706a05" (UID: "bfd9d30c-1946-4589-9f01-377ad0706a05"). InnerVolumeSpecName "kube-api-access-zkpvx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:26:26 crc kubenswrapper[4857]: I1128 14:26:26.620540 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bfd9d30c-1946-4589-9f01-377ad0706a05-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bfd9d30c-1946-4589-9f01-377ad0706a05" (UID: "bfd9d30c-1946-4589-9f01-377ad0706a05"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:26:26 crc kubenswrapper[4857]: I1128 14:26:26.623458 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bfd9d30c-1946-4589-9f01-377ad0706a05-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 14:26:26 crc kubenswrapper[4857]: I1128 14:26:26.623567 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkpvx\" (UniqueName: \"kubernetes.io/projected/bfd9d30c-1946-4589-9f01-377ad0706a05-kube-api-access-zkpvx\") on node \"crc\" DevicePath \"\"" Nov 28 14:26:26 crc kubenswrapper[4857]: I1128 14:26:26.624206 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bfd9d30c-1946-4589-9f01-377ad0706a05-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 14:26:26 crc kubenswrapper[4857]: I1128 14:26:26.682771 4857 generic.go:334] "Generic (PLEG): container finished" podID="bfd9d30c-1946-4589-9f01-377ad0706a05" containerID="a1a1aa7aa8dc7dc6e02668e00d4ced13bb58569d58e72614d76287484656a76e" exitCode=0 Nov 28 14:26:26 crc kubenswrapper[4857]: I1128 14:26:26.682853 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gxskc" event={"ID":"bfd9d30c-1946-4589-9f01-377ad0706a05","Type":"ContainerDied","Data":"a1a1aa7aa8dc7dc6e02668e00d4ced13bb58569d58e72614d76287484656a76e"} Nov 28 14:26:26 crc kubenswrapper[4857]: I1128 14:26:26.682992 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gxskc" event={"ID":"bfd9d30c-1946-4589-9f01-377ad0706a05","Type":"ContainerDied","Data":"f878a64701ae12f7c0cc0daa7a8cd2b5bf9a1644c5acd7ac8558b3c1895fb734"} Nov 28 14:26:26 crc kubenswrapper[4857]: I1128 14:26:26.682889 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-gxskc" Nov 28 14:26:26 crc kubenswrapper[4857]: I1128 14:26:26.683028 4857 scope.go:117] "RemoveContainer" containerID="a1a1aa7aa8dc7dc6e02668e00d4ced13bb58569d58e72614d76287484656a76e" Nov 28 14:26:26 crc kubenswrapper[4857]: I1128 14:26:26.720921 4857 scope.go:117] "RemoveContainer" containerID="03fb5af52051c1b6b6ab969189ff48c2d9625fad382dd6e14459ffc2afa70eda" Nov 28 14:26:26 crc kubenswrapper[4857]: I1128 14:26:26.731014 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gxskc"] Nov 28 14:26:26 crc kubenswrapper[4857]: I1128 14:26:26.737236 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-gxskc"] Nov 28 14:26:26 crc kubenswrapper[4857]: I1128 14:26:26.747231 4857 scope.go:117] "RemoveContainer" containerID="ae47426b328d261c5d01ac0e08a5d1f846b48c6735a117ed39e465daf5d3b9b2" Nov 28 14:26:26 crc kubenswrapper[4857]: I1128 14:26:26.772348 4857 scope.go:117] "RemoveContainer" containerID="a1a1aa7aa8dc7dc6e02668e00d4ced13bb58569d58e72614d76287484656a76e" Nov 28 14:26:26 crc kubenswrapper[4857]: E1128 14:26:26.773026 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a1a1aa7aa8dc7dc6e02668e00d4ced13bb58569d58e72614d76287484656a76e\": container with ID starting with a1a1aa7aa8dc7dc6e02668e00d4ced13bb58569d58e72614d76287484656a76e not found: ID does not exist" containerID="a1a1aa7aa8dc7dc6e02668e00d4ced13bb58569d58e72614d76287484656a76e" Nov 28 14:26:26 crc kubenswrapper[4857]: I1128 14:26:26.773100 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a1a1aa7aa8dc7dc6e02668e00d4ced13bb58569d58e72614d76287484656a76e"} err="failed to get container status \"a1a1aa7aa8dc7dc6e02668e00d4ced13bb58569d58e72614d76287484656a76e\": rpc error: code = NotFound desc = could not find container \"a1a1aa7aa8dc7dc6e02668e00d4ced13bb58569d58e72614d76287484656a76e\": container with ID starting with a1a1aa7aa8dc7dc6e02668e00d4ced13bb58569d58e72614d76287484656a76e not found: ID does not exist" Nov 28 14:26:26 crc kubenswrapper[4857]: I1128 14:26:26.773148 4857 scope.go:117] "RemoveContainer" containerID="03fb5af52051c1b6b6ab969189ff48c2d9625fad382dd6e14459ffc2afa70eda" Nov 28 14:26:26 crc kubenswrapper[4857]: E1128 14:26:26.773797 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"03fb5af52051c1b6b6ab969189ff48c2d9625fad382dd6e14459ffc2afa70eda\": container with ID starting with 03fb5af52051c1b6b6ab969189ff48c2d9625fad382dd6e14459ffc2afa70eda not found: ID does not exist" containerID="03fb5af52051c1b6b6ab969189ff48c2d9625fad382dd6e14459ffc2afa70eda" Nov 28 14:26:26 crc kubenswrapper[4857]: I1128 14:26:26.773837 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"03fb5af52051c1b6b6ab969189ff48c2d9625fad382dd6e14459ffc2afa70eda"} err="failed to get container status \"03fb5af52051c1b6b6ab969189ff48c2d9625fad382dd6e14459ffc2afa70eda\": rpc error: code = NotFound desc = could not find container \"03fb5af52051c1b6b6ab969189ff48c2d9625fad382dd6e14459ffc2afa70eda\": container with ID starting with 03fb5af52051c1b6b6ab969189ff48c2d9625fad382dd6e14459ffc2afa70eda not found: ID does not exist" Nov 28 14:26:26 crc kubenswrapper[4857]: I1128 14:26:26.773865 4857 scope.go:117] "RemoveContainer" 
containerID="ae47426b328d261c5d01ac0e08a5d1f846b48c6735a117ed39e465daf5d3b9b2" Nov 28 14:26:26 crc kubenswrapper[4857]: E1128 14:26:26.774121 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ae47426b328d261c5d01ac0e08a5d1f846b48c6735a117ed39e465daf5d3b9b2\": container with ID starting with ae47426b328d261c5d01ac0e08a5d1f846b48c6735a117ed39e465daf5d3b9b2 not found: ID does not exist" containerID="ae47426b328d261c5d01ac0e08a5d1f846b48c6735a117ed39e465daf5d3b9b2" Nov 28 14:26:26 crc kubenswrapper[4857]: I1128 14:26:26.774139 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ae47426b328d261c5d01ac0e08a5d1f846b48c6735a117ed39e465daf5d3b9b2"} err="failed to get container status \"ae47426b328d261c5d01ac0e08a5d1f846b48c6735a117ed39e465daf5d3b9b2\": rpc error: code = NotFound desc = could not find container \"ae47426b328d261c5d01ac0e08a5d1f846b48c6735a117ed39e465daf5d3b9b2\": container with ID starting with ae47426b328d261c5d01ac0e08a5d1f846b48c6735a117ed39e465daf5d3b9b2 not found: ID does not exist" Nov 28 14:26:28 crc kubenswrapper[4857]: I1128 14:26:28.239394 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bfd9d30c-1946-4589-9f01-377ad0706a05" path="/var/lib/kubelet/pods/bfd9d30c-1946-4589-9f01-377ad0706a05/volumes" Nov 28 14:27:11 crc kubenswrapper[4857]: I1128 14:27:11.308806 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 14:27:11 crc kubenswrapper[4857]: I1128 14:27:11.309447 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 14:27:41 crc kubenswrapper[4857]: I1128 14:27:41.309030 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 14:27:41 crc kubenswrapper[4857]: I1128 14:27:41.309622 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 14:28:11 crc kubenswrapper[4857]: I1128 14:28:11.308564 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 14:28:11 crc kubenswrapper[4857]: I1128 14:28:11.309114 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 14:28:11 crc kubenswrapper[4857]: I1128 14:28:11.309170 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 14:28:11 crc kubenswrapper[4857]: I1128 14:28:11.309717 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"33884a03db4de99def0383bbdf2b341b2c68bab325d69eb6823cce32ba9ee316"} pod="openshift-machine-config-operator/machine-config-daemon-dshsf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 14:28:11 crc kubenswrapper[4857]: I1128 14:28:11.309786 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" containerID="cri-o://33884a03db4de99def0383bbdf2b341b2c68bab325d69eb6823cce32ba9ee316" gracePeriod=600 Nov 28 14:28:11 crc kubenswrapper[4857]: I1128 14:28:11.669783 4857 generic.go:334] "Generic (PLEG): container finished" podID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerID="33884a03db4de99def0383bbdf2b341b2c68bab325d69eb6823cce32ba9ee316" exitCode=0 Nov 28 14:28:11 crc kubenswrapper[4857]: I1128 14:28:11.669889 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerDied","Data":"33884a03db4de99def0383bbdf2b341b2c68bab325d69eb6823cce32ba9ee316"} Nov 28 14:28:11 crc kubenswrapper[4857]: I1128 14:28:11.670351 4857 scope.go:117] "RemoveContainer" containerID="e09fc0c531d9196e70777fd485fea89ca6b50c21e8037febc16f64c30d069101" Nov 28 14:28:12 crc kubenswrapper[4857]: I1128 14:28:12.685552 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerStarted","Data":"408ecac3a9253fbfbfc9eb93cc159bfb672675065a40069db743517006e942c7"} Nov 28 14:30:00 crc kubenswrapper[4857]: I1128 14:30:00.150396 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405670-nc94d"] Nov 28 14:30:00 crc kubenswrapper[4857]: E1128 14:30:00.151313 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16784911-bfd0-460a-bd98-13d22b171400" containerName="extract-utilities" Nov 28 14:30:00 crc kubenswrapper[4857]: I1128 14:30:00.151333 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="16784911-bfd0-460a-bd98-13d22b171400" containerName="extract-utilities" Nov 28 14:30:00 crc kubenswrapper[4857]: E1128 14:30:00.151354 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16784911-bfd0-460a-bd98-13d22b171400" containerName="registry-server" Nov 28 14:30:00 crc kubenswrapper[4857]: I1128 14:30:00.151361 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="16784911-bfd0-460a-bd98-13d22b171400" containerName="registry-server" Nov 28 14:30:00 crc kubenswrapper[4857]: E1128 14:30:00.151376 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6be45124-40b7-44f3-9190-40017c783275" containerName="registry-server" Nov 28 14:30:00 crc kubenswrapper[4857]: I1128 14:30:00.151384 4857 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="6be45124-40b7-44f3-9190-40017c783275" containerName="registry-server" Nov 28 14:30:00 crc kubenswrapper[4857]: E1128 14:30:00.151395 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6be45124-40b7-44f3-9190-40017c783275" containerName="extract-content" Nov 28 14:30:00 crc kubenswrapper[4857]: I1128 14:30:00.151404 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="6be45124-40b7-44f3-9190-40017c783275" containerName="extract-content" Nov 28 14:30:00 crc kubenswrapper[4857]: E1128 14:30:00.151419 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bfd9d30c-1946-4589-9f01-377ad0706a05" containerName="extract-content" Nov 28 14:30:00 crc kubenswrapper[4857]: I1128 14:30:00.151426 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="bfd9d30c-1946-4589-9f01-377ad0706a05" containerName="extract-content" Nov 28 14:30:00 crc kubenswrapper[4857]: E1128 14:30:00.151439 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bfd9d30c-1946-4589-9f01-377ad0706a05" containerName="registry-server" Nov 28 14:30:00 crc kubenswrapper[4857]: I1128 14:30:00.151446 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="bfd9d30c-1946-4589-9f01-377ad0706a05" containerName="registry-server" Nov 28 14:30:00 crc kubenswrapper[4857]: E1128 14:30:00.151459 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bfd9d30c-1946-4589-9f01-377ad0706a05" containerName="extract-utilities" Nov 28 14:30:00 crc kubenswrapper[4857]: I1128 14:30:00.151465 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="bfd9d30c-1946-4589-9f01-377ad0706a05" containerName="extract-utilities" Nov 28 14:30:00 crc kubenswrapper[4857]: E1128 14:30:00.151481 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16784911-bfd0-460a-bd98-13d22b171400" containerName="extract-content" Nov 28 14:30:00 crc kubenswrapper[4857]: I1128 14:30:00.151488 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="16784911-bfd0-460a-bd98-13d22b171400" containerName="extract-content" Nov 28 14:30:00 crc kubenswrapper[4857]: E1128 14:30:00.151499 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6be45124-40b7-44f3-9190-40017c783275" containerName="extract-utilities" Nov 28 14:30:00 crc kubenswrapper[4857]: I1128 14:30:00.151505 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="6be45124-40b7-44f3-9190-40017c783275" containerName="extract-utilities" Nov 28 14:30:00 crc kubenswrapper[4857]: I1128 14:30:00.151678 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="bfd9d30c-1946-4589-9f01-377ad0706a05" containerName="registry-server" Nov 28 14:30:00 crc kubenswrapper[4857]: I1128 14:30:00.151707 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="16784911-bfd0-460a-bd98-13d22b171400" containerName="registry-server" Nov 28 14:30:00 crc kubenswrapper[4857]: I1128 14:30:00.151721 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="6be45124-40b7-44f3-9190-40017c783275" containerName="registry-server" Nov 28 14:30:00 crc kubenswrapper[4857]: I1128 14:30:00.152312 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405670-nc94d" Nov 28 14:30:00 crc kubenswrapper[4857]: I1128 14:30:00.154510 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 14:30:00 crc kubenswrapper[4857]: I1128 14:30:00.154529 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 14:30:00 crc kubenswrapper[4857]: I1128 14:30:00.162504 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405670-nc94d"] Nov 28 14:30:00 crc kubenswrapper[4857]: I1128 14:30:00.190258 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b78rb\" (UniqueName: \"kubernetes.io/projected/f0747847-802a-4a95-831a-301e1529a41b-kube-api-access-b78rb\") pod \"collect-profiles-29405670-nc94d\" (UID: \"f0747847-802a-4a95-831a-301e1529a41b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405670-nc94d" Nov 28 14:30:00 crc kubenswrapper[4857]: I1128 14:30:00.190345 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f0747847-802a-4a95-831a-301e1529a41b-secret-volume\") pod \"collect-profiles-29405670-nc94d\" (UID: \"f0747847-802a-4a95-831a-301e1529a41b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405670-nc94d" Nov 28 14:30:00 crc kubenswrapper[4857]: I1128 14:30:00.190401 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f0747847-802a-4a95-831a-301e1529a41b-config-volume\") pod \"collect-profiles-29405670-nc94d\" (UID: \"f0747847-802a-4a95-831a-301e1529a41b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405670-nc94d" Nov 28 14:30:00 crc kubenswrapper[4857]: I1128 14:30:00.291695 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f0747847-802a-4a95-831a-301e1529a41b-config-volume\") pod \"collect-profiles-29405670-nc94d\" (UID: \"f0747847-802a-4a95-831a-301e1529a41b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405670-nc94d" Nov 28 14:30:00 crc kubenswrapper[4857]: I1128 14:30:00.291747 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b78rb\" (UniqueName: \"kubernetes.io/projected/f0747847-802a-4a95-831a-301e1529a41b-kube-api-access-b78rb\") pod \"collect-profiles-29405670-nc94d\" (UID: \"f0747847-802a-4a95-831a-301e1529a41b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405670-nc94d" Nov 28 14:30:00 crc kubenswrapper[4857]: I1128 14:30:00.291847 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f0747847-802a-4a95-831a-301e1529a41b-secret-volume\") pod \"collect-profiles-29405670-nc94d\" (UID: \"f0747847-802a-4a95-831a-301e1529a41b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405670-nc94d" Nov 28 14:30:00 crc kubenswrapper[4857]: I1128 14:30:00.294090 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f0747847-802a-4a95-831a-301e1529a41b-config-volume\") pod 
\"collect-profiles-29405670-nc94d\" (UID: \"f0747847-802a-4a95-831a-301e1529a41b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405670-nc94d" Nov 28 14:30:00 crc kubenswrapper[4857]: I1128 14:30:00.298885 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f0747847-802a-4a95-831a-301e1529a41b-secret-volume\") pod \"collect-profiles-29405670-nc94d\" (UID: \"f0747847-802a-4a95-831a-301e1529a41b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405670-nc94d" Nov 28 14:30:00 crc kubenswrapper[4857]: I1128 14:30:00.312126 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b78rb\" (UniqueName: \"kubernetes.io/projected/f0747847-802a-4a95-831a-301e1529a41b-kube-api-access-b78rb\") pod \"collect-profiles-29405670-nc94d\" (UID: \"f0747847-802a-4a95-831a-301e1529a41b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405670-nc94d" Nov 28 14:30:00 crc kubenswrapper[4857]: I1128 14:30:00.468295 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405670-nc94d" Nov 28 14:30:00 crc kubenswrapper[4857]: I1128 14:30:00.887645 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405670-nc94d"] Nov 28 14:30:01 crc kubenswrapper[4857]: I1128 14:30:01.749654 4857 generic.go:334] "Generic (PLEG): container finished" podID="f0747847-802a-4a95-831a-301e1529a41b" containerID="959dec720acf8f4fc7b69c5f8a94f17d83b80c8aee3a5189fac23782e7ccadf0" exitCode=0 Nov 28 14:30:01 crc kubenswrapper[4857]: I1128 14:30:01.750155 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405670-nc94d" event={"ID":"f0747847-802a-4a95-831a-301e1529a41b","Type":"ContainerDied","Data":"959dec720acf8f4fc7b69c5f8a94f17d83b80c8aee3a5189fac23782e7ccadf0"} Nov 28 14:30:01 crc kubenswrapper[4857]: I1128 14:30:01.751168 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405670-nc94d" event={"ID":"f0747847-802a-4a95-831a-301e1529a41b","Type":"ContainerStarted","Data":"cebfcff86ea5dc1531808f8235b92241d18a9a181c7753d16439286d3d29edf5"} Nov 28 14:30:03 crc kubenswrapper[4857]: I1128 14:30:03.133207 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405670-nc94d" Nov 28 14:30:03 crc kubenswrapper[4857]: I1128 14:30:03.143624 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b78rb\" (UniqueName: \"kubernetes.io/projected/f0747847-802a-4a95-831a-301e1529a41b-kube-api-access-b78rb\") pod \"f0747847-802a-4a95-831a-301e1529a41b\" (UID: \"f0747847-802a-4a95-831a-301e1529a41b\") " Nov 28 14:30:03 crc kubenswrapper[4857]: I1128 14:30:03.143777 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f0747847-802a-4a95-831a-301e1529a41b-config-volume\") pod \"f0747847-802a-4a95-831a-301e1529a41b\" (UID: \"f0747847-802a-4a95-831a-301e1529a41b\") " Nov 28 14:30:03 crc kubenswrapper[4857]: I1128 14:30:03.143815 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f0747847-802a-4a95-831a-301e1529a41b-secret-volume\") pod \"f0747847-802a-4a95-831a-301e1529a41b\" (UID: \"f0747847-802a-4a95-831a-301e1529a41b\") " Nov 28 14:30:03 crc kubenswrapper[4857]: I1128 14:30:03.145645 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f0747847-802a-4a95-831a-301e1529a41b-config-volume" (OuterVolumeSpecName: "config-volume") pod "f0747847-802a-4a95-831a-301e1529a41b" (UID: "f0747847-802a-4a95-831a-301e1529a41b"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 14:30:03 crc kubenswrapper[4857]: I1128 14:30:03.150894 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f0747847-802a-4a95-831a-301e1529a41b-kube-api-access-b78rb" (OuterVolumeSpecName: "kube-api-access-b78rb") pod "f0747847-802a-4a95-831a-301e1529a41b" (UID: "f0747847-802a-4a95-831a-301e1529a41b"). InnerVolumeSpecName "kube-api-access-b78rb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:30:03 crc kubenswrapper[4857]: I1128 14:30:03.151235 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f0747847-802a-4a95-831a-301e1529a41b-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "f0747847-802a-4a95-831a-301e1529a41b" (UID: "f0747847-802a-4a95-831a-301e1529a41b"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 14:30:03 crc kubenswrapper[4857]: I1128 14:30:03.246117 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b78rb\" (UniqueName: \"kubernetes.io/projected/f0747847-802a-4a95-831a-301e1529a41b-kube-api-access-b78rb\") on node \"crc\" DevicePath \"\"" Nov 28 14:30:03 crc kubenswrapper[4857]: I1128 14:30:03.246153 4857 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f0747847-802a-4a95-831a-301e1529a41b-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 14:30:03 crc kubenswrapper[4857]: I1128 14:30:03.246163 4857 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f0747847-802a-4a95-831a-301e1529a41b-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 14:30:03 crc kubenswrapper[4857]: I1128 14:30:03.775790 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405670-nc94d" event={"ID":"f0747847-802a-4a95-831a-301e1529a41b","Type":"ContainerDied","Data":"cebfcff86ea5dc1531808f8235b92241d18a9a181c7753d16439286d3d29edf5"} Nov 28 14:30:03 crc kubenswrapper[4857]: I1128 14:30:03.775860 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cebfcff86ea5dc1531808f8235b92241d18a9a181c7753d16439286d3d29edf5" Nov 28 14:30:03 crc kubenswrapper[4857]: I1128 14:30:03.776334 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405670-nc94d" Nov 28 14:30:04 crc kubenswrapper[4857]: I1128 14:30:04.240734 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405625-rm96d"] Nov 28 14:30:04 crc kubenswrapper[4857]: I1128 14:30:04.247620 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405625-rm96d"] Nov 28 14:30:06 crc kubenswrapper[4857]: I1128 14:30:06.245823 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae74a118-9633-40c1-8788-03ac27adef01" path="/var/lib/kubelet/pods/ae74a118-9633-40c1-8788-03ac27adef01/volumes" Nov 28 14:30:11 crc kubenswrapper[4857]: I1128 14:30:11.308777 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 14:30:11 crc kubenswrapper[4857]: I1128 14:30:11.310167 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 14:30:16 crc kubenswrapper[4857]: I1128 14:30:16.093065 4857 scope.go:117] "RemoveContainer" containerID="078b73789d96dda3a2a411da48214510e6d9a901f8d426e31808296793d96a20" Nov 28 14:30:41 crc kubenswrapper[4857]: I1128 14:30:41.308875 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" 
start-of-body= Nov 28 14:30:41 crc kubenswrapper[4857]: I1128 14:30:41.309661 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 14:31:11 crc kubenswrapper[4857]: I1128 14:31:11.308987 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 14:31:11 crc kubenswrapper[4857]: I1128 14:31:11.309624 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 14:31:11 crc kubenswrapper[4857]: I1128 14:31:11.309692 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 14:31:11 crc kubenswrapper[4857]: I1128 14:31:11.310599 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"408ecac3a9253fbfbfc9eb93cc159bfb672675065a40069db743517006e942c7"} pod="openshift-machine-config-operator/machine-config-daemon-dshsf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 14:31:11 crc kubenswrapper[4857]: I1128 14:31:11.310724 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" containerID="cri-o://408ecac3a9253fbfbfc9eb93cc159bfb672675065a40069db743517006e942c7" gracePeriod=600 Nov 28 14:31:11 crc kubenswrapper[4857]: E1128 14:31:11.467597 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:31:12 crc kubenswrapper[4857]: I1128 14:31:12.389457 4857 generic.go:334] "Generic (PLEG): container finished" podID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerID="408ecac3a9253fbfbfc9eb93cc159bfb672675065a40069db743517006e942c7" exitCode=0 Nov 28 14:31:12 crc kubenswrapper[4857]: I1128 14:31:12.389508 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerDied","Data":"408ecac3a9253fbfbfc9eb93cc159bfb672675065a40069db743517006e942c7"} Nov 28 14:31:12 crc kubenswrapper[4857]: I1128 14:31:12.389553 4857 scope.go:117] "RemoveContainer" containerID="33884a03db4de99def0383bbdf2b341b2c68bab325d69eb6823cce32ba9ee316" Nov 28 14:31:12 crc kubenswrapper[4857]: I1128 14:31:12.389976 4857 scope.go:117] "RemoveContainer" 
containerID="408ecac3a9253fbfbfc9eb93cc159bfb672675065a40069db743517006e942c7" Nov 28 14:31:12 crc kubenswrapper[4857]: E1128 14:31:12.390242 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:31:24 crc kubenswrapper[4857]: I1128 14:31:24.229066 4857 scope.go:117] "RemoveContainer" containerID="408ecac3a9253fbfbfc9eb93cc159bfb672675065a40069db743517006e942c7" Nov 28 14:31:24 crc kubenswrapper[4857]: E1128 14:31:24.229878 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:31:39 crc kubenswrapper[4857]: I1128 14:31:39.229688 4857 scope.go:117] "RemoveContainer" containerID="408ecac3a9253fbfbfc9eb93cc159bfb672675065a40069db743517006e942c7" Nov 28 14:31:39 crc kubenswrapper[4857]: E1128 14:31:39.231039 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:31:53 crc kubenswrapper[4857]: I1128 14:31:53.230251 4857 scope.go:117] "RemoveContainer" containerID="408ecac3a9253fbfbfc9eb93cc159bfb672675065a40069db743517006e942c7" Nov 28 14:31:53 crc kubenswrapper[4857]: E1128 14:31:53.231498 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:32:06 crc kubenswrapper[4857]: I1128 14:32:06.228765 4857 scope.go:117] "RemoveContainer" containerID="408ecac3a9253fbfbfc9eb93cc159bfb672675065a40069db743517006e942c7" Nov 28 14:32:06 crc kubenswrapper[4857]: E1128 14:32:06.229728 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:32:19 crc kubenswrapper[4857]: I1128 14:32:19.230037 4857 scope.go:117] "RemoveContainer" containerID="408ecac3a9253fbfbfc9eb93cc159bfb672675065a40069db743517006e942c7" Nov 28 14:32:19 crc kubenswrapper[4857]: E1128 14:32:19.231447 4857 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:32:34 crc kubenswrapper[4857]: I1128 14:32:34.228681 4857 scope.go:117] "RemoveContainer" containerID="408ecac3a9253fbfbfc9eb93cc159bfb672675065a40069db743517006e942c7" Nov 28 14:32:34 crc kubenswrapper[4857]: E1128 14:32:34.229391 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:32:47 crc kubenswrapper[4857]: I1128 14:32:47.228217 4857 scope.go:117] "RemoveContainer" containerID="408ecac3a9253fbfbfc9eb93cc159bfb672675065a40069db743517006e942c7" Nov 28 14:32:47 crc kubenswrapper[4857]: E1128 14:32:47.228993 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:32:59 crc kubenswrapper[4857]: I1128 14:32:59.229396 4857 scope.go:117] "RemoveContainer" containerID="408ecac3a9253fbfbfc9eb93cc159bfb672675065a40069db743517006e942c7" Nov 28 14:32:59 crc kubenswrapper[4857]: E1128 14:32:59.230455 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:33:12 crc kubenswrapper[4857]: I1128 14:33:12.229297 4857 scope.go:117] "RemoveContainer" containerID="408ecac3a9253fbfbfc9eb93cc159bfb672675065a40069db743517006e942c7" Nov 28 14:33:12 crc kubenswrapper[4857]: E1128 14:33:12.231150 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:33:23 crc kubenswrapper[4857]: I1128 14:33:23.228308 4857 scope.go:117] "RemoveContainer" containerID="408ecac3a9253fbfbfc9eb93cc159bfb672675065a40069db743517006e942c7" Nov 28 14:33:23 crc kubenswrapper[4857]: E1128 14:33:23.229158 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:33:37 crc kubenswrapper[4857]: I1128 14:33:37.228678 4857 scope.go:117] "RemoveContainer" containerID="408ecac3a9253fbfbfc9eb93cc159bfb672675065a40069db743517006e942c7" Nov 28 14:33:37 crc kubenswrapper[4857]: E1128 14:33:37.229924 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:33:50 crc kubenswrapper[4857]: I1128 14:33:50.237677 4857 scope.go:117] "RemoveContainer" containerID="408ecac3a9253fbfbfc9eb93cc159bfb672675065a40069db743517006e942c7" Nov 28 14:33:50 crc kubenswrapper[4857]: E1128 14:33:50.238856 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:33:59 crc kubenswrapper[4857]: I1128 14:33:59.325021 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-ntbk9"] Nov 28 14:33:59 crc kubenswrapper[4857]: E1128 14:33:59.326032 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0747847-802a-4a95-831a-301e1529a41b" containerName="collect-profiles" Nov 28 14:33:59 crc kubenswrapper[4857]: I1128 14:33:59.326045 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0747847-802a-4a95-831a-301e1529a41b" containerName="collect-profiles" Nov 28 14:33:59 crc kubenswrapper[4857]: I1128 14:33:59.326198 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f0747847-802a-4a95-831a-301e1529a41b" containerName="collect-profiles" Nov 28 14:33:59 crc kubenswrapper[4857]: I1128 14:33:59.333485 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ntbk9" Nov 28 14:33:59 crc kubenswrapper[4857]: I1128 14:33:59.338592 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ntbk9"] Nov 28 14:33:59 crc kubenswrapper[4857]: I1128 14:33:59.497730 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe9ea210-3c93-4f3e-b904-24d91eb269a0-utilities\") pod \"certified-operators-ntbk9\" (UID: \"fe9ea210-3c93-4f3e-b904-24d91eb269a0\") " pod="openshift-marketplace/certified-operators-ntbk9" Nov 28 14:33:59 crc kubenswrapper[4857]: I1128 14:33:59.498050 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe9ea210-3c93-4f3e-b904-24d91eb269a0-catalog-content\") pod \"certified-operators-ntbk9\" (UID: \"fe9ea210-3c93-4f3e-b904-24d91eb269a0\") " pod="openshift-marketplace/certified-operators-ntbk9" Nov 28 14:33:59 crc kubenswrapper[4857]: I1128 14:33:59.498199 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kqvk5\" (UniqueName: \"kubernetes.io/projected/fe9ea210-3c93-4f3e-b904-24d91eb269a0-kube-api-access-kqvk5\") pod \"certified-operators-ntbk9\" (UID: \"fe9ea210-3c93-4f3e-b904-24d91eb269a0\") " pod="openshift-marketplace/certified-operators-ntbk9" Nov 28 14:33:59 crc kubenswrapper[4857]: I1128 14:33:59.599605 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe9ea210-3c93-4f3e-b904-24d91eb269a0-catalog-content\") pod \"certified-operators-ntbk9\" (UID: \"fe9ea210-3c93-4f3e-b904-24d91eb269a0\") " pod="openshift-marketplace/certified-operators-ntbk9" Nov 28 14:33:59 crc kubenswrapper[4857]: I1128 14:33:59.599931 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kqvk5\" (UniqueName: \"kubernetes.io/projected/fe9ea210-3c93-4f3e-b904-24d91eb269a0-kube-api-access-kqvk5\") pod \"certified-operators-ntbk9\" (UID: \"fe9ea210-3c93-4f3e-b904-24d91eb269a0\") " pod="openshift-marketplace/certified-operators-ntbk9" Nov 28 14:33:59 crc kubenswrapper[4857]: I1128 14:33:59.600014 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe9ea210-3c93-4f3e-b904-24d91eb269a0-utilities\") pod \"certified-operators-ntbk9\" (UID: \"fe9ea210-3c93-4f3e-b904-24d91eb269a0\") " pod="openshift-marketplace/certified-operators-ntbk9" Nov 28 14:33:59 crc kubenswrapper[4857]: I1128 14:33:59.600441 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe9ea210-3c93-4f3e-b904-24d91eb269a0-utilities\") pod \"certified-operators-ntbk9\" (UID: \"fe9ea210-3c93-4f3e-b904-24d91eb269a0\") " pod="openshift-marketplace/certified-operators-ntbk9" Nov 28 14:33:59 crc kubenswrapper[4857]: I1128 14:33:59.600616 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe9ea210-3c93-4f3e-b904-24d91eb269a0-catalog-content\") pod \"certified-operators-ntbk9\" (UID: \"fe9ea210-3c93-4f3e-b904-24d91eb269a0\") " pod="openshift-marketplace/certified-operators-ntbk9" Nov 28 14:33:59 crc kubenswrapper[4857]: I1128 14:33:59.621908 4857 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-kqvk5\" (UniqueName: \"kubernetes.io/projected/fe9ea210-3c93-4f3e-b904-24d91eb269a0-kube-api-access-kqvk5\") pod \"certified-operators-ntbk9\" (UID: \"fe9ea210-3c93-4f3e-b904-24d91eb269a0\") " pod="openshift-marketplace/certified-operators-ntbk9" Nov 28 14:33:59 crc kubenswrapper[4857]: I1128 14:33:59.653434 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ntbk9" Nov 28 14:34:00 crc kubenswrapper[4857]: I1128 14:34:00.129707 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ntbk9"] Nov 28 14:34:00 crc kubenswrapper[4857]: W1128 14:34:00.140846 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe9ea210_3c93_4f3e_b904_24d91eb269a0.slice/crio-b26beb14a73122c38fd7ce1e7a2807f03b080905944937595569d23c055f0c09 WatchSource:0}: Error finding container b26beb14a73122c38fd7ce1e7a2807f03b080905944937595569d23c055f0c09: Status 404 returned error can't find the container with id b26beb14a73122c38fd7ce1e7a2807f03b080905944937595569d23c055f0c09 Nov 28 14:34:00 crc kubenswrapper[4857]: I1128 14:34:00.579166 4857 generic.go:334] "Generic (PLEG): container finished" podID="fe9ea210-3c93-4f3e-b904-24d91eb269a0" containerID="0dc89611f9661f61deec076d58c7e13021082ce43a8d69affeb1398c2a2f33a9" exitCode=0 Nov 28 14:34:00 crc kubenswrapper[4857]: I1128 14:34:00.579244 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ntbk9" event={"ID":"fe9ea210-3c93-4f3e-b904-24d91eb269a0","Type":"ContainerDied","Data":"0dc89611f9661f61deec076d58c7e13021082ce43a8d69affeb1398c2a2f33a9"} Nov 28 14:34:00 crc kubenswrapper[4857]: I1128 14:34:00.579293 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ntbk9" event={"ID":"fe9ea210-3c93-4f3e-b904-24d91eb269a0","Type":"ContainerStarted","Data":"b26beb14a73122c38fd7ce1e7a2807f03b080905944937595569d23c055f0c09"} Nov 28 14:34:00 crc kubenswrapper[4857]: I1128 14:34:00.582519 4857 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 14:34:01 crc kubenswrapper[4857]: I1128 14:34:01.601722 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ntbk9" event={"ID":"fe9ea210-3c93-4f3e-b904-24d91eb269a0","Type":"ContainerStarted","Data":"838cbc29991baa684cc008e4517fb989f3c03790f8d7d0146ce3861d38f1f22c"} Nov 28 14:34:02 crc kubenswrapper[4857]: I1128 14:34:02.614516 4857 generic.go:334] "Generic (PLEG): container finished" podID="fe9ea210-3c93-4f3e-b904-24d91eb269a0" containerID="838cbc29991baa684cc008e4517fb989f3c03790f8d7d0146ce3861d38f1f22c" exitCode=0 Nov 28 14:34:02 crc kubenswrapper[4857]: I1128 14:34:02.614668 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ntbk9" event={"ID":"fe9ea210-3c93-4f3e-b904-24d91eb269a0","Type":"ContainerDied","Data":"838cbc29991baa684cc008e4517fb989f3c03790f8d7d0146ce3861d38f1f22c"} Nov 28 14:34:03 crc kubenswrapper[4857]: I1128 14:34:03.627967 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ntbk9" event={"ID":"fe9ea210-3c93-4f3e-b904-24d91eb269a0","Type":"ContainerStarted","Data":"47e0803abdb5c7355dd1d4b8bd16496a5777f780686a622e5792bac74c743944"} Nov 28 14:34:03 crc kubenswrapper[4857]: I1128 
14:34:03.649480 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-ntbk9" podStartSLOduration=2.049142614 podStartE2EDuration="4.649461329s" podCreationTimestamp="2025-11-28 14:33:59 +0000 UTC" firstStartedPulling="2025-11-28 14:34:00.58222134 +0000 UTC m=+3890.706162787" lastFinishedPulling="2025-11-28 14:34:03.182540065 +0000 UTC m=+3893.306481502" observedRunningTime="2025-11-28 14:34:03.643301375 +0000 UTC m=+3893.767242812" watchObservedRunningTime="2025-11-28 14:34:03.649461329 +0000 UTC m=+3893.773402756" Nov 28 14:34:04 crc kubenswrapper[4857]: I1128 14:34:04.229056 4857 scope.go:117] "RemoveContainer" containerID="408ecac3a9253fbfbfc9eb93cc159bfb672675065a40069db743517006e942c7" Nov 28 14:34:04 crc kubenswrapper[4857]: E1128 14:34:04.229537 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:34:09 crc kubenswrapper[4857]: I1128 14:34:09.653798 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-ntbk9" Nov 28 14:34:09 crc kubenswrapper[4857]: I1128 14:34:09.654788 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-ntbk9" Nov 28 14:34:09 crc kubenswrapper[4857]: I1128 14:34:09.744769 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-ntbk9" Nov 28 14:34:09 crc kubenswrapper[4857]: I1128 14:34:09.812094 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-ntbk9" Nov 28 14:34:12 crc kubenswrapper[4857]: I1128 14:34:12.315373 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ntbk9"] Nov 28 14:34:12 crc kubenswrapper[4857]: I1128 14:34:12.316187 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-ntbk9" podUID="fe9ea210-3c93-4f3e-b904-24d91eb269a0" containerName="registry-server" containerID="cri-o://47e0803abdb5c7355dd1d4b8bd16496a5777f780686a622e5792bac74c743944" gracePeriod=2 Nov 28 14:34:12 crc kubenswrapper[4857]: E1128 14:34:12.469982 4857 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfe9ea210_3c93_4f3e_b904_24d91eb269a0.slice/crio-47e0803abdb5c7355dd1d4b8bd16496a5777f780686a622e5792bac74c743944.scope\": RecentStats: unable to find data in memory cache]" Nov 28 14:34:12 crc kubenswrapper[4857]: I1128 14:34:12.711439 4857 generic.go:334] "Generic (PLEG): container finished" podID="fe9ea210-3c93-4f3e-b904-24d91eb269a0" containerID="47e0803abdb5c7355dd1d4b8bd16496a5777f780686a622e5792bac74c743944" exitCode=0 Nov 28 14:34:12 crc kubenswrapper[4857]: I1128 14:34:12.711502 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ntbk9" event={"ID":"fe9ea210-3c93-4f3e-b904-24d91eb269a0","Type":"ContainerDied","Data":"47e0803abdb5c7355dd1d4b8bd16496a5777f780686a622e5792bac74c743944"} 
Nov 28 14:34:13 crc kubenswrapper[4857]: I1128 14:34:13.271750 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ntbk9" Nov 28 14:34:13 crc kubenswrapper[4857]: I1128 14:34:13.409999 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe9ea210-3c93-4f3e-b904-24d91eb269a0-utilities\") pod \"fe9ea210-3c93-4f3e-b904-24d91eb269a0\" (UID: \"fe9ea210-3c93-4f3e-b904-24d91eb269a0\") " Nov 28 14:34:13 crc kubenswrapper[4857]: I1128 14:34:13.410181 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe9ea210-3c93-4f3e-b904-24d91eb269a0-catalog-content\") pod \"fe9ea210-3c93-4f3e-b904-24d91eb269a0\" (UID: \"fe9ea210-3c93-4f3e-b904-24d91eb269a0\") " Nov 28 14:34:13 crc kubenswrapper[4857]: I1128 14:34:13.410219 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kqvk5\" (UniqueName: \"kubernetes.io/projected/fe9ea210-3c93-4f3e-b904-24d91eb269a0-kube-api-access-kqvk5\") pod \"fe9ea210-3c93-4f3e-b904-24d91eb269a0\" (UID: \"fe9ea210-3c93-4f3e-b904-24d91eb269a0\") " Nov 28 14:34:13 crc kubenswrapper[4857]: I1128 14:34:13.411351 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fe9ea210-3c93-4f3e-b904-24d91eb269a0-utilities" (OuterVolumeSpecName: "utilities") pod "fe9ea210-3c93-4f3e-b904-24d91eb269a0" (UID: "fe9ea210-3c93-4f3e-b904-24d91eb269a0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:34:13 crc kubenswrapper[4857]: I1128 14:34:13.418228 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fe9ea210-3c93-4f3e-b904-24d91eb269a0-kube-api-access-kqvk5" (OuterVolumeSpecName: "kube-api-access-kqvk5") pod "fe9ea210-3c93-4f3e-b904-24d91eb269a0" (UID: "fe9ea210-3c93-4f3e-b904-24d91eb269a0"). InnerVolumeSpecName "kube-api-access-kqvk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:34:13 crc kubenswrapper[4857]: I1128 14:34:13.481274 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fe9ea210-3c93-4f3e-b904-24d91eb269a0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fe9ea210-3c93-4f3e-b904-24d91eb269a0" (UID: "fe9ea210-3c93-4f3e-b904-24d91eb269a0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:34:13 crc kubenswrapper[4857]: I1128 14:34:13.511801 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fe9ea210-3c93-4f3e-b904-24d91eb269a0-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 14:34:13 crc kubenswrapper[4857]: I1128 14:34:13.511846 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fe9ea210-3c93-4f3e-b904-24d91eb269a0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 14:34:13 crc kubenswrapper[4857]: I1128 14:34:13.511862 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kqvk5\" (UniqueName: \"kubernetes.io/projected/fe9ea210-3c93-4f3e-b904-24d91eb269a0-kube-api-access-kqvk5\") on node \"crc\" DevicePath \"\"" Nov 28 14:34:13 crc kubenswrapper[4857]: I1128 14:34:13.722852 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ntbk9" event={"ID":"fe9ea210-3c93-4f3e-b904-24d91eb269a0","Type":"ContainerDied","Data":"b26beb14a73122c38fd7ce1e7a2807f03b080905944937595569d23c055f0c09"} Nov 28 14:34:13 crc kubenswrapper[4857]: I1128 14:34:13.722908 4857 scope.go:117] "RemoveContainer" containerID="47e0803abdb5c7355dd1d4b8bd16496a5777f780686a622e5792bac74c743944" Nov 28 14:34:13 crc kubenswrapper[4857]: I1128 14:34:13.722939 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ntbk9" Nov 28 14:34:13 crc kubenswrapper[4857]: I1128 14:34:13.751091 4857 scope.go:117] "RemoveContainer" containerID="838cbc29991baa684cc008e4517fb989f3c03790f8d7d0146ce3861d38f1f22c" Nov 28 14:34:13 crc kubenswrapper[4857]: I1128 14:34:13.770831 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ntbk9"] Nov 28 14:34:13 crc kubenswrapper[4857]: I1128 14:34:13.779311 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-ntbk9"] Nov 28 14:34:13 crc kubenswrapper[4857]: I1128 14:34:13.782508 4857 scope.go:117] "RemoveContainer" containerID="0dc89611f9661f61deec076d58c7e13021082ce43a8d69affeb1398c2a2f33a9" Nov 28 14:34:14 crc kubenswrapper[4857]: I1128 14:34:14.245552 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fe9ea210-3c93-4f3e-b904-24d91eb269a0" path="/var/lib/kubelet/pods/fe9ea210-3c93-4f3e-b904-24d91eb269a0/volumes" Nov 28 14:34:16 crc kubenswrapper[4857]: I1128 14:34:16.228994 4857 scope.go:117] "RemoveContainer" containerID="408ecac3a9253fbfbfc9eb93cc159bfb672675065a40069db743517006e942c7" Nov 28 14:34:16 crc kubenswrapper[4857]: E1128 14:34:16.229442 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:34:30 crc kubenswrapper[4857]: I1128 14:34:30.238058 4857 scope.go:117] "RemoveContainer" containerID="408ecac3a9253fbfbfc9eb93cc159bfb672675065a40069db743517006e942c7" Nov 28 14:34:30 crc kubenswrapper[4857]: E1128 14:34:30.239268 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" 
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:34:41 crc kubenswrapper[4857]: I1128 14:34:41.229495 4857 scope.go:117] "RemoveContainer" containerID="408ecac3a9253fbfbfc9eb93cc159bfb672675065a40069db743517006e942c7" Nov 28 14:34:41 crc kubenswrapper[4857]: E1128 14:34:41.230586 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:34:53 crc kubenswrapper[4857]: I1128 14:34:53.229250 4857 scope.go:117] "RemoveContainer" containerID="408ecac3a9253fbfbfc9eb93cc159bfb672675065a40069db743517006e942c7" Nov 28 14:34:53 crc kubenswrapper[4857]: E1128 14:34:53.230110 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:35:06 crc kubenswrapper[4857]: I1128 14:35:06.229700 4857 scope.go:117] "RemoveContainer" containerID="408ecac3a9253fbfbfc9eb93cc159bfb672675065a40069db743517006e942c7" Nov 28 14:35:06 crc kubenswrapper[4857]: E1128 14:35:06.230643 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:35:18 crc kubenswrapper[4857]: I1128 14:35:18.228589 4857 scope.go:117] "RemoveContainer" containerID="408ecac3a9253fbfbfc9eb93cc159bfb672675065a40069db743517006e942c7" Nov 28 14:35:18 crc kubenswrapper[4857]: E1128 14:35:18.229526 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:35:30 crc kubenswrapper[4857]: I1128 14:35:30.241020 4857 scope.go:117] "RemoveContainer" containerID="408ecac3a9253fbfbfc9eb93cc159bfb672675065a40069db743517006e942c7" Nov 28 14:35:30 crc kubenswrapper[4857]: E1128 14:35:30.242028 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:35:42 crc kubenswrapper[4857]: I1128 14:35:42.229054 4857 scope.go:117] "RemoveContainer" containerID="408ecac3a9253fbfbfc9eb93cc159bfb672675065a40069db743517006e942c7" Nov 28 14:35:42 crc kubenswrapper[4857]: E1128 14:35:42.230373 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:35:55 crc kubenswrapper[4857]: I1128 14:35:55.230223 4857 scope.go:117] "RemoveContainer" containerID="408ecac3a9253fbfbfc9eb93cc159bfb672675065a40069db743517006e942c7" Nov 28 14:35:55 crc kubenswrapper[4857]: E1128 14:35:55.231679 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:36:08 crc kubenswrapper[4857]: I1128 14:36:08.229240 4857 scope.go:117] "RemoveContainer" containerID="408ecac3a9253fbfbfc9eb93cc159bfb672675065a40069db743517006e942c7" Nov 28 14:36:08 crc kubenswrapper[4857]: E1128 14:36:08.231262 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:36:23 crc kubenswrapper[4857]: I1128 14:36:23.228826 4857 scope.go:117] "RemoveContainer" containerID="408ecac3a9253fbfbfc9eb93cc159bfb672675065a40069db743517006e942c7" Nov 28 14:36:23 crc kubenswrapper[4857]: I1128 14:36:23.800311 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerStarted","Data":"6eb13e547910b72d042a720b3091ecc2ce1a2ffeb08a41a1cd1730a95742cc82"} Nov 28 14:36:28 crc kubenswrapper[4857]: I1128 14:36:28.997674 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-dtjg6"] Nov 28 14:36:28 crc kubenswrapper[4857]: E1128 14:36:28.998666 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe9ea210-3c93-4f3e-b904-24d91eb269a0" containerName="extract-content" Nov 28 14:36:28 crc kubenswrapper[4857]: I1128 14:36:28.998684 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe9ea210-3c93-4f3e-b904-24d91eb269a0" containerName="extract-content" Nov 28 14:36:28 crc kubenswrapper[4857]: E1128 14:36:28.998704 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe9ea210-3c93-4f3e-b904-24d91eb269a0" containerName="extract-utilities" Nov 28 14:36:28 crc 
kubenswrapper[4857]: I1128 14:36:28.998712 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe9ea210-3c93-4f3e-b904-24d91eb269a0" containerName="extract-utilities"
Nov 28 14:36:28 crc kubenswrapper[4857]: E1128 14:36:28.998738 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe9ea210-3c93-4f3e-b904-24d91eb269a0" containerName="registry-server"
Nov 28 14:36:28 crc kubenswrapper[4857]: I1128 14:36:28.998746 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe9ea210-3c93-4f3e-b904-24d91eb269a0" containerName="registry-server"
Nov 28 14:36:28 crc kubenswrapper[4857]: I1128 14:36:28.998936 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe9ea210-3c93-4f3e-b904-24d91eb269a0" containerName="registry-server"
Nov 28 14:36:29 crc kubenswrapper[4857]: I1128 14:36:29.000184 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dtjg6"
Nov 28 14:36:29 crc kubenswrapper[4857]: I1128 14:36:29.015589 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dtjg6"]
Nov 28 14:36:29 crc kubenswrapper[4857]: I1128 14:36:29.142054 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76b59b48-f6fb-4c6b-a4f6-3906b5669adb-utilities\") pod \"redhat-marketplace-dtjg6\" (UID: \"76b59b48-f6fb-4c6b-a4f6-3906b5669adb\") " pod="openshift-marketplace/redhat-marketplace-dtjg6"
Nov 28 14:36:29 crc kubenswrapper[4857]: I1128 14:36:29.142145 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hkld4\" (UniqueName: \"kubernetes.io/projected/76b59b48-f6fb-4c6b-a4f6-3906b5669adb-kube-api-access-hkld4\") pod \"redhat-marketplace-dtjg6\" (UID: \"76b59b48-f6fb-4c6b-a4f6-3906b5669adb\") " pod="openshift-marketplace/redhat-marketplace-dtjg6"
Nov 28 14:36:29 crc kubenswrapper[4857]: I1128 14:36:29.142438 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76b59b48-f6fb-4c6b-a4f6-3906b5669adb-catalog-content\") pod \"redhat-marketplace-dtjg6\" (UID: \"76b59b48-f6fb-4c6b-a4f6-3906b5669adb\") " pod="openshift-marketplace/redhat-marketplace-dtjg6"
Nov 28 14:36:29 crc kubenswrapper[4857]: I1128 14:36:29.243685 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hkld4\" (UniqueName: \"kubernetes.io/projected/76b59b48-f6fb-4c6b-a4f6-3906b5669adb-kube-api-access-hkld4\") pod \"redhat-marketplace-dtjg6\" (UID: \"76b59b48-f6fb-4c6b-a4f6-3906b5669adb\") " pod="openshift-marketplace/redhat-marketplace-dtjg6"
Nov 28 14:36:29 crc kubenswrapper[4857]: I1128 14:36:29.243791 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76b59b48-f6fb-4c6b-a4f6-3906b5669adb-catalog-content\") pod \"redhat-marketplace-dtjg6\" (UID: \"76b59b48-f6fb-4c6b-a4f6-3906b5669adb\") " pod="openshift-marketplace/redhat-marketplace-dtjg6"
Nov 28 14:36:29 crc kubenswrapper[4857]: I1128 14:36:29.243871 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76b59b48-f6fb-4c6b-a4f6-3906b5669adb-utilities\") pod \"redhat-marketplace-dtjg6\" (UID: \"76b59b48-f6fb-4c6b-a4f6-3906b5669adb\") " pod="openshift-marketplace/redhat-marketplace-dtjg6"
Nov 28 14:36:29 crc kubenswrapper[4857]: I1128 14:36:29.244495 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76b59b48-f6fb-4c6b-a4f6-3906b5669adb-catalog-content\") pod \"redhat-marketplace-dtjg6\" (UID: \"76b59b48-f6fb-4c6b-a4f6-3906b5669adb\") " pod="openshift-marketplace/redhat-marketplace-dtjg6"
Nov 28 14:36:29 crc kubenswrapper[4857]: I1128 14:36:29.244556 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76b59b48-f6fb-4c6b-a4f6-3906b5669adb-utilities\") pod \"redhat-marketplace-dtjg6\" (UID: \"76b59b48-f6fb-4c6b-a4f6-3906b5669adb\") " pod="openshift-marketplace/redhat-marketplace-dtjg6"
Nov 28 14:36:29 crc kubenswrapper[4857]: I1128 14:36:29.264405 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hkld4\" (UniqueName: \"kubernetes.io/projected/76b59b48-f6fb-4c6b-a4f6-3906b5669adb-kube-api-access-hkld4\") pod \"redhat-marketplace-dtjg6\" (UID: \"76b59b48-f6fb-4c6b-a4f6-3906b5669adb\") " pod="openshift-marketplace/redhat-marketplace-dtjg6"
Nov 28 14:36:29 crc kubenswrapper[4857]: I1128 14:36:29.321983 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dtjg6"
Nov 28 14:36:29 crc kubenswrapper[4857]: I1128 14:36:29.793783 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dtjg6"]
Nov 28 14:36:29 crc kubenswrapper[4857]: I1128 14:36:29.851788 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dtjg6" event={"ID":"76b59b48-f6fb-4c6b-a4f6-3906b5669adb","Type":"ContainerStarted","Data":"25d4a5653b0137ed651e4cf1ea96e520f2c1065c16c5108b111d6f7166b38eed"}
Nov 28 14:36:30 crc kubenswrapper[4857]: I1128 14:36:30.865460 4857 generic.go:334] "Generic (PLEG): container finished" podID="76b59b48-f6fb-4c6b-a4f6-3906b5669adb" containerID="f396ee4ea3ce40051193b8cd5870c135ad85c08bccbc6833f0499a24bb421e7d" exitCode=0
Nov 28 14:36:30 crc kubenswrapper[4857]: I1128 14:36:30.865561 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dtjg6" event={"ID":"76b59b48-f6fb-4c6b-a4f6-3906b5669adb","Type":"ContainerDied","Data":"f396ee4ea3ce40051193b8cd5870c135ad85c08bccbc6833f0499a24bb421e7d"}
Nov 28 14:36:31 crc kubenswrapper[4857]: I1128 14:36:31.880390 4857 generic.go:334] "Generic (PLEG): container finished" podID="76b59b48-f6fb-4c6b-a4f6-3906b5669adb" containerID="7e585adc4266689a6ae828d8108c127b628d1f5c1f2af705195f2180b1c6633a" exitCode=0
Nov 28 14:36:31 crc kubenswrapper[4857]: I1128 14:36:31.880440 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dtjg6" event={"ID":"76b59b48-f6fb-4c6b-a4f6-3906b5669adb","Type":"ContainerDied","Data":"7e585adc4266689a6ae828d8108c127b628d1f5c1f2af705195f2180b1c6633a"}
Nov 28 14:36:32 crc kubenswrapper[4857]: I1128 14:36:32.901272 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dtjg6" event={"ID":"76b59b48-f6fb-4c6b-a4f6-3906b5669adb","Type":"ContainerStarted","Data":"3dbe53cc0bc4c5f090620f11c346bf6570617abb21b8cf20079400e3e2f519f3"}
Nov 28 14:36:32 crc kubenswrapper[4857]: I1128 14:36:32.929488 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-dtjg6" podStartSLOduration=3.185880449 podStartE2EDuration="4.929459873s" podCreationTimestamp="2025-11-28 14:36:28 +0000 UTC" firstStartedPulling="2025-11-28 14:36:30.86986685 +0000 UTC m=+4040.993808297" lastFinishedPulling="2025-11-28 14:36:32.613446244 +0000 UTC m=+4042.737387721" observedRunningTime="2025-11-28 14:36:32.925196619 +0000 UTC m=+4043.049138066" watchObservedRunningTime="2025-11-28 14:36:32.929459873 +0000 UTC m=+4043.053401320"
Nov 28 14:36:39 crc kubenswrapper[4857]: I1128 14:36:39.322355 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-dtjg6"
Nov 28 14:36:39 crc kubenswrapper[4857]: I1128 14:36:39.322962 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-dtjg6"
Nov 28 14:36:39 crc kubenswrapper[4857]: I1128 14:36:39.361812 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-dtjg6"
Nov 28 14:36:40 crc kubenswrapper[4857]: I1128 14:36:40.001462 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-dtjg6"
Nov 28 14:36:40 crc kubenswrapper[4857]: I1128 14:36:40.057123 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dtjg6"]
Nov 28 14:36:41 crc kubenswrapper[4857]: I1128 14:36:41.976815 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-dtjg6" podUID="76b59b48-f6fb-4c6b-a4f6-3906b5669adb" containerName="registry-server" containerID="cri-o://3dbe53cc0bc4c5f090620f11c346bf6570617abb21b8cf20079400e3e2f519f3" gracePeriod=2
Nov 28 14:36:42 crc kubenswrapper[4857]: I1128 14:36:42.015983 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-ng54x"]
Nov 28 14:36:42 crc kubenswrapper[4857]: I1128 14:36:42.018473 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ng54x"
Nov 28 14:36:42 crc kubenswrapper[4857]: I1128 14:36:42.031994 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ng54x"]
Nov 28 14:36:42 crc kubenswrapper[4857]: I1128 14:36:42.139526 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5r6ml\" (UniqueName: \"kubernetes.io/projected/0f7725d5-2b6c-4bc7-b202-95afd7ef388c-kube-api-access-5r6ml\") pod \"community-operators-ng54x\" (UID: \"0f7725d5-2b6c-4bc7-b202-95afd7ef388c\") " pod="openshift-marketplace/community-operators-ng54x"
Nov 28 14:36:42 crc kubenswrapper[4857]: I1128 14:36:42.139583 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0f7725d5-2b6c-4bc7-b202-95afd7ef388c-catalog-content\") pod \"community-operators-ng54x\" (UID: \"0f7725d5-2b6c-4bc7-b202-95afd7ef388c\") " pod="openshift-marketplace/community-operators-ng54x"
Nov 28 14:36:42 crc kubenswrapper[4857]: I1128 14:36:42.139806 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0f7725d5-2b6c-4bc7-b202-95afd7ef388c-utilities\") pod \"community-operators-ng54x\" (UID: \"0f7725d5-2b6c-4bc7-b202-95afd7ef388c\") " pod="openshift-marketplace/community-operators-ng54x"
Nov 28 14:36:42 crc kubenswrapper[4857]: I1128 14:36:42.241342 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0f7725d5-2b6c-4bc7-b202-95afd7ef388c-utilities\") pod \"community-operators-ng54x\" (UID: \"0f7725d5-2b6c-4bc7-b202-95afd7ef388c\") " pod="openshift-marketplace/community-operators-ng54x"
Nov 28 14:36:42 crc kubenswrapper[4857]: I1128 14:36:42.241403 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5r6ml\" (UniqueName: \"kubernetes.io/projected/0f7725d5-2b6c-4bc7-b202-95afd7ef388c-kube-api-access-5r6ml\") pod \"community-operators-ng54x\" (UID: \"0f7725d5-2b6c-4bc7-b202-95afd7ef388c\") " pod="openshift-marketplace/community-operators-ng54x"
Nov 28 14:36:42 crc kubenswrapper[4857]: I1128 14:36:42.241442 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0f7725d5-2b6c-4bc7-b202-95afd7ef388c-catalog-content\") pod \"community-operators-ng54x\" (UID: \"0f7725d5-2b6c-4bc7-b202-95afd7ef388c\") " pod="openshift-marketplace/community-operators-ng54x"
Nov 28 14:36:42 crc kubenswrapper[4857]: I1128 14:36:42.241880 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0f7725d5-2b6c-4bc7-b202-95afd7ef388c-utilities\") pod \"community-operators-ng54x\" (UID: \"0f7725d5-2b6c-4bc7-b202-95afd7ef388c\") " pod="openshift-marketplace/community-operators-ng54x"
Nov 28 14:36:42 crc kubenswrapper[4857]: I1128 14:36:42.241924 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0f7725d5-2b6c-4bc7-b202-95afd7ef388c-catalog-content\") pod \"community-operators-ng54x\" (UID: \"0f7725d5-2b6c-4bc7-b202-95afd7ef388c\") " pod="openshift-marketplace/community-operators-ng54x"
Nov 28 14:36:42 crc kubenswrapper[4857]: I1128 14:36:42.265424 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5r6ml\" (UniqueName: \"kubernetes.io/projected/0f7725d5-2b6c-4bc7-b202-95afd7ef388c-kube-api-access-5r6ml\") pod \"community-operators-ng54x\" (UID: \"0f7725d5-2b6c-4bc7-b202-95afd7ef388c\") " pod="openshift-marketplace/community-operators-ng54x"
Nov 28 14:36:42 crc kubenswrapper[4857]: I1128 14:36:42.359481 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ng54x"
Nov 28 14:36:42 crc kubenswrapper[4857]: I1128 14:36:42.859872 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dtjg6"
Nov 28 14:36:42 crc kubenswrapper[4857]: I1128 14:36:42.867064 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ng54x"]
Nov 28 14:36:42 crc kubenswrapper[4857]: W1128 14:36:42.870482 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0f7725d5_2b6c_4bc7_b202_95afd7ef388c.slice/crio-e5db6780464401de2d7101c4afd81ed936e96efb674bedaa7fe24157af252021 WatchSource:0}: Error finding container e5db6780464401de2d7101c4afd81ed936e96efb674bedaa7fe24157af252021: Status 404 returned error can't find the container with id e5db6780464401de2d7101c4afd81ed936e96efb674bedaa7fe24157af252021
Nov 28 14:36:42 crc kubenswrapper[4857]: I1128 14:36:42.949460 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76b59b48-f6fb-4c6b-a4f6-3906b5669adb-utilities\") pod \"76b59b48-f6fb-4c6b-a4f6-3906b5669adb\" (UID: \"76b59b48-f6fb-4c6b-a4f6-3906b5669adb\") "
Nov 28 14:36:42 crc kubenswrapper[4857]: I1128 14:36:42.949502 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76b59b48-f6fb-4c6b-a4f6-3906b5669adb-catalog-content\") pod \"76b59b48-f6fb-4c6b-a4f6-3906b5669adb\" (UID: \"76b59b48-f6fb-4c6b-a4f6-3906b5669adb\") "
Nov 28 14:36:42 crc kubenswrapper[4857]: I1128 14:36:42.949563 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hkld4\" (UniqueName: \"kubernetes.io/projected/76b59b48-f6fb-4c6b-a4f6-3906b5669adb-kube-api-access-hkld4\") pod \"76b59b48-f6fb-4c6b-a4f6-3906b5669adb\" (UID: \"76b59b48-f6fb-4c6b-a4f6-3906b5669adb\") "
Nov 28 14:36:42 crc kubenswrapper[4857]: I1128 14:36:42.950331 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/76b59b48-f6fb-4c6b-a4f6-3906b5669adb-utilities" (OuterVolumeSpecName: "utilities") pod "76b59b48-f6fb-4c6b-a4f6-3906b5669adb" (UID: "76b59b48-f6fb-4c6b-a4f6-3906b5669adb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 14:36:42 crc kubenswrapper[4857]: I1128 14:36:42.956054 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/76b59b48-f6fb-4c6b-a4f6-3906b5669adb-kube-api-access-hkld4" (OuterVolumeSpecName: "kube-api-access-hkld4") pod "76b59b48-f6fb-4c6b-a4f6-3906b5669adb" (UID: "76b59b48-f6fb-4c6b-a4f6-3906b5669adb"). InnerVolumeSpecName "kube-api-access-hkld4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 14:36:42 crc kubenswrapper[4857]: I1128 14:36:42.982963 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/76b59b48-f6fb-4c6b-a4f6-3906b5669adb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "76b59b48-f6fb-4c6b-a4f6-3906b5669adb" (UID: "76b59b48-f6fb-4c6b-a4f6-3906b5669adb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 14:36:42 crc kubenswrapper[4857]: I1128 14:36:42.986013 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ng54x" event={"ID":"0f7725d5-2b6c-4bc7-b202-95afd7ef388c","Type":"ContainerStarted","Data":"e5db6780464401de2d7101c4afd81ed936e96efb674bedaa7fe24157af252021"}
Nov 28 14:36:42 crc kubenswrapper[4857]: I1128 14:36:42.990147 4857 generic.go:334] "Generic (PLEG): container finished" podID="76b59b48-f6fb-4c6b-a4f6-3906b5669adb" containerID="3dbe53cc0bc4c5f090620f11c346bf6570617abb21b8cf20079400e3e2f519f3" exitCode=0
Nov 28 14:36:42 crc kubenswrapper[4857]: I1128 14:36:42.990240 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dtjg6"
Nov 28 14:36:42 crc kubenswrapper[4857]: I1128 14:36:42.990234 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dtjg6" event={"ID":"76b59b48-f6fb-4c6b-a4f6-3906b5669adb","Type":"ContainerDied","Data":"3dbe53cc0bc4c5f090620f11c346bf6570617abb21b8cf20079400e3e2f519f3"}
Nov 28 14:36:42 crc kubenswrapper[4857]: I1128 14:36:42.990425 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dtjg6" event={"ID":"76b59b48-f6fb-4c6b-a4f6-3906b5669adb","Type":"ContainerDied","Data":"25d4a5653b0137ed651e4cf1ea96e520f2c1065c16c5108b111d6f7166b38eed"}
Nov 28 14:36:42 crc kubenswrapper[4857]: I1128 14:36:42.990470 4857 scope.go:117] "RemoveContainer" containerID="3dbe53cc0bc4c5f090620f11c346bf6570617abb21b8cf20079400e3e2f519f3"
Nov 28 14:36:43 crc kubenswrapper[4857]: I1128 14:36:43.014038 4857 scope.go:117] "RemoveContainer" containerID="7e585adc4266689a6ae828d8108c127b628d1f5c1f2af705195f2180b1c6633a"
Nov 28 14:36:43 crc kubenswrapper[4857]: I1128 14:36:43.029977 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dtjg6"]
Nov 28 14:36:43 crc kubenswrapper[4857]: I1128 14:36:43.035415 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-dtjg6"]
Nov 28 14:36:43 crc kubenswrapper[4857]: I1128 14:36:43.050989 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/76b59b48-f6fb-4c6b-a4f6-3906b5669adb-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 14:36:43 crc kubenswrapper[4857]: I1128 14:36:43.051026 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/76b59b48-f6fb-4c6b-a4f6-3906b5669adb-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 14:36:43 crc kubenswrapper[4857]: I1128 14:36:43.051043 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hkld4\" (UniqueName: \"kubernetes.io/projected/76b59b48-f6fb-4c6b-a4f6-3906b5669adb-kube-api-access-hkld4\") on node \"crc\" DevicePath \"\""
Nov 28 14:36:43 crc kubenswrapper[4857]: I1128 14:36:43.097594 4857 scope.go:117] "RemoveContainer" containerID="f396ee4ea3ce40051193b8cd5870c135ad85c08bccbc6833f0499a24bb421e7d"
Nov 28 14:36:43 crc kubenswrapper[4857]: I1128 14:36:43.151602 4857 scope.go:117] "RemoveContainer" containerID="3dbe53cc0bc4c5f090620f11c346bf6570617abb21b8cf20079400e3e2f519f3"
Nov 28 14:36:43 crc kubenswrapper[4857]: E1128 14:36:43.152413 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3dbe53cc0bc4c5f090620f11c346bf6570617abb21b8cf20079400e3e2f519f3\": container with ID starting with 3dbe53cc0bc4c5f090620f11c346bf6570617abb21b8cf20079400e3e2f519f3 not found: ID does not exist" containerID="3dbe53cc0bc4c5f090620f11c346bf6570617abb21b8cf20079400e3e2f519f3"
Nov 28 14:36:43 crc kubenswrapper[4857]: I1128 14:36:43.152523 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3dbe53cc0bc4c5f090620f11c346bf6570617abb21b8cf20079400e3e2f519f3"} err="failed to get container status \"3dbe53cc0bc4c5f090620f11c346bf6570617abb21b8cf20079400e3e2f519f3\": rpc error: code = NotFound desc = could not find container \"3dbe53cc0bc4c5f090620f11c346bf6570617abb21b8cf20079400e3e2f519f3\": container with ID starting with 3dbe53cc0bc4c5f090620f11c346bf6570617abb21b8cf20079400e3e2f519f3 not found: ID does not exist"
Nov 28 14:36:43 crc kubenswrapper[4857]: I1128 14:36:43.152552 4857 scope.go:117] "RemoveContainer" containerID="7e585adc4266689a6ae828d8108c127b628d1f5c1f2af705195f2180b1c6633a"
Nov 28 14:36:43 crc kubenswrapper[4857]: E1128 14:36:43.153074 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7e585adc4266689a6ae828d8108c127b628d1f5c1f2af705195f2180b1c6633a\": container with ID starting with 7e585adc4266689a6ae828d8108c127b628d1f5c1f2af705195f2180b1c6633a not found: ID does not exist" containerID="7e585adc4266689a6ae828d8108c127b628d1f5c1f2af705195f2180b1c6633a"
Nov 28 14:36:43 crc kubenswrapper[4857]: I1128 14:36:43.153130 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e585adc4266689a6ae828d8108c127b628d1f5c1f2af705195f2180b1c6633a"} err="failed to get container status \"7e585adc4266689a6ae828d8108c127b628d1f5c1f2af705195f2180b1c6633a\": rpc error: code = NotFound desc = could not find container \"7e585adc4266689a6ae828d8108c127b628d1f5c1f2af705195f2180b1c6633a\": container with ID starting with 7e585adc4266689a6ae828d8108c127b628d1f5c1f2af705195f2180b1c6633a not found: ID does not exist"
Nov 28 14:36:43 crc kubenswrapper[4857]: I1128 14:36:43.153153 4857 scope.go:117] "RemoveContainer" containerID="f396ee4ea3ce40051193b8cd5870c135ad85c08bccbc6833f0499a24bb421e7d"
Nov 28 14:36:43 crc kubenswrapper[4857]: E1128 14:36:43.153740 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f396ee4ea3ce40051193b8cd5870c135ad85c08bccbc6833f0499a24bb421e7d\": container with ID starting with f396ee4ea3ce40051193b8cd5870c135ad85c08bccbc6833f0499a24bb421e7d not found: ID does not exist" containerID="f396ee4ea3ce40051193b8cd5870c135ad85c08bccbc6833f0499a24bb421e7d"
Nov 28 14:36:43 crc kubenswrapper[4857]: I1128 14:36:43.153774 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f396ee4ea3ce40051193b8cd5870c135ad85c08bccbc6833f0499a24bb421e7d"} err="failed to get container status \"f396ee4ea3ce40051193b8cd5870c135ad85c08bccbc6833f0499a24bb421e7d\": rpc error: code = NotFound desc = could not find container \"f396ee4ea3ce40051193b8cd5870c135ad85c08bccbc6833f0499a24bb421e7d\": container with ID starting with f396ee4ea3ce40051193b8cd5870c135ad85c08bccbc6833f0499a24bb421e7d not found: ID does not exist"
Nov 28 14:36:44 crc kubenswrapper[4857]: I1128 14:36:44.002130 4857 generic.go:334] "Generic (PLEG): container finished" podID="0f7725d5-2b6c-4bc7-b202-95afd7ef388c" containerID="ea9e719cede08aca136c396c0ae854081cd5b1313e19019b3802da6399445b5d" exitCode=0
Nov 28 14:36:44 crc kubenswrapper[4857]: I1128 14:36:44.002171 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ng54x" event={"ID":"0f7725d5-2b6c-4bc7-b202-95afd7ef388c","Type":"ContainerDied","Data":"ea9e719cede08aca136c396c0ae854081cd5b1313e19019b3802da6399445b5d"}
Nov 28 14:36:44 crc kubenswrapper[4857]: I1128 14:36:44.240206 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="76b59b48-f6fb-4c6b-a4f6-3906b5669adb" path="/var/lib/kubelet/pods/76b59b48-f6fb-4c6b-a4f6-3906b5669adb/volumes"
Nov 28 14:36:45 crc kubenswrapper[4857]: I1128 14:36:45.012612 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ng54x" event={"ID":"0f7725d5-2b6c-4bc7-b202-95afd7ef388c","Type":"ContainerStarted","Data":"9f30fbd690dd2f1b32650a310d3506e69243adb40ccdbc4c4b8d19b4ed20af64"}
Nov 28 14:36:46 crc kubenswrapper[4857]: I1128 14:36:46.023178 4857 generic.go:334] "Generic (PLEG): container finished" podID="0f7725d5-2b6c-4bc7-b202-95afd7ef388c" containerID="9f30fbd690dd2f1b32650a310d3506e69243adb40ccdbc4c4b8d19b4ed20af64" exitCode=0
Nov 28 14:36:46 crc kubenswrapper[4857]: I1128 14:36:46.023376 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ng54x" event={"ID":"0f7725d5-2b6c-4bc7-b202-95afd7ef388c","Type":"ContainerDied","Data":"9f30fbd690dd2f1b32650a310d3506e69243adb40ccdbc4c4b8d19b4ed20af64"}
Nov 28 14:36:47 crc kubenswrapper[4857]: I1128 14:36:47.032117 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ng54x" event={"ID":"0f7725d5-2b6c-4bc7-b202-95afd7ef388c","Type":"ContainerStarted","Data":"98316cedcbf05e9a9842f5afd5b9611a334e93c52b1c61c0d766d1579c6bbffb"}
Nov 28 14:36:47 crc kubenswrapper[4857]: I1128 14:36:47.055654 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-ng54x" podStartSLOduration=3.513997807 podStartE2EDuration="6.055633328s" podCreationTimestamp="2025-11-28 14:36:41 +0000 UTC" firstStartedPulling="2025-11-28 14:36:44.004729874 +0000 UTC m=+4054.128671321" lastFinishedPulling="2025-11-28 14:36:46.546365375 +0000 UTC m=+4056.670306842" observedRunningTime="2025-11-28 14:36:47.047924073 +0000 UTC m=+4057.171865510" watchObservedRunningTime="2025-11-28 14:36:47.055633328 +0000 UTC m=+4057.179574775"
Nov 28 14:36:52 crc kubenswrapper[4857]: I1128 14:36:52.360197 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-ng54x"
Nov 28 14:36:52 crc kubenswrapper[4857]: I1128 14:36:52.360612 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-ng54x"
Nov 28 14:36:52 crc kubenswrapper[4857]: I1128 14:36:52.410994 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-ng54x"
Nov 28 14:36:53 crc kubenswrapper[4857]: I1128 14:36:53.124586 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-ng54x"
Nov 28 14:36:53 crc kubenswrapper[4857]: I1128 14:36:53.173547 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ng54x"]
Nov 28 14:36:55 crc kubenswrapper[4857]: I1128 14:36:55.096922 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-ng54x" podUID="0f7725d5-2b6c-4bc7-b202-95afd7ef388c" containerName="registry-server" containerID="cri-o://98316cedcbf05e9a9842f5afd5b9611a334e93c52b1c61c0d766d1579c6bbffb" gracePeriod=2
Nov 28 14:36:56 crc kubenswrapper[4857]: I1128 14:36:56.008344 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ng54x"
Nov 28 14:36:56 crc kubenswrapper[4857]: I1128 14:36:56.105866 4857 generic.go:334] "Generic (PLEG): container finished" podID="0f7725d5-2b6c-4bc7-b202-95afd7ef388c" containerID="98316cedcbf05e9a9842f5afd5b9611a334e93c52b1c61c0d766d1579c6bbffb" exitCode=0
Nov 28 14:36:56 crc kubenswrapper[4857]: I1128 14:36:56.105925 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ng54x" event={"ID":"0f7725d5-2b6c-4bc7-b202-95afd7ef388c","Type":"ContainerDied","Data":"98316cedcbf05e9a9842f5afd5b9611a334e93c52b1c61c0d766d1579c6bbffb"}
Nov 28 14:36:56 crc kubenswrapper[4857]: I1128 14:36:56.105956 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ng54x"
Nov 28 14:36:56 crc kubenswrapper[4857]: I1128 14:36:56.105977 4857 scope.go:117] "RemoveContainer" containerID="98316cedcbf05e9a9842f5afd5b9611a334e93c52b1c61c0d766d1579c6bbffb"
Nov 28 14:36:56 crc kubenswrapper[4857]: I1128 14:36:56.105965 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ng54x" event={"ID":"0f7725d5-2b6c-4bc7-b202-95afd7ef388c","Type":"ContainerDied","Data":"e5db6780464401de2d7101c4afd81ed936e96efb674bedaa7fe24157af252021"}
Nov 28 14:36:56 crc kubenswrapper[4857]: I1128 14:36:56.124320 4857 scope.go:117] "RemoveContainer" containerID="9f30fbd690dd2f1b32650a310d3506e69243adb40ccdbc4c4b8d19b4ed20af64"
Nov 28 14:36:56 crc kubenswrapper[4857]: I1128 14:36:56.132205 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0f7725d5-2b6c-4bc7-b202-95afd7ef388c-catalog-content\") pod \"0f7725d5-2b6c-4bc7-b202-95afd7ef388c\" (UID: \"0f7725d5-2b6c-4bc7-b202-95afd7ef388c\") "
Nov 28 14:36:56 crc kubenswrapper[4857]: I1128 14:36:56.132390 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5r6ml\" (UniqueName: \"kubernetes.io/projected/0f7725d5-2b6c-4bc7-b202-95afd7ef388c-kube-api-access-5r6ml\") pod \"0f7725d5-2b6c-4bc7-b202-95afd7ef388c\" (UID: \"0f7725d5-2b6c-4bc7-b202-95afd7ef388c\") "
Nov 28 14:36:56 crc kubenswrapper[4857]: I1128 14:36:56.132452 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0f7725d5-2b6c-4bc7-b202-95afd7ef388c-utilities\") pod \"0f7725d5-2b6c-4bc7-b202-95afd7ef388c\" (UID: \"0f7725d5-2b6c-4bc7-b202-95afd7ef388c\") "
Nov 28 14:36:56 crc kubenswrapper[4857]: I1128 14:36:56.133792 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0f7725d5-2b6c-4bc7-b202-95afd7ef388c-utilities" (OuterVolumeSpecName: "utilities") pod "0f7725d5-2b6c-4bc7-b202-95afd7ef388c" (UID: "0f7725d5-2b6c-4bc7-b202-95afd7ef388c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 14:36:56 crc kubenswrapper[4857]: I1128 14:36:56.139719 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f7725d5-2b6c-4bc7-b202-95afd7ef388c-kube-api-access-5r6ml" (OuterVolumeSpecName: "kube-api-access-5r6ml") pod "0f7725d5-2b6c-4bc7-b202-95afd7ef388c" (UID: "0f7725d5-2b6c-4bc7-b202-95afd7ef388c"). InnerVolumeSpecName "kube-api-access-5r6ml". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 14:36:56 crc kubenswrapper[4857]: I1128 14:36:56.146132 4857 scope.go:117] "RemoveContainer" containerID="ea9e719cede08aca136c396c0ae854081cd5b1313e19019b3802da6399445b5d"
Nov 28 14:36:56 crc kubenswrapper[4857]: I1128 14:36:56.184256 4857 scope.go:117] "RemoveContainer" containerID="98316cedcbf05e9a9842f5afd5b9611a334e93c52b1c61c0d766d1579c6bbffb"
Nov 28 14:36:56 crc kubenswrapper[4857]: E1128 14:36:56.185185 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"98316cedcbf05e9a9842f5afd5b9611a334e93c52b1c61c0d766d1579c6bbffb\": container with ID starting with 98316cedcbf05e9a9842f5afd5b9611a334e93c52b1c61c0d766d1579c6bbffb not found: ID does not exist" containerID="98316cedcbf05e9a9842f5afd5b9611a334e93c52b1c61c0d766d1579c6bbffb"
Nov 28 14:36:56 crc kubenswrapper[4857]: I1128 14:36:56.185232 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98316cedcbf05e9a9842f5afd5b9611a334e93c52b1c61c0d766d1579c6bbffb"} err="failed to get container status \"98316cedcbf05e9a9842f5afd5b9611a334e93c52b1c61c0d766d1579c6bbffb\": rpc error: code = NotFound desc = could not find container \"98316cedcbf05e9a9842f5afd5b9611a334e93c52b1c61c0d766d1579c6bbffb\": container with ID starting with 98316cedcbf05e9a9842f5afd5b9611a334e93c52b1c61c0d766d1579c6bbffb not found: ID does not exist"
Nov 28 14:36:56 crc kubenswrapper[4857]: I1128 14:36:56.185256 4857 scope.go:117] "RemoveContainer" containerID="9f30fbd690dd2f1b32650a310d3506e69243adb40ccdbc4c4b8d19b4ed20af64"
Nov 28 14:36:56 crc kubenswrapper[4857]: I1128 14:36:56.185338 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0f7725d5-2b6c-4bc7-b202-95afd7ef388c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0f7725d5-2b6c-4bc7-b202-95afd7ef388c" (UID: "0f7725d5-2b6c-4bc7-b202-95afd7ef388c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 14:36:56 crc kubenswrapper[4857]: E1128 14:36:56.185676 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9f30fbd690dd2f1b32650a310d3506e69243adb40ccdbc4c4b8d19b4ed20af64\": container with ID starting with 9f30fbd690dd2f1b32650a310d3506e69243adb40ccdbc4c4b8d19b4ed20af64 not found: ID does not exist" containerID="9f30fbd690dd2f1b32650a310d3506e69243adb40ccdbc4c4b8d19b4ed20af64"
Nov 28 14:36:56 crc kubenswrapper[4857]: I1128 14:36:56.185704 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f30fbd690dd2f1b32650a310d3506e69243adb40ccdbc4c4b8d19b4ed20af64"} err="failed to get container status \"9f30fbd690dd2f1b32650a310d3506e69243adb40ccdbc4c4b8d19b4ed20af64\": rpc error: code = NotFound desc = could not find container \"9f30fbd690dd2f1b32650a310d3506e69243adb40ccdbc4c4b8d19b4ed20af64\": container with ID starting with 9f30fbd690dd2f1b32650a310d3506e69243adb40ccdbc4c4b8d19b4ed20af64 not found: ID does not exist"
Nov 28 14:36:56 crc kubenswrapper[4857]: I1128 14:36:56.185717 4857 scope.go:117] "RemoveContainer" containerID="ea9e719cede08aca136c396c0ae854081cd5b1313e19019b3802da6399445b5d"
Nov 28 14:36:56 crc kubenswrapper[4857]: E1128 14:36:56.186116 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ea9e719cede08aca136c396c0ae854081cd5b1313e19019b3802da6399445b5d\": container with ID starting with ea9e719cede08aca136c396c0ae854081cd5b1313e19019b3802da6399445b5d not found: ID does not exist" containerID="ea9e719cede08aca136c396c0ae854081cd5b1313e19019b3802da6399445b5d"
Nov 28 14:36:56 crc kubenswrapper[4857]: I1128 14:36:56.186145 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ea9e719cede08aca136c396c0ae854081cd5b1313e19019b3802da6399445b5d"} err="failed to get container status \"ea9e719cede08aca136c396c0ae854081cd5b1313e19019b3802da6399445b5d\": rpc error: code = NotFound desc = could not find container \"ea9e719cede08aca136c396c0ae854081cd5b1313e19019b3802da6399445b5d\": container with ID starting with ea9e719cede08aca136c396c0ae854081cd5b1313e19019b3802da6399445b5d not found: ID does not exist"
Nov 28 14:36:56 crc kubenswrapper[4857]: I1128 14:36:56.233650 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0f7725d5-2b6c-4bc7-b202-95afd7ef388c-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 14:36:56 crc kubenswrapper[4857]: I1128 14:36:56.233687 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5r6ml\" (UniqueName: \"kubernetes.io/projected/0f7725d5-2b6c-4bc7-b202-95afd7ef388c-kube-api-access-5r6ml\") on node \"crc\" DevicePath \"\""
Nov 28 14:36:56 crc kubenswrapper[4857]: I1128 14:36:56.233700 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0f7725d5-2b6c-4bc7-b202-95afd7ef388c-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 14:36:56 crc kubenswrapper[4857]: I1128 14:36:56.445807 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ng54x"]
Nov 28 14:36:56 crc kubenswrapper[4857]: I1128 14:36:56.458400 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-ng54x"]
Nov 28 14:36:58 crc kubenswrapper[4857]: I1128 14:36:58.240713 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0f7725d5-2b6c-4bc7-b202-95afd7ef388c" path="/var/lib/kubelet/pods/0f7725d5-2b6c-4bc7-b202-95afd7ef388c/volumes"
Nov 28 14:38:41 crc kubenswrapper[4857]: I1128 14:38:41.308460 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 14:38:41 crc kubenswrapper[4857]: I1128 14:38:41.309087 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 14:39:11 crc kubenswrapper[4857]: I1128 14:39:11.309005 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 14:39:11 crc kubenswrapper[4857]: I1128 14:39:11.309620 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 14:39:41 crc kubenswrapper[4857]: I1128 14:39:41.309157 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 14:39:41 crc kubenswrapper[4857]: I1128 14:39:41.309915 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 14:39:41 crc kubenswrapper[4857]: I1128 14:39:41.310020 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dshsf"
Nov 28 14:39:41 crc kubenswrapper[4857]: I1128 14:39:41.311024 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6eb13e547910b72d042a720b3091ecc2ce1a2ffeb08a41a1cd1730a95742cc82"} pod="openshift-machine-config-operator/machine-config-daemon-dshsf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 28 14:39:41 crc kubenswrapper[4857]: I1128 14:39:41.311119 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" containerID="cri-o://6eb13e547910b72d042a720b3091ecc2ce1a2ffeb08a41a1cd1730a95742cc82" gracePeriod=600
Nov 28 14:39:42 crc kubenswrapper[4857]: I1128 14:39:42.232298 4857 generic.go:334] "Generic (PLEG): container finished" podID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerID="6eb13e547910b72d042a720b3091ecc2ce1a2ffeb08a41a1cd1730a95742cc82" exitCode=0
Nov 28 14:39:42 crc kubenswrapper[4857]: I1128 14:39:42.238560 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerDied","Data":"6eb13e547910b72d042a720b3091ecc2ce1a2ffeb08a41a1cd1730a95742cc82"}
Nov 28 14:39:42 crc kubenswrapper[4857]: I1128 14:39:42.238616 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerStarted","Data":"369091f600b21ab0f3801f31e3df4c4871285595a38c22eb5d54131bbe855e7a"}
Nov 28 14:39:42 crc kubenswrapper[4857]: I1128 14:39:42.238636 4857 scope.go:117] "RemoveContainer" containerID="408ecac3a9253fbfbfc9eb93cc159bfb672675065a40069db743517006e942c7"
Nov 28 14:42:11 crc kubenswrapper[4857]: I1128 14:42:11.308576 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 14:42:11 crc kubenswrapper[4857]: I1128 14:42:11.310033 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 14:42:41 crc kubenswrapper[4857]: I1128 14:42:41.309001 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 14:42:41 crc kubenswrapper[4857]: I1128 14:42:41.309702 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 14:42:57 crc kubenswrapper[4857]: I1128 14:42:57.017293 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-hnlqw"]
Nov 28 14:42:57 crc kubenswrapper[4857]: E1128 14:42:57.018999 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f7725d5-2b6c-4bc7-b202-95afd7ef388c" containerName="extract-utilities"
Nov 28 14:42:57 crc kubenswrapper[4857]: I1128 14:42:57.019031 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f7725d5-2b6c-4bc7-b202-95afd7ef388c" containerName="extract-utilities"
Nov 28 14:42:57 crc kubenswrapper[4857]: E1128 14:42:57.019078 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f7725d5-2b6c-4bc7-b202-95afd7ef388c" containerName="extract-content"
Nov 28 14:42:57 crc kubenswrapper[4857]: I1128 14:42:57.019099 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f7725d5-2b6c-4bc7-b202-95afd7ef388c" containerName="extract-content"
Nov 28 14:42:57 crc kubenswrapper[4857]: E1128 14:42:57.019149 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76b59b48-f6fb-4c6b-a4f6-3906b5669adb" containerName="extract-content"
Nov 28 14:42:57 crc kubenswrapper[4857]: I1128 14:42:57.019167 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="76b59b48-f6fb-4c6b-a4f6-3906b5669adb" containerName="extract-content"
Nov 28 14:42:57 crc kubenswrapper[4857]: E1128 14:42:57.019206 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76b59b48-f6fb-4c6b-a4f6-3906b5669adb" containerName="extract-utilities"
Nov 28 14:42:57 crc kubenswrapper[4857]: I1128 14:42:57.019226 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="76b59b48-f6fb-4c6b-a4f6-3906b5669adb" containerName="extract-utilities"
Nov 28 14:42:57 crc kubenswrapper[4857]: E1128 14:42:57.019252 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76b59b48-f6fb-4c6b-a4f6-3906b5669adb" containerName="registry-server"
Nov 28 14:42:57 crc kubenswrapper[4857]: I1128 14:42:57.019269 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="76b59b48-f6fb-4c6b-a4f6-3906b5669adb" containerName="registry-server"
Nov 28 14:42:57 crc kubenswrapper[4857]: E1128 14:42:57.019296 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f7725d5-2b6c-4bc7-b202-95afd7ef388c" containerName="registry-server"
Nov 28 14:42:57 crc kubenswrapper[4857]: I1128 14:42:57.019313 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f7725d5-2b6c-4bc7-b202-95afd7ef388c" containerName="registry-server"
Nov 28 14:42:57 crc kubenswrapper[4857]: I1128 14:42:57.019685 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="76b59b48-f6fb-4c6b-a4f6-3906b5669adb" containerName="registry-server"
Nov 28 14:42:57 crc kubenswrapper[4857]: I1128 14:42:57.019754 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f7725d5-2b6c-4bc7-b202-95afd7ef388c" containerName="registry-server"
Nov 28 14:42:57 crc kubenswrapper[4857]: I1128 14:42:57.022397 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hnlqw"
Nov 28 14:42:57 crc kubenswrapper[4857]: I1128 14:42:57.030463 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e0cd9e0f-528c-464a-9507-9197bc1021e6-catalog-content\") pod \"redhat-operators-hnlqw\" (UID: \"e0cd9e0f-528c-464a-9507-9197bc1021e6\") " pod="openshift-marketplace/redhat-operators-hnlqw"
Nov 28 14:42:57 crc kubenswrapper[4857]: I1128 14:42:57.030540 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sg27h\" (UniqueName: \"kubernetes.io/projected/e0cd9e0f-528c-464a-9507-9197bc1021e6-kube-api-access-sg27h\") pod \"redhat-operators-hnlqw\" (UID: \"e0cd9e0f-528c-464a-9507-9197bc1021e6\") " pod="openshift-marketplace/redhat-operators-hnlqw"
Nov 28 14:42:57 crc kubenswrapper[4857]: I1128 14:42:57.030756 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e0cd9e0f-528c-464a-9507-9197bc1021e6-utilities\") pod \"redhat-operators-hnlqw\" (UID: \"e0cd9e0f-528c-464a-9507-9197bc1021e6\") " pod="openshift-marketplace/redhat-operators-hnlqw"
Nov 28 14:42:57 crc kubenswrapper[4857]: I1128 14:42:57.040483 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hnlqw"]
Nov 28 14:42:57 crc kubenswrapper[4857]: I1128 14:42:57.134171 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e0cd9e0f-528c-464a-9507-9197bc1021e6-catalog-content\") pod \"redhat-operators-hnlqw\" (UID: \"e0cd9e0f-528c-464a-9507-9197bc1021e6\") " pod="openshift-marketplace/redhat-operators-hnlqw"
Nov 28 14:42:57 crc kubenswrapper[4857]: I1128 14:42:57.134240 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sg27h\" (UniqueName: \"kubernetes.io/projected/e0cd9e0f-528c-464a-9507-9197bc1021e6-kube-api-access-sg27h\") pod \"redhat-operators-hnlqw\" (UID: \"e0cd9e0f-528c-464a-9507-9197bc1021e6\") " pod="openshift-marketplace/redhat-operators-hnlqw"
Nov 28 14:42:57 crc kubenswrapper[4857]: I1128 14:42:57.134350 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e0cd9e0f-528c-464a-9507-9197bc1021e6-utilities\") pod \"redhat-operators-hnlqw\" (UID: \"e0cd9e0f-528c-464a-9507-9197bc1021e6\") " pod="openshift-marketplace/redhat-operators-hnlqw"
Nov 28 14:42:57 crc kubenswrapper[4857]: I1128 14:42:57.134917 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e0cd9e0f-528c-464a-9507-9197bc1021e6-catalog-content\") pod \"redhat-operators-hnlqw\" (UID: \"e0cd9e0f-528c-464a-9507-9197bc1021e6\") " pod="openshift-marketplace/redhat-operators-hnlqw"
Nov 28 14:42:57 crc kubenswrapper[4857]: I1128 14:42:57.135077 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e0cd9e0f-528c-464a-9507-9197bc1021e6-utilities\") pod \"redhat-operators-hnlqw\" (UID: \"e0cd9e0f-528c-464a-9507-9197bc1021e6\") " pod="openshift-marketplace/redhat-operators-hnlqw"
Nov 28 14:42:57 crc kubenswrapper[4857]: I1128 14:42:57.160770 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sg27h\" (UniqueName: \"kubernetes.io/projected/e0cd9e0f-528c-464a-9507-9197bc1021e6-kube-api-access-sg27h\") pod \"redhat-operators-hnlqw\" (UID: \"e0cd9e0f-528c-464a-9507-9197bc1021e6\") " pod="openshift-marketplace/redhat-operators-hnlqw"
Nov 28 14:42:57 crc kubenswrapper[4857]: I1128 14:42:57.346685 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hnlqw"
Nov 28 14:42:57 crc kubenswrapper[4857]: I1128 14:42:57.592500 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hnlqw"]
Nov 28 14:42:57 crc kubenswrapper[4857]: I1128 14:42:57.766892 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hnlqw" event={"ID":"e0cd9e0f-528c-464a-9507-9197bc1021e6","Type":"ContainerStarted","Data":"f196d2d6a3770079bdd11e10c57b0a1a4f478240e05d9f9597d5a68959f96a8e"}
Nov 28 14:42:58 crc kubenswrapper[4857]: I1128 14:42:58.774476 4857 generic.go:334] "Generic (PLEG): container finished" podID="e0cd9e0f-528c-464a-9507-9197bc1021e6" containerID="e69024de1e06d31f1879ff7c8b5a26a6dc42d04194db87c37dcdce765913af64" exitCode=0
Nov 28 14:42:58 crc kubenswrapper[4857]: I1128 14:42:58.774527 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hnlqw" event={"ID":"e0cd9e0f-528c-464a-9507-9197bc1021e6","Type":"ContainerDied","Data":"e69024de1e06d31f1879ff7c8b5a26a6dc42d04194db87c37dcdce765913af64"}
Nov 28 14:42:58 crc kubenswrapper[4857]: I1128 14:42:58.777045 4857 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 28 14:42:59 crc kubenswrapper[4857]: I1128 14:42:59.783510 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hnlqw" event={"ID":"e0cd9e0f-528c-464a-9507-9197bc1021e6","Type":"ContainerStarted","Data":"20f8fba3bdbddb1fe74fa814cbb5e3d4b6f37d9dc2140c49490182de0b3d205e"}
Nov 28 14:43:00 crc kubenswrapper[4857]: I1128 14:43:00.792267 4857 generic.go:334] "Generic (PLEG): container finished" podID="e0cd9e0f-528c-464a-9507-9197bc1021e6" containerID="20f8fba3bdbddb1fe74fa814cbb5e3d4b6f37d9dc2140c49490182de0b3d205e" exitCode=0
Nov 28 14:43:00 crc kubenswrapper[4857]: I1128 14:43:00.792369 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hnlqw" event={"ID":"e0cd9e0f-528c-464a-9507-9197bc1021e6","Type":"ContainerDied","Data":"20f8fba3bdbddb1fe74fa814cbb5e3d4b6f37d9dc2140c49490182de0b3d205e"}
Nov 28 14:43:01 crc kubenswrapper[4857]: I1128 14:43:01.803731 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hnlqw" event={"ID":"e0cd9e0f-528c-464a-9507-9197bc1021e6","Type":"ContainerStarted","Data":"c8763fa2a5964c4484a4a441c74c136a9a268abbe0b2bd7b607d0e57a86df20b"}
Nov 28 14:43:01 crc kubenswrapper[4857]: I1128 14:43:01.827116 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-hnlqw" podStartSLOduration=3.337030392 podStartE2EDuration="5.827091536s" podCreationTimestamp="2025-11-28 14:42:56 +0000 UTC" firstStartedPulling="2025-11-28 14:42:58.776791027 +0000 UTC m=+4428.900732454" lastFinishedPulling="2025-11-28 14:43:01.266852151 +0000 UTC m=+4431.390793598" observedRunningTime="2025-11-28 14:43:01.821521898 +0000 UTC m=+4431.945463335" watchObservedRunningTime="2025-11-28 14:43:01.827091536 +0000 UTC m=+4431.951032973"
Nov 28 14:43:07 crc kubenswrapper[4857]: I1128 14:43:07.347868 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-hnlqw"
Nov 28 14:43:07 crc kubenswrapper[4857]: I1128 14:43:07.350696 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-hnlqw"
Nov 28 14:43:07 crc kubenswrapper[4857]: I1128 14:43:07.414467 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-hnlqw"
Nov 28 14:43:07 crc kubenswrapper[4857]: I1128 14:43:07.888904 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-hnlqw"
Nov 28 14:43:07 crc kubenswrapper[4857]: I1128 14:43:07.942007 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hnlqw"]
Nov 28 14:43:09 crc kubenswrapper[4857]: I1128 14:43:09.859671 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-hnlqw" podUID="e0cd9e0f-528c-464a-9507-9197bc1021e6" containerName="registry-server" containerID="cri-o://c8763fa2a5964c4484a4a441c74c136a9a268abbe0b2bd7b607d0e57a86df20b" gracePeriod=2
Nov 28 14:43:11 crc kubenswrapper[4857]: I1128 14:43:11.308478 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 14:43:11 crc kubenswrapper[4857]: I1128 14:43:11.308548 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 14:43:11 crc kubenswrapper[4857]: I1128 14:43:11.308608 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dshsf"
Nov 28 14:43:11 crc kubenswrapper[4857]: I1128 14:43:11.309326 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"369091f600b21ab0f3801f31e3df4c4871285595a38c22eb5d54131bbe855e7a"} pod="openshift-machine-config-operator/machine-config-daemon-dshsf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 28 14:43:11 crc kubenswrapper[4857]: I1128 14:43:11.309393 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" containerID="cri-o://369091f600b21ab0f3801f31e3df4c4871285595a38c22eb5d54131bbe855e7a" gracePeriod=600
Nov 28 14:43:11 crc kubenswrapper[4857]: I1128 14:43:11.886567 4857 generic.go:334] "Generic (PLEG): container finished" podID="e0cd9e0f-528c-464a-9507-9197bc1021e6" containerID="c8763fa2a5964c4484a4a441c74c136a9a268abbe0b2bd7b607d0e57a86df20b" exitCode=0
Nov 28 14:43:11 crc kubenswrapper[4857]: I1128 14:43:11.886758 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hnlqw" event={"ID":"e0cd9e0f-528c-464a-9507-9197bc1021e6","Type":"ContainerDied","Data":"c8763fa2a5964c4484a4a441c74c136a9a268abbe0b2bd7b607d0e57a86df20b"}
Nov 28 14:43:12 crc kubenswrapper[4857]: I1128 14:43:12.394367 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hnlqw"
Nov 28 14:43:12 crc kubenswrapper[4857]: E1128 14:43:12.541919 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 14:43:12 crc kubenswrapper[4857]: I1128 14:43:12.557053 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sg27h\" (UniqueName: \"kubernetes.io/projected/e0cd9e0f-528c-464a-9507-9197bc1021e6-kube-api-access-sg27h\") pod \"e0cd9e0f-528c-464a-9507-9197bc1021e6\" (UID: \"e0cd9e0f-528c-464a-9507-9197bc1021e6\") "
Nov 28 14:43:12 crc kubenswrapper[4857]: I1128 14:43:12.557114 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e0cd9e0f-528c-464a-9507-9197bc1021e6-utilities\") pod \"e0cd9e0f-528c-464a-9507-9197bc1021e6\" (UID: \"e0cd9e0f-528c-464a-9507-9197bc1021e6\") "
Nov 28 14:43:12 crc kubenswrapper[4857]: I1128 14:43:12.557150 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e0cd9e0f-528c-464a-9507-9197bc1021e6-catalog-content\") pod \"e0cd9e0f-528c-464a-9507-9197bc1021e6\" (UID: \"e0cd9e0f-528c-464a-9507-9197bc1021e6\") "
Nov 28 14:43:12 crc kubenswrapper[4857]: I1128 14:43:12.558298 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e0cd9e0f-528c-464a-9507-9197bc1021e6-utilities" (OuterVolumeSpecName: "utilities") pod "e0cd9e0f-528c-464a-9507-9197bc1021e6" (UID: "e0cd9e0f-528c-464a-9507-9197bc1021e6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 14:43:12 crc kubenswrapper[4857]: I1128 14:43:12.568141 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e0cd9e0f-528c-464a-9507-9197bc1021e6-kube-api-access-sg27h" (OuterVolumeSpecName: "kube-api-access-sg27h") pod "e0cd9e0f-528c-464a-9507-9197bc1021e6" (UID: "e0cd9e0f-528c-464a-9507-9197bc1021e6"). InnerVolumeSpecName "kube-api-access-sg27h". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 14:43:12 crc kubenswrapper[4857]: I1128 14:43:12.659450 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sg27h\" (UniqueName: \"kubernetes.io/projected/e0cd9e0f-528c-464a-9507-9197bc1021e6-kube-api-access-sg27h\") on node \"crc\" DevicePath \"\""
Nov 28 14:43:12 crc kubenswrapper[4857]: I1128 14:43:12.659501 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e0cd9e0f-528c-464a-9507-9197bc1021e6-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 14:43:12 crc kubenswrapper[4857]: I1128 14:43:12.678298 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e0cd9e0f-528c-464a-9507-9197bc1021e6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e0cd9e0f-528c-464a-9507-9197bc1021e6" (UID: "e0cd9e0f-528c-464a-9507-9197bc1021e6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 14:43:12 crc kubenswrapper[4857]: I1128 14:43:12.760328 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e0cd9e0f-528c-464a-9507-9197bc1021e6-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 14:43:12 crc kubenswrapper[4857]: I1128 14:43:12.896289 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hnlqw" event={"ID":"e0cd9e0f-528c-464a-9507-9197bc1021e6","Type":"ContainerDied","Data":"f196d2d6a3770079bdd11e10c57b0a1a4f478240e05d9f9597d5a68959f96a8e"}
Nov 28 14:43:12 crc kubenswrapper[4857]: I1128 14:43:12.896319 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hnlqw"
Nov 28 14:43:12 crc kubenswrapper[4857]: I1128 14:43:12.896338 4857 scope.go:117] "RemoveContainer" containerID="c8763fa2a5964c4484a4a441c74c136a9a268abbe0b2bd7b607d0e57a86df20b"
Nov 28 14:43:12 crc kubenswrapper[4857]: I1128 14:43:12.899192 4857 generic.go:334] "Generic (PLEG): container finished" podID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerID="369091f600b21ab0f3801f31e3df4c4871285595a38c22eb5d54131bbe855e7a" exitCode=0
Nov 28 14:43:12 crc kubenswrapper[4857]: I1128 14:43:12.899286 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerDied","Data":"369091f600b21ab0f3801f31e3df4c4871285595a38c22eb5d54131bbe855e7a"}
Nov 28 14:43:12 crc kubenswrapper[4857]: I1128 14:43:12.900356 4857 scope.go:117] "RemoveContainer" containerID="369091f600b21ab0f3801f31e3df4c4871285595a38c22eb5d54131bbe855e7a"
Nov 28 14:43:12 crc kubenswrapper[4857]: E1128 14:43:12.900738 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 14:43:12 crc kubenswrapper[4857]: I1128 14:43:12.920135 4857 scope.go:117] "RemoveContainer" containerID="20f8fba3bdbddb1fe74fa814cbb5e3d4b6f37d9dc2140c49490182de0b3d205e"
Nov 28 14:43:12 crc kubenswrapper[4857]: I1128 14:43:12.938647 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hnlqw"]
Nov 28 14:43:12 crc kubenswrapper[4857]: I1128 14:43:12.946126 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-hnlqw"]
Nov 28 14:43:12 crc kubenswrapper[4857]: I1128 14:43:12.974372 4857 scope.go:117] "RemoveContainer" containerID="e69024de1e06d31f1879ff7c8b5a26a6dc42d04194db87c37dcdce765913af64"
Nov 28 14:43:13 crc kubenswrapper[4857]: I1128 14:43:13.006269 4857 scope.go:117] "RemoveContainer" containerID="6eb13e547910b72d042a720b3091ecc2ce1a2ffeb08a41a1cd1730a95742cc82"
Nov 28 14:43:14 crc kubenswrapper[4857]: I1128 14:43:14.238334 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e0cd9e0f-528c-464a-9507-9197bc1021e6" path="/var/lib/kubelet/pods/e0cd9e0f-528c-464a-9507-9197bc1021e6/volumes"
Nov 28 14:43:27 crc kubenswrapper[4857]: I1128 14:43:27.228898 4857 scope.go:117] "RemoveContainer" containerID="369091f600b21ab0f3801f31e3df4c4871285595a38c22eb5d54131bbe855e7a"
Nov 28 14:43:27 crc kubenswrapper[4857]: E1128 14:43:27.230123 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 14:43:41 crc kubenswrapper[4857]: I1128 14:43:41.229017 4857 scope.go:117] "RemoveContainer" containerID="369091f600b21ab0f3801f31e3df4c4871285595a38c22eb5d54131bbe855e7a"
Nov 28 14:43:41 crc kubenswrapper[4857]: E1128 14:43:41.230121 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 14:43:52 crc kubenswrapper[4857]: I1128 14:43:52.229247 4857 scope.go:117] "RemoveContainer" containerID="369091f600b21ab0f3801f31e3df4c4871285595a38c22eb5d54131bbe855e7a"
Nov 28 14:43:52 crc kubenswrapper[4857]: E1128 14:43:52.230099 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 14:44:07 crc kubenswrapper[4857]: I1128 14:44:07.229454 4857 scope.go:117] "RemoveContainer" containerID="369091f600b21ab0f3801f31e3df4c4871285595a38c22eb5d54131bbe855e7a"
Nov 28 14:44:07 crc kubenswrapper[4857]: E1128 14:44:07.230222 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 14:44:20 crc kubenswrapper[4857]: I1128 14:44:20.232814 4857 scope.go:117] "RemoveContainer" containerID="369091f600b21ab0f3801f31e3df4c4871285595a38c22eb5d54131bbe855e7a"
Nov 28 14:44:20 crc kubenswrapper[4857]: E1128 14:44:20.233484 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 14:44:33 crc kubenswrapper[4857]: I1128 14:44:33.229345 4857 scope.go:117] "RemoveContainer" containerID="369091f600b21ab0f3801f31e3df4c4871285595a38c22eb5d54131bbe855e7a"
Nov 28 14:44:33 crc kubenswrapper[4857]: E1128 14:44:33.230395 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 14:44:38 crc kubenswrapper[4857]: I1128 14:44:38.489469 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-mgnrw"]
Nov 28 14:44:38 crc kubenswrapper[4857]: E1128 14:44:38.490433 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0cd9e0f-528c-464a-9507-9197bc1021e6" containerName="extract-content"
Nov 28 14:44:38 crc kubenswrapper[4857]: I1128 14:44:38.490448 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0cd9e0f-528c-464a-9507-9197bc1021e6" containerName="extract-content"
Nov 28 14:44:38 crc kubenswrapper[4857]: E1128 14:44:38.490460 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0cd9e0f-528c-464a-9507-9197bc1021e6" containerName="registry-server"
Nov 28 14:44:38 crc kubenswrapper[4857]: I1128 14:44:38.490469 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0cd9e0f-528c-464a-9507-9197bc1021e6" containerName="registry-server"
Nov 28 14:44:38 crc kubenswrapper[4857]: E1128 14:44:38.490496 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0cd9e0f-528c-464a-9507-9197bc1021e6" containerName="extract-utilities"
Nov 28 14:44:38 crc kubenswrapper[4857]: I1128 14:44:38.490505 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0cd9e0f-528c-464a-9507-9197bc1021e6" containerName="extract-utilities"
Nov 28 14:44:38 crc kubenswrapper[4857]: I1128 14:44:38.490680 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0cd9e0f-528c-464a-9507-9197bc1021e6" containerName="registry-server"
Nov 28 14:44:38 crc kubenswrapper[4857]: I1128 14:44:38.491970 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mgnrw"
Nov 28 14:44:38 crc kubenswrapper[4857]: I1128 14:44:38.512308 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-mgnrw"]
Nov 28 14:44:38 crc kubenswrapper[4857]: I1128 14:44:38.550019 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d9jd8\" (UniqueName: \"kubernetes.io/projected/05a2c5f4-71f4-4c43-9ddd-33925d91ac8f-kube-api-access-d9jd8\") pod \"certified-operators-mgnrw\" (UID: \"05a2c5f4-71f4-4c43-9ddd-33925d91ac8f\") " pod="openshift-marketplace/certified-operators-mgnrw"
Nov 28 14:44:38 crc kubenswrapper[4857]: I1128 14:44:38.550116 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/05a2c5f4-71f4-4c43-9ddd-33925d91ac8f-catalog-content\") pod \"certified-operators-mgnrw\" (UID: \"05a2c5f4-71f4-4c43-9ddd-33925d91ac8f\") " pod="openshift-marketplace/certified-operators-mgnrw"
Nov 28 14:44:38 crc kubenswrapper[4857]: I1128 14:44:38.550153 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/05a2c5f4-71f4-4c43-9ddd-33925d91ac8f-utilities\") pod \"certified-operators-mgnrw\" (UID: \"05a2c5f4-71f4-4c43-9ddd-33925d91ac8f\") " pod="openshift-marketplace/certified-operators-mgnrw"
Nov 28 14:44:38 crc kubenswrapper[4857]: I1128 14:44:38.651346 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d9jd8\" (UniqueName: \"kubernetes.io/projected/05a2c5f4-71f4-4c43-9ddd-33925d91ac8f-kube-api-access-d9jd8\") pod \"certified-operators-mgnrw\" (UID: \"05a2c5f4-71f4-4c43-9ddd-33925d91ac8f\") " pod="openshift-marketplace/certified-operators-mgnrw"
Nov 28 14:44:38 crc kubenswrapper[4857]: I1128 14:44:38.651422 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/05a2c5f4-71f4-4c43-9ddd-33925d91ac8f-catalog-content\") pod \"certified-operators-mgnrw\" (UID: \"05a2c5f4-71f4-4c43-9ddd-33925d91ac8f\") " pod="openshift-marketplace/certified-operators-mgnrw"
Nov 28 14:44:38 crc kubenswrapper[4857]: I1128 14:44:38.651452 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/05a2c5f4-71f4-4c43-9ddd-33925d91ac8f-utilities\") pod \"certified-operators-mgnrw\" (UID: \"05a2c5f4-71f4-4c43-9ddd-33925d91ac8f\") " pod="openshift-marketplace/certified-operators-mgnrw"
Nov 28 14:44:38 crc kubenswrapper[4857]: I1128 14:44:38.652174 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/05a2c5f4-71f4-4c43-9ddd-33925d91ac8f-utilities\") pod \"certified-operators-mgnrw\" (UID: \"05a2c5f4-71f4-4c43-9ddd-33925d91ac8f\") " pod="openshift-marketplace/certified-operators-mgnrw"
Nov 28 14:44:38 crc kubenswrapper[4857]: I1128 14:44:38.652504 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/05a2c5f4-71f4-4c43-9ddd-33925d91ac8f-catalog-content\") pod \"certified-operators-mgnrw\" (UID: \"05a2c5f4-71f4-4c43-9ddd-33925d91ac8f\") " pod="openshift-marketplace/certified-operators-mgnrw"
Nov 28 14:44:38 crc kubenswrapper[4857]: I1128 14:44:38.676637 4857 operation_generator.go:637]
"MountVolume.SetUp succeeded for volume \"kube-api-access-d9jd8\" (UniqueName: \"kubernetes.io/projected/05a2c5f4-71f4-4c43-9ddd-33925d91ac8f-kube-api-access-d9jd8\") pod \"certified-operators-mgnrw\" (UID: \"05a2c5f4-71f4-4c43-9ddd-33925d91ac8f\") " pod="openshift-marketplace/certified-operators-mgnrw" Nov 28 14:44:38 crc kubenswrapper[4857]: I1128 14:44:38.815612 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mgnrw" Nov 28 14:44:39 crc kubenswrapper[4857]: I1128 14:44:39.360993 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-mgnrw"] Nov 28 14:44:39 crc kubenswrapper[4857]: I1128 14:44:39.589819 4857 generic.go:334] "Generic (PLEG): container finished" podID="05a2c5f4-71f4-4c43-9ddd-33925d91ac8f" containerID="c252d149edae3986657cc17c87a7c28e66938de251424204c68414b589efcf3a" exitCode=0 Nov 28 14:44:39 crc kubenswrapper[4857]: I1128 14:44:39.589876 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mgnrw" event={"ID":"05a2c5f4-71f4-4c43-9ddd-33925d91ac8f","Type":"ContainerDied","Data":"c252d149edae3986657cc17c87a7c28e66938de251424204c68414b589efcf3a"} Nov 28 14:44:39 crc kubenswrapper[4857]: I1128 14:44:39.589931 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mgnrw" event={"ID":"05a2c5f4-71f4-4c43-9ddd-33925d91ac8f","Type":"ContainerStarted","Data":"76ee1fde0add1aafa5cc7c795d3b87487d4cc2d9c61685e6435d33d782d286a6"} Nov 28 14:44:41 crc kubenswrapper[4857]: I1128 14:44:41.612088 4857 generic.go:334] "Generic (PLEG): container finished" podID="05a2c5f4-71f4-4c43-9ddd-33925d91ac8f" containerID="b3b4788f8ed251ddc668b5e3edcdac105c32045d18a5126a6a1f1332520e7e5a" exitCode=0 Nov 28 14:44:41 crc kubenswrapper[4857]: I1128 14:44:41.612194 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mgnrw" event={"ID":"05a2c5f4-71f4-4c43-9ddd-33925d91ac8f","Type":"ContainerDied","Data":"b3b4788f8ed251ddc668b5e3edcdac105c32045d18a5126a6a1f1332520e7e5a"} Nov 28 14:44:42 crc kubenswrapper[4857]: I1128 14:44:42.622329 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mgnrw" event={"ID":"05a2c5f4-71f4-4c43-9ddd-33925d91ac8f","Type":"ContainerStarted","Data":"e7f4d5a860a2fecc3bae7e54353c95e2483cc60e8ac0960f8e997e8e9c01114f"} Nov 28 14:44:42 crc kubenswrapper[4857]: I1128 14:44:42.640410 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-mgnrw" podStartSLOduration=2.161708919 podStartE2EDuration="4.640391691s" podCreationTimestamp="2025-11-28 14:44:38 +0000 UTC" firstStartedPulling="2025-11-28 14:44:39.591200942 +0000 UTC m=+4529.715142379" lastFinishedPulling="2025-11-28 14:44:42.069883724 +0000 UTC m=+4532.193825151" observedRunningTime="2025-11-28 14:44:42.639148658 +0000 UTC m=+4532.763090095" watchObservedRunningTime="2025-11-28 14:44:42.640391691 +0000 UTC m=+4532.764333128" Nov 28 14:44:46 crc kubenswrapper[4857]: I1128 14:44:46.229294 4857 scope.go:117] "RemoveContainer" containerID="369091f600b21ab0f3801f31e3df4c4871285595a38c22eb5d54131bbe855e7a" Nov 28 14:44:46 crc kubenswrapper[4857]: E1128 14:44:46.229737 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:44:48 crc kubenswrapper[4857]: I1128 14:44:48.816121 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-mgnrw" Nov 28 14:44:48 crc kubenswrapper[4857]: I1128 14:44:48.816509 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-mgnrw" Nov 28 14:44:48 crc kubenswrapper[4857]: I1128 14:44:48.864217 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-mgnrw" Nov 28 14:44:49 crc kubenswrapper[4857]: I1128 14:44:49.702332 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-mgnrw" Nov 28 14:44:49 crc kubenswrapper[4857]: I1128 14:44:49.749542 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-mgnrw"] Nov 28 14:44:51 crc kubenswrapper[4857]: I1128 14:44:51.677137 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-mgnrw" podUID="05a2c5f4-71f4-4c43-9ddd-33925d91ac8f" containerName="registry-server" containerID="cri-o://e7f4d5a860a2fecc3bae7e54353c95e2483cc60e8ac0960f8e997e8e9c01114f" gracePeriod=2 Nov 28 14:44:52 crc kubenswrapper[4857]: I1128 14:44:52.687649 4857 generic.go:334] "Generic (PLEG): container finished" podID="05a2c5f4-71f4-4c43-9ddd-33925d91ac8f" containerID="e7f4d5a860a2fecc3bae7e54353c95e2483cc60e8ac0960f8e997e8e9c01114f" exitCode=0 Nov 28 14:44:52 crc kubenswrapper[4857]: I1128 14:44:52.688159 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mgnrw" event={"ID":"05a2c5f4-71f4-4c43-9ddd-33925d91ac8f","Type":"ContainerDied","Data":"e7f4d5a860a2fecc3bae7e54353c95e2483cc60e8ac0960f8e997e8e9c01114f"} Nov 28 14:44:53 crc kubenswrapper[4857]: I1128 14:44:53.236694 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-mgnrw" Nov 28 14:44:53 crc kubenswrapper[4857]: I1128 14:44:53.269207 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d9jd8\" (UniqueName: \"kubernetes.io/projected/05a2c5f4-71f4-4c43-9ddd-33925d91ac8f-kube-api-access-d9jd8\") pod \"05a2c5f4-71f4-4c43-9ddd-33925d91ac8f\" (UID: \"05a2c5f4-71f4-4c43-9ddd-33925d91ac8f\") " Nov 28 14:44:53 crc kubenswrapper[4857]: I1128 14:44:53.269286 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/05a2c5f4-71f4-4c43-9ddd-33925d91ac8f-utilities\") pod \"05a2c5f4-71f4-4c43-9ddd-33925d91ac8f\" (UID: \"05a2c5f4-71f4-4c43-9ddd-33925d91ac8f\") " Nov 28 14:44:53 crc kubenswrapper[4857]: I1128 14:44:53.269400 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/05a2c5f4-71f4-4c43-9ddd-33925d91ac8f-catalog-content\") pod \"05a2c5f4-71f4-4c43-9ddd-33925d91ac8f\" (UID: \"05a2c5f4-71f4-4c43-9ddd-33925d91ac8f\") " Nov 28 14:44:53 crc kubenswrapper[4857]: I1128 14:44:53.272473 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/05a2c5f4-71f4-4c43-9ddd-33925d91ac8f-utilities" (OuterVolumeSpecName: "utilities") pod "05a2c5f4-71f4-4c43-9ddd-33925d91ac8f" (UID: "05a2c5f4-71f4-4c43-9ddd-33925d91ac8f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:44:53 crc kubenswrapper[4857]: I1128 14:44:53.277323 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05a2c5f4-71f4-4c43-9ddd-33925d91ac8f-kube-api-access-d9jd8" (OuterVolumeSpecName: "kube-api-access-d9jd8") pod "05a2c5f4-71f4-4c43-9ddd-33925d91ac8f" (UID: "05a2c5f4-71f4-4c43-9ddd-33925d91ac8f"). InnerVolumeSpecName "kube-api-access-d9jd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:44:53 crc kubenswrapper[4857]: I1128 14:44:53.324660 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/05a2c5f4-71f4-4c43-9ddd-33925d91ac8f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "05a2c5f4-71f4-4c43-9ddd-33925d91ac8f" (UID: "05a2c5f4-71f4-4c43-9ddd-33925d91ac8f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:44:53 crc kubenswrapper[4857]: I1128 14:44:53.371239 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d9jd8\" (UniqueName: \"kubernetes.io/projected/05a2c5f4-71f4-4c43-9ddd-33925d91ac8f-kube-api-access-d9jd8\") on node \"crc\" DevicePath \"\"" Nov 28 14:44:53 crc kubenswrapper[4857]: I1128 14:44:53.371267 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/05a2c5f4-71f4-4c43-9ddd-33925d91ac8f-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 14:44:53 crc kubenswrapper[4857]: I1128 14:44:53.371276 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/05a2c5f4-71f4-4c43-9ddd-33925d91ac8f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 14:44:53 crc kubenswrapper[4857]: I1128 14:44:53.703935 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-mgnrw" event={"ID":"05a2c5f4-71f4-4c43-9ddd-33925d91ac8f","Type":"ContainerDied","Data":"76ee1fde0add1aafa5cc7c795d3b87487d4cc2d9c61685e6435d33d782d286a6"} Nov 28 14:44:53 crc kubenswrapper[4857]: I1128 14:44:53.704013 4857 scope.go:117] "RemoveContainer" containerID="e7f4d5a860a2fecc3bae7e54353c95e2483cc60e8ac0960f8e997e8e9c01114f" Nov 28 14:44:53 crc kubenswrapper[4857]: I1128 14:44:53.704182 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-mgnrw" Nov 28 14:44:53 crc kubenswrapper[4857]: I1128 14:44:53.739697 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-mgnrw"] Nov 28 14:44:53 crc kubenswrapper[4857]: I1128 14:44:53.744353 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-mgnrw"] Nov 28 14:44:53 crc kubenswrapper[4857]: I1128 14:44:53.751211 4857 scope.go:117] "RemoveContainer" containerID="b3b4788f8ed251ddc668b5e3edcdac105c32045d18a5126a6a1f1332520e7e5a" Nov 28 14:44:53 crc kubenswrapper[4857]: I1128 14:44:53.775667 4857 scope.go:117] "RemoveContainer" containerID="c252d149edae3986657cc17c87a7c28e66938de251424204c68414b589efcf3a" Nov 28 14:44:54 crc kubenswrapper[4857]: I1128 14:44:54.241015 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="05a2c5f4-71f4-4c43-9ddd-33925d91ac8f" path="/var/lib/kubelet/pods/05a2c5f4-71f4-4c43-9ddd-33925d91ac8f/volumes" Nov 28 14:44:59 crc kubenswrapper[4857]: I1128 14:44:59.228852 4857 scope.go:117] "RemoveContainer" containerID="369091f600b21ab0f3801f31e3df4c4871285595a38c22eb5d54131bbe855e7a" Nov 28 14:44:59 crc kubenswrapper[4857]: E1128 14:44:59.229538 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:45:00 crc kubenswrapper[4857]: I1128 14:45:00.166787 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405685-wvm5t"] Nov 28 14:45:00 crc kubenswrapper[4857]: E1128 14:45:00.167130 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05a2c5f4-71f4-4c43-9ddd-33925d91ac8f" 
containerName="extract-content" Nov 28 14:45:00 crc kubenswrapper[4857]: I1128 14:45:00.167152 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="05a2c5f4-71f4-4c43-9ddd-33925d91ac8f" containerName="extract-content" Nov 28 14:45:00 crc kubenswrapper[4857]: E1128 14:45:00.167167 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05a2c5f4-71f4-4c43-9ddd-33925d91ac8f" containerName="extract-utilities" Nov 28 14:45:00 crc kubenswrapper[4857]: I1128 14:45:00.167174 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="05a2c5f4-71f4-4c43-9ddd-33925d91ac8f" containerName="extract-utilities" Nov 28 14:45:00 crc kubenswrapper[4857]: E1128 14:45:00.167184 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05a2c5f4-71f4-4c43-9ddd-33925d91ac8f" containerName="registry-server" Nov 28 14:45:00 crc kubenswrapper[4857]: I1128 14:45:00.167192 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="05a2c5f4-71f4-4c43-9ddd-33925d91ac8f" containerName="registry-server" Nov 28 14:45:00 crc kubenswrapper[4857]: I1128 14:45:00.167349 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="05a2c5f4-71f4-4c43-9ddd-33925d91ac8f" containerName="registry-server" Nov 28 14:45:00 crc kubenswrapper[4857]: I1128 14:45:00.167911 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405685-wvm5t" Nov 28 14:45:00 crc kubenswrapper[4857]: I1128 14:45:00.169882 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 14:45:00 crc kubenswrapper[4857]: I1128 14:45:00.170226 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 14:45:00 crc kubenswrapper[4857]: I1128 14:45:00.177224 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405685-wvm5t"] Nov 28 14:45:00 crc kubenswrapper[4857]: I1128 14:45:00.275384 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/49637a1d-30df-4680-9bca-b28a28be48cb-secret-volume\") pod \"collect-profiles-29405685-wvm5t\" (UID: \"49637a1d-30df-4680-9bca-b28a28be48cb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405685-wvm5t" Nov 28 14:45:00 crc kubenswrapper[4857]: I1128 14:45:00.275468 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/49637a1d-30df-4680-9bca-b28a28be48cb-config-volume\") pod \"collect-profiles-29405685-wvm5t\" (UID: \"49637a1d-30df-4680-9bca-b28a28be48cb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405685-wvm5t" Nov 28 14:45:00 crc kubenswrapper[4857]: I1128 14:45:00.275569 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m5rl9\" (UniqueName: \"kubernetes.io/projected/49637a1d-30df-4680-9bca-b28a28be48cb-kube-api-access-m5rl9\") pod \"collect-profiles-29405685-wvm5t\" (UID: \"49637a1d-30df-4680-9bca-b28a28be48cb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405685-wvm5t" Nov 28 14:45:00 crc kubenswrapper[4857]: I1128 14:45:00.377209 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: 
\"kubernetes.io/secret/49637a1d-30df-4680-9bca-b28a28be48cb-secret-volume\") pod \"collect-profiles-29405685-wvm5t\" (UID: \"49637a1d-30df-4680-9bca-b28a28be48cb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405685-wvm5t" Nov 28 14:45:00 crc kubenswrapper[4857]: I1128 14:45:00.377285 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/49637a1d-30df-4680-9bca-b28a28be48cb-config-volume\") pod \"collect-profiles-29405685-wvm5t\" (UID: \"49637a1d-30df-4680-9bca-b28a28be48cb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405685-wvm5t" Nov 28 14:45:00 crc kubenswrapper[4857]: I1128 14:45:00.377355 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m5rl9\" (UniqueName: \"kubernetes.io/projected/49637a1d-30df-4680-9bca-b28a28be48cb-kube-api-access-m5rl9\") pod \"collect-profiles-29405685-wvm5t\" (UID: \"49637a1d-30df-4680-9bca-b28a28be48cb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405685-wvm5t" Nov 28 14:45:00 crc kubenswrapper[4857]: I1128 14:45:00.378368 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/49637a1d-30df-4680-9bca-b28a28be48cb-config-volume\") pod \"collect-profiles-29405685-wvm5t\" (UID: \"49637a1d-30df-4680-9bca-b28a28be48cb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405685-wvm5t" Nov 28 14:45:00 crc kubenswrapper[4857]: I1128 14:45:00.407052 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/49637a1d-30df-4680-9bca-b28a28be48cb-secret-volume\") pod \"collect-profiles-29405685-wvm5t\" (UID: \"49637a1d-30df-4680-9bca-b28a28be48cb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405685-wvm5t" Nov 28 14:45:00 crc kubenswrapper[4857]: I1128 14:45:00.427589 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m5rl9\" (UniqueName: \"kubernetes.io/projected/49637a1d-30df-4680-9bca-b28a28be48cb-kube-api-access-m5rl9\") pod \"collect-profiles-29405685-wvm5t\" (UID: \"49637a1d-30df-4680-9bca-b28a28be48cb\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405685-wvm5t" Nov 28 14:45:00 crc kubenswrapper[4857]: I1128 14:45:00.484538 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405685-wvm5t" Nov 28 14:45:00 crc kubenswrapper[4857]: I1128 14:45:00.961634 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405685-wvm5t"] Nov 28 14:45:01 crc kubenswrapper[4857]: I1128 14:45:01.767739 4857 generic.go:334] "Generic (PLEG): container finished" podID="49637a1d-30df-4680-9bca-b28a28be48cb" containerID="8b2c782b20a0ea4fde578586a42254b1ccdd5f92f08b4c1fc8f114c5aa693d30" exitCode=0 Nov 28 14:45:01 crc kubenswrapper[4857]: I1128 14:45:01.767780 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405685-wvm5t" event={"ID":"49637a1d-30df-4680-9bca-b28a28be48cb","Type":"ContainerDied","Data":"8b2c782b20a0ea4fde578586a42254b1ccdd5f92f08b4c1fc8f114c5aa693d30"} Nov 28 14:45:01 crc kubenswrapper[4857]: I1128 14:45:01.767805 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405685-wvm5t" event={"ID":"49637a1d-30df-4680-9bca-b28a28be48cb","Type":"ContainerStarted","Data":"45670f4892648a98315a5f54ea6b8f6b962f7b3b4075441013c8cd42d04d8f64"} Nov 28 14:45:03 crc kubenswrapper[4857]: I1128 14:45:03.152773 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405685-wvm5t" Nov 28 14:45:03 crc kubenswrapper[4857]: I1128 14:45:03.216690 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/49637a1d-30df-4680-9bca-b28a28be48cb-secret-volume\") pod \"49637a1d-30df-4680-9bca-b28a28be48cb\" (UID: \"49637a1d-30df-4680-9bca-b28a28be48cb\") " Nov 28 14:45:03 crc kubenswrapper[4857]: I1128 14:45:03.216927 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m5rl9\" (UniqueName: \"kubernetes.io/projected/49637a1d-30df-4680-9bca-b28a28be48cb-kube-api-access-m5rl9\") pod \"49637a1d-30df-4680-9bca-b28a28be48cb\" (UID: \"49637a1d-30df-4680-9bca-b28a28be48cb\") " Nov 28 14:45:03 crc kubenswrapper[4857]: I1128 14:45:03.216992 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/49637a1d-30df-4680-9bca-b28a28be48cb-config-volume\") pod \"49637a1d-30df-4680-9bca-b28a28be48cb\" (UID: \"49637a1d-30df-4680-9bca-b28a28be48cb\") " Nov 28 14:45:03 crc kubenswrapper[4857]: I1128 14:45:03.218863 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49637a1d-30df-4680-9bca-b28a28be48cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "49637a1d-30df-4680-9bca-b28a28be48cb" (UID: "49637a1d-30df-4680-9bca-b28a28be48cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 14:45:03 crc kubenswrapper[4857]: I1128 14:45:03.223855 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49637a1d-30df-4680-9bca-b28a28be48cb-kube-api-access-m5rl9" (OuterVolumeSpecName: "kube-api-access-m5rl9") pod "49637a1d-30df-4680-9bca-b28a28be48cb" (UID: "49637a1d-30df-4680-9bca-b28a28be48cb"). InnerVolumeSpecName "kube-api-access-m5rl9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:45:03 crc kubenswrapper[4857]: I1128 14:45:03.236046 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49637a1d-30df-4680-9bca-b28a28be48cb-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "49637a1d-30df-4680-9bca-b28a28be48cb" (UID: "49637a1d-30df-4680-9bca-b28a28be48cb"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 14:45:03 crc kubenswrapper[4857]: I1128 14:45:03.330782 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m5rl9\" (UniqueName: \"kubernetes.io/projected/49637a1d-30df-4680-9bca-b28a28be48cb-kube-api-access-m5rl9\") on node \"crc\" DevicePath \"\"" Nov 28 14:45:03 crc kubenswrapper[4857]: I1128 14:45:03.330906 4857 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/49637a1d-30df-4680-9bca-b28a28be48cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 14:45:03 crc kubenswrapper[4857]: I1128 14:45:03.330929 4857 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/49637a1d-30df-4680-9bca-b28a28be48cb-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 14:45:03 crc kubenswrapper[4857]: I1128 14:45:03.783018 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405685-wvm5t" event={"ID":"49637a1d-30df-4680-9bca-b28a28be48cb","Type":"ContainerDied","Data":"45670f4892648a98315a5f54ea6b8f6b962f7b3b4075441013c8cd42d04d8f64"} Nov 28 14:45:03 crc kubenswrapper[4857]: I1128 14:45:03.783342 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="45670f4892648a98315a5f54ea6b8f6b962f7b3b4075441013c8cd42d04d8f64" Nov 28 14:45:03 crc kubenswrapper[4857]: I1128 14:45:03.783093 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405685-wvm5t" Nov 28 14:45:04 crc kubenswrapper[4857]: I1128 14:45:04.243133 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405640-826p2"] Nov 28 14:45:04 crc kubenswrapper[4857]: I1128 14:45:04.250164 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405640-826p2"] Nov 28 14:45:06 crc kubenswrapper[4857]: I1128 14:45:06.237464 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed82d2ae-eed6-482d-b727-95a6ee985903" path="/var/lib/kubelet/pods/ed82d2ae-eed6-482d-b727-95a6ee985903/volumes" Nov 28 14:45:11 crc kubenswrapper[4857]: I1128 14:45:11.228932 4857 scope.go:117] "RemoveContainer" containerID="369091f600b21ab0f3801f31e3df4c4871285595a38c22eb5d54131bbe855e7a" Nov 28 14:45:11 crc kubenswrapper[4857]: E1128 14:45:11.229825 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:45:16 crc kubenswrapper[4857]: I1128 14:45:16.445849 4857 scope.go:117] "RemoveContainer" containerID="b66a2bf5a9e8d5adfa4cb09f9f77444013aaf21294f13611970e87e6340d651c" Nov 28 14:45:22 crc kubenswrapper[4857]: I1128 14:45:22.228759 4857 scope.go:117] "RemoveContainer" containerID="369091f600b21ab0f3801f31e3df4c4871285595a38c22eb5d54131bbe855e7a" Nov 28 14:45:22 crc kubenswrapper[4857]: E1128 14:45:22.229543 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:45:33 crc kubenswrapper[4857]: I1128 14:45:33.228629 4857 scope.go:117] "RemoveContainer" containerID="369091f600b21ab0f3801f31e3df4c4871285595a38c22eb5d54131bbe855e7a" Nov 28 14:45:33 crc kubenswrapper[4857]: E1128 14:45:33.229335 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:45:47 crc kubenswrapper[4857]: I1128 14:45:47.228343 4857 scope.go:117] "RemoveContainer" containerID="369091f600b21ab0f3801f31e3df4c4871285595a38c22eb5d54131bbe855e7a" Nov 28 14:45:47 crc kubenswrapper[4857]: E1128 14:45:47.229252 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:45:59 crc kubenswrapper[4857]: I1128 14:45:59.229075 4857 scope.go:117] "RemoveContainer" containerID="369091f600b21ab0f3801f31e3df4c4871285595a38c22eb5d54131bbe855e7a" Nov 28 14:45:59 crc kubenswrapper[4857]: E1128 14:45:59.230588 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:46:11 crc kubenswrapper[4857]: I1128 14:46:11.230091 4857 scope.go:117] "RemoveContainer" containerID="369091f600b21ab0f3801f31e3df4c4871285595a38c22eb5d54131bbe855e7a" Nov 28 14:46:11 crc kubenswrapper[4857]: E1128 14:46:11.231416 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:46:24 crc kubenswrapper[4857]: I1128 14:46:24.230058 4857 scope.go:117] "RemoveContainer" containerID="369091f600b21ab0f3801f31e3df4c4871285595a38c22eb5d54131bbe855e7a" Nov 28 14:46:24 crc kubenswrapper[4857]: E1128 14:46:24.231384 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:46:36 crc kubenswrapper[4857]: I1128 14:46:36.228849 4857 scope.go:117] "RemoveContainer" containerID="369091f600b21ab0f3801f31e3df4c4871285595a38c22eb5d54131bbe855e7a" Nov 28 14:46:36 crc kubenswrapper[4857]: E1128 14:46:36.230158 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:46:51 crc kubenswrapper[4857]: I1128 14:46:51.229794 4857 scope.go:117] "RemoveContainer" containerID="369091f600b21ab0f3801f31e3df4c4871285595a38c22eb5d54131bbe855e7a" Nov 28 14:46:51 crc kubenswrapper[4857]: E1128 14:46:51.231332 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:47:02 crc kubenswrapper[4857]: I1128 14:47:02.228791 4857 
scope.go:117] "RemoveContainer" containerID="369091f600b21ab0f3801f31e3df4c4871285595a38c22eb5d54131bbe855e7a" Nov 28 14:47:02 crc kubenswrapper[4857]: E1128 14:47:02.230070 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:47:11 crc kubenswrapper[4857]: I1128 14:47:11.106095 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["crc-storage/crc-storage-crc-5kpc2"] Nov 28 14:47:11 crc kubenswrapper[4857]: I1128 14:47:11.110837 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["crc-storage/crc-storage-crc-5kpc2"] Nov 28 14:47:11 crc kubenswrapper[4857]: I1128 14:47:11.269906 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-n7cq7"] Nov 28 14:47:11 crc kubenswrapper[4857]: E1128 14:47:11.270437 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49637a1d-30df-4680-9bca-b28a28be48cb" containerName="collect-profiles" Nov 28 14:47:11 crc kubenswrapper[4857]: I1128 14:47:11.270466 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="49637a1d-30df-4680-9bca-b28a28be48cb" containerName="collect-profiles" Nov 28 14:47:11 crc kubenswrapper[4857]: I1128 14:47:11.270723 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="49637a1d-30df-4680-9bca-b28a28be48cb" containerName="collect-profiles" Nov 28 14:47:11 crc kubenswrapper[4857]: I1128 14:47:11.271520 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-n7cq7" Nov 28 14:47:11 crc kubenswrapper[4857]: I1128 14:47:11.275521 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Nov 28 14:47:11 crc kubenswrapper[4857]: I1128 14:47:11.275676 4857 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-g8nhf" Nov 28 14:47:11 crc kubenswrapper[4857]: I1128 14:47:11.276319 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Nov 28 14:47:11 crc kubenswrapper[4857]: I1128 14:47:11.276555 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Nov 28 14:47:11 crc kubenswrapper[4857]: I1128 14:47:11.290722 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-n7cq7"] Nov 28 14:47:11 crc kubenswrapper[4857]: I1128 14:47:11.417337 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zc77r\" (UniqueName: \"kubernetes.io/projected/3c28ca71-091e-41eb-ae38-dbd46c79ffe3-kube-api-access-zc77r\") pod \"crc-storage-crc-n7cq7\" (UID: \"3c28ca71-091e-41eb-ae38-dbd46c79ffe3\") " pod="crc-storage/crc-storage-crc-n7cq7" Nov 28 14:47:11 crc kubenswrapper[4857]: I1128 14:47:11.417409 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/3c28ca71-091e-41eb-ae38-dbd46c79ffe3-node-mnt\") pod \"crc-storage-crc-n7cq7\" (UID: \"3c28ca71-091e-41eb-ae38-dbd46c79ffe3\") " pod="crc-storage/crc-storage-crc-n7cq7" Nov 28 14:47:11 crc kubenswrapper[4857]: I1128 14:47:11.417557 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/3c28ca71-091e-41eb-ae38-dbd46c79ffe3-crc-storage\") pod \"crc-storage-crc-n7cq7\" (UID: \"3c28ca71-091e-41eb-ae38-dbd46c79ffe3\") " pod="crc-storage/crc-storage-crc-n7cq7" Nov 28 14:47:11 crc kubenswrapper[4857]: I1128 14:47:11.519624 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/3c28ca71-091e-41eb-ae38-dbd46c79ffe3-crc-storage\") pod \"crc-storage-crc-n7cq7\" (UID: \"3c28ca71-091e-41eb-ae38-dbd46c79ffe3\") " pod="crc-storage/crc-storage-crc-n7cq7" Nov 28 14:47:11 crc kubenswrapper[4857]: I1128 14:47:11.519706 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zc77r\" (UniqueName: \"kubernetes.io/projected/3c28ca71-091e-41eb-ae38-dbd46c79ffe3-kube-api-access-zc77r\") pod \"crc-storage-crc-n7cq7\" (UID: \"3c28ca71-091e-41eb-ae38-dbd46c79ffe3\") " pod="crc-storage/crc-storage-crc-n7cq7" Nov 28 14:47:11 crc kubenswrapper[4857]: I1128 14:47:11.519748 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/3c28ca71-091e-41eb-ae38-dbd46c79ffe3-node-mnt\") pod \"crc-storage-crc-n7cq7\" (UID: \"3c28ca71-091e-41eb-ae38-dbd46c79ffe3\") " pod="crc-storage/crc-storage-crc-n7cq7" Nov 28 14:47:11 crc kubenswrapper[4857]: I1128 14:47:11.520215 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/3c28ca71-091e-41eb-ae38-dbd46c79ffe3-node-mnt\") pod \"crc-storage-crc-n7cq7\" (UID: \"3c28ca71-091e-41eb-ae38-dbd46c79ffe3\") " 
pod="crc-storage/crc-storage-crc-n7cq7" Nov 28 14:47:11 crc kubenswrapper[4857]: I1128 14:47:11.521155 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/3c28ca71-091e-41eb-ae38-dbd46c79ffe3-crc-storage\") pod \"crc-storage-crc-n7cq7\" (UID: \"3c28ca71-091e-41eb-ae38-dbd46c79ffe3\") " pod="crc-storage/crc-storage-crc-n7cq7" Nov 28 14:47:11 crc kubenswrapper[4857]: I1128 14:47:11.559293 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zc77r\" (UniqueName: \"kubernetes.io/projected/3c28ca71-091e-41eb-ae38-dbd46c79ffe3-kube-api-access-zc77r\") pod \"crc-storage-crc-n7cq7\" (UID: \"3c28ca71-091e-41eb-ae38-dbd46c79ffe3\") " pod="crc-storage/crc-storage-crc-n7cq7" Nov 28 14:47:11 crc kubenswrapper[4857]: I1128 14:47:11.644889 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-n7cq7" Nov 28 14:47:12 crc kubenswrapper[4857]: I1128 14:47:12.140406 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-n7cq7"] Nov 28 14:47:12 crc kubenswrapper[4857]: I1128 14:47:12.243699 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ebafba86-f75f-4f22-b85b-1a65df2eaadd" path="/var/lib/kubelet/pods/ebafba86-f75f-4f22-b85b-1a65df2eaadd/volumes" Nov 28 14:47:12 crc kubenswrapper[4857]: I1128 14:47:12.895020 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-n7cq7" event={"ID":"3c28ca71-091e-41eb-ae38-dbd46c79ffe3","Type":"ContainerStarted","Data":"6ed74f7ec80885ae8ee41d21db5adc283e43036e314455b0b6c433f71e2a1a85"} Nov 28 14:47:13 crc kubenswrapper[4857]: I1128 14:47:13.903986 4857 generic.go:334] "Generic (PLEG): container finished" podID="3c28ca71-091e-41eb-ae38-dbd46c79ffe3" containerID="279c36dd31ab1160978dc5f82feae94386b0e55002249d92a7beb2f8a77bd67d" exitCode=0 Nov 28 14:47:13 crc kubenswrapper[4857]: I1128 14:47:13.904069 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-n7cq7" event={"ID":"3c28ca71-091e-41eb-ae38-dbd46c79ffe3","Type":"ContainerDied","Data":"279c36dd31ab1160978dc5f82feae94386b0e55002249d92a7beb2f8a77bd67d"} Nov 28 14:47:14 crc kubenswrapper[4857]: I1128 14:47:14.229190 4857 scope.go:117] "RemoveContainer" containerID="369091f600b21ab0f3801f31e3df4c4871285595a38c22eb5d54131bbe855e7a" Nov 28 14:47:14 crc kubenswrapper[4857]: E1128 14:47:14.229786 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:47:15 crc kubenswrapper[4857]: I1128 14:47:15.278304 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-n7cq7" Nov 28 14:47:15 crc kubenswrapper[4857]: I1128 14:47:15.380613 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zc77r\" (UniqueName: \"kubernetes.io/projected/3c28ca71-091e-41eb-ae38-dbd46c79ffe3-kube-api-access-zc77r\") pod \"3c28ca71-091e-41eb-ae38-dbd46c79ffe3\" (UID: \"3c28ca71-091e-41eb-ae38-dbd46c79ffe3\") " Nov 28 14:47:15 crc kubenswrapper[4857]: I1128 14:47:15.380731 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/3c28ca71-091e-41eb-ae38-dbd46c79ffe3-crc-storage\") pod \"3c28ca71-091e-41eb-ae38-dbd46c79ffe3\" (UID: \"3c28ca71-091e-41eb-ae38-dbd46c79ffe3\") " Nov 28 14:47:15 crc kubenswrapper[4857]: I1128 14:47:15.380795 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/3c28ca71-091e-41eb-ae38-dbd46c79ffe3-node-mnt\") pod \"3c28ca71-091e-41eb-ae38-dbd46c79ffe3\" (UID: \"3c28ca71-091e-41eb-ae38-dbd46c79ffe3\") " Nov 28 14:47:15 crc kubenswrapper[4857]: I1128 14:47:15.381150 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3c28ca71-091e-41eb-ae38-dbd46c79ffe3-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "3c28ca71-091e-41eb-ae38-dbd46c79ffe3" (UID: "3c28ca71-091e-41eb-ae38-dbd46c79ffe3"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 14:47:15 crc kubenswrapper[4857]: I1128 14:47:15.386865 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c28ca71-091e-41eb-ae38-dbd46c79ffe3-kube-api-access-zc77r" (OuterVolumeSpecName: "kube-api-access-zc77r") pod "3c28ca71-091e-41eb-ae38-dbd46c79ffe3" (UID: "3c28ca71-091e-41eb-ae38-dbd46c79ffe3"). InnerVolumeSpecName "kube-api-access-zc77r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:47:15 crc kubenswrapper[4857]: I1128 14:47:15.403447 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c28ca71-091e-41eb-ae38-dbd46c79ffe3-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "3c28ca71-091e-41eb-ae38-dbd46c79ffe3" (UID: "3c28ca71-091e-41eb-ae38-dbd46c79ffe3"). InnerVolumeSpecName "crc-storage". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 14:47:15 crc kubenswrapper[4857]: I1128 14:47:15.483121 4857 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/3c28ca71-091e-41eb-ae38-dbd46c79ffe3-crc-storage\") on node \"crc\" DevicePath \"\"" Nov 28 14:47:15 crc kubenswrapper[4857]: I1128 14:47:15.483170 4857 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/3c28ca71-091e-41eb-ae38-dbd46c79ffe3-node-mnt\") on node \"crc\" DevicePath \"\"" Nov 28 14:47:15 crc kubenswrapper[4857]: I1128 14:47:15.483182 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zc77r\" (UniqueName: \"kubernetes.io/projected/3c28ca71-091e-41eb-ae38-dbd46c79ffe3-kube-api-access-zc77r\") on node \"crc\" DevicePath \"\"" Nov 28 14:47:15 crc kubenswrapper[4857]: I1128 14:47:15.935321 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-n7cq7" event={"ID":"3c28ca71-091e-41eb-ae38-dbd46c79ffe3","Type":"ContainerDied","Data":"6ed74f7ec80885ae8ee41d21db5adc283e43036e314455b0b6c433f71e2a1a85"} Nov 28 14:47:15 crc kubenswrapper[4857]: I1128 14:47:15.935388 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6ed74f7ec80885ae8ee41d21db5adc283e43036e314455b0b6c433f71e2a1a85" Nov 28 14:47:15 crc kubenswrapper[4857]: I1128 14:47:15.935436 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-n7cq7" Nov 28 14:47:16 crc kubenswrapper[4857]: I1128 14:47:16.728376 4857 scope.go:117] "RemoveContainer" containerID="d546ee6712067e8cd8df2b617ef7312089b4cf7166fd686cf70c9b6258528935" Nov 28 14:47:17 crc kubenswrapper[4857]: I1128 14:47:17.422988 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["crc-storage/crc-storage-crc-n7cq7"] Nov 28 14:47:17 crc kubenswrapper[4857]: I1128 14:47:17.427542 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["crc-storage/crc-storage-crc-n7cq7"] Nov 28 14:47:17 crc kubenswrapper[4857]: I1128 14:47:17.548914 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-tf7wv"] Nov 28 14:47:17 crc kubenswrapper[4857]: E1128 14:47:17.549223 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c28ca71-091e-41eb-ae38-dbd46c79ffe3" containerName="storage" Nov 28 14:47:17 crc kubenswrapper[4857]: I1128 14:47:17.549236 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c28ca71-091e-41eb-ae38-dbd46c79ffe3" containerName="storage" Nov 28 14:47:17 crc kubenswrapper[4857]: I1128 14:47:17.549412 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c28ca71-091e-41eb-ae38-dbd46c79ffe3" containerName="storage" Nov 28 14:47:17 crc kubenswrapper[4857]: I1128 14:47:17.549869 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-tf7wv" Nov 28 14:47:17 crc kubenswrapper[4857]: I1128 14:47:17.553422 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Nov 28 14:47:17 crc kubenswrapper[4857]: I1128 14:47:17.553501 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Nov 28 14:47:17 crc kubenswrapper[4857]: I1128 14:47:17.553646 4857 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-g8nhf" Nov 28 14:47:17 crc kubenswrapper[4857]: I1128 14:47:17.553813 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Nov 28 14:47:17 crc kubenswrapper[4857]: I1128 14:47:17.561489 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-tf7wv"] Nov 28 14:47:17 crc kubenswrapper[4857]: I1128 14:47:17.622715 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/b46849c6-45f3-415d-9251-8abd7ec6a09c-crc-storage\") pod \"crc-storage-crc-tf7wv\" (UID: \"b46849c6-45f3-415d-9251-8abd7ec6a09c\") " pod="crc-storage/crc-storage-crc-tf7wv" Nov 28 14:47:17 crc kubenswrapper[4857]: I1128 14:47:17.622940 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wkcv6\" (UniqueName: \"kubernetes.io/projected/b46849c6-45f3-415d-9251-8abd7ec6a09c-kube-api-access-wkcv6\") pod \"crc-storage-crc-tf7wv\" (UID: \"b46849c6-45f3-415d-9251-8abd7ec6a09c\") " pod="crc-storage/crc-storage-crc-tf7wv" Nov 28 14:47:17 crc kubenswrapper[4857]: I1128 14:47:17.623020 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/b46849c6-45f3-415d-9251-8abd7ec6a09c-node-mnt\") pod \"crc-storage-crc-tf7wv\" (UID: \"b46849c6-45f3-415d-9251-8abd7ec6a09c\") " pod="crc-storage/crc-storage-crc-tf7wv" Nov 28 14:47:17 crc kubenswrapper[4857]: I1128 14:47:17.724929 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wkcv6\" (UniqueName: \"kubernetes.io/projected/b46849c6-45f3-415d-9251-8abd7ec6a09c-kube-api-access-wkcv6\") pod \"crc-storage-crc-tf7wv\" (UID: \"b46849c6-45f3-415d-9251-8abd7ec6a09c\") " pod="crc-storage/crc-storage-crc-tf7wv" Nov 28 14:47:17 crc kubenswrapper[4857]: I1128 14:47:17.725010 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/b46849c6-45f3-415d-9251-8abd7ec6a09c-node-mnt\") pod \"crc-storage-crc-tf7wv\" (UID: \"b46849c6-45f3-415d-9251-8abd7ec6a09c\") " pod="crc-storage/crc-storage-crc-tf7wv" Nov 28 14:47:17 crc kubenswrapper[4857]: I1128 14:47:17.725103 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/b46849c6-45f3-415d-9251-8abd7ec6a09c-crc-storage\") pod \"crc-storage-crc-tf7wv\" (UID: \"b46849c6-45f3-415d-9251-8abd7ec6a09c\") " pod="crc-storage/crc-storage-crc-tf7wv" Nov 28 14:47:17 crc kubenswrapper[4857]: I1128 14:47:17.725447 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/b46849c6-45f3-415d-9251-8abd7ec6a09c-node-mnt\") pod \"crc-storage-crc-tf7wv\" (UID: \"b46849c6-45f3-415d-9251-8abd7ec6a09c\") " 
pod="crc-storage/crc-storage-crc-tf7wv" Nov 28 14:47:17 crc kubenswrapper[4857]: I1128 14:47:17.726112 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/b46849c6-45f3-415d-9251-8abd7ec6a09c-crc-storage\") pod \"crc-storage-crc-tf7wv\" (UID: \"b46849c6-45f3-415d-9251-8abd7ec6a09c\") " pod="crc-storage/crc-storage-crc-tf7wv" Nov 28 14:47:17 crc kubenswrapper[4857]: I1128 14:47:17.749468 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wkcv6\" (UniqueName: \"kubernetes.io/projected/b46849c6-45f3-415d-9251-8abd7ec6a09c-kube-api-access-wkcv6\") pod \"crc-storage-crc-tf7wv\" (UID: \"b46849c6-45f3-415d-9251-8abd7ec6a09c\") " pod="crc-storage/crc-storage-crc-tf7wv" Nov 28 14:47:17 crc kubenswrapper[4857]: I1128 14:47:17.879573 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-tf7wv" Nov 28 14:47:18 crc kubenswrapper[4857]: I1128 14:47:18.239357 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c28ca71-091e-41eb-ae38-dbd46c79ffe3" path="/var/lib/kubelet/pods/3c28ca71-091e-41eb-ae38-dbd46c79ffe3/volumes" Nov 28 14:47:18 crc kubenswrapper[4857]: I1128 14:47:18.337718 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-tf7wv"] Nov 28 14:47:18 crc kubenswrapper[4857]: I1128 14:47:18.960931 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-tf7wv" event={"ID":"b46849c6-45f3-415d-9251-8abd7ec6a09c","Type":"ContainerStarted","Data":"da372c98638adb1cc8354ded5c1ed8ab067d8ef5547439a88f1bb2b89f045be4"} Nov 28 14:47:19 crc kubenswrapper[4857]: I1128 14:47:19.970481 4857 generic.go:334] "Generic (PLEG): container finished" podID="b46849c6-45f3-415d-9251-8abd7ec6a09c" containerID="090b934ed357321f79b591ca14ab678efdd0d3847b44aede74743e705d4ebe97" exitCode=0 Nov 28 14:47:19 crc kubenswrapper[4857]: I1128 14:47:19.970538 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-tf7wv" event={"ID":"b46849c6-45f3-415d-9251-8abd7ec6a09c","Type":"ContainerDied","Data":"090b934ed357321f79b591ca14ab678efdd0d3847b44aede74743e705d4ebe97"} Nov 28 14:47:21 crc kubenswrapper[4857]: I1128 14:47:21.400313 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-tf7wv" Nov 28 14:47:21 crc kubenswrapper[4857]: I1128 14:47:21.484770 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wkcv6\" (UniqueName: \"kubernetes.io/projected/b46849c6-45f3-415d-9251-8abd7ec6a09c-kube-api-access-wkcv6\") pod \"b46849c6-45f3-415d-9251-8abd7ec6a09c\" (UID: \"b46849c6-45f3-415d-9251-8abd7ec6a09c\") " Nov 28 14:47:21 crc kubenswrapper[4857]: I1128 14:47:21.485122 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/b46849c6-45f3-415d-9251-8abd7ec6a09c-crc-storage\") pod \"b46849c6-45f3-415d-9251-8abd7ec6a09c\" (UID: \"b46849c6-45f3-415d-9251-8abd7ec6a09c\") " Nov 28 14:47:21 crc kubenswrapper[4857]: I1128 14:47:21.485203 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/b46849c6-45f3-415d-9251-8abd7ec6a09c-node-mnt\") pod \"b46849c6-45f3-415d-9251-8abd7ec6a09c\" (UID: \"b46849c6-45f3-415d-9251-8abd7ec6a09c\") " Nov 28 14:47:21 crc kubenswrapper[4857]: I1128 14:47:21.485377 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b46849c6-45f3-415d-9251-8abd7ec6a09c-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "b46849c6-45f3-415d-9251-8abd7ec6a09c" (UID: "b46849c6-45f3-415d-9251-8abd7ec6a09c"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 14:47:21 crc kubenswrapper[4857]: I1128 14:47:21.485589 4857 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/b46849c6-45f3-415d-9251-8abd7ec6a09c-node-mnt\") on node \"crc\" DevicePath \"\"" Nov 28 14:47:21 crc kubenswrapper[4857]: I1128 14:47:21.490871 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b46849c6-45f3-415d-9251-8abd7ec6a09c-kube-api-access-wkcv6" (OuterVolumeSpecName: "kube-api-access-wkcv6") pod "b46849c6-45f3-415d-9251-8abd7ec6a09c" (UID: "b46849c6-45f3-415d-9251-8abd7ec6a09c"). InnerVolumeSpecName "kube-api-access-wkcv6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:47:21 crc kubenswrapper[4857]: I1128 14:47:21.506762 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b46849c6-45f3-415d-9251-8abd7ec6a09c-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "b46849c6-45f3-415d-9251-8abd7ec6a09c" (UID: "b46849c6-45f3-415d-9251-8abd7ec6a09c"). InnerVolumeSpecName "crc-storage". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 14:47:21 crc kubenswrapper[4857]: I1128 14:47:21.587250 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wkcv6\" (UniqueName: \"kubernetes.io/projected/b46849c6-45f3-415d-9251-8abd7ec6a09c-kube-api-access-wkcv6\") on node \"crc\" DevicePath \"\"" Nov 28 14:47:21 crc kubenswrapper[4857]: I1128 14:47:21.587282 4857 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/b46849c6-45f3-415d-9251-8abd7ec6a09c-crc-storage\") on node \"crc\" DevicePath \"\"" Nov 28 14:47:21 crc kubenswrapper[4857]: I1128 14:47:21.991751 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-tf7wv" event={"ID":"b46849c6-45f3-415d-9251-8abd7ec6a09c","Type":"ContainerDied","Data":"da372c98638adb1cc8354ded5c1ed8ab067d8ef5547439a88f1bb2b89f045be4"} Nov 28 14:47:21 crc kubenswrapper[4857]: I1128 14:47:21.991810 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="da372c98638adb1cc8354ded5c1ed8ab067d8ef5547439a88f1bb2b89f045be4" Nov 28 14:47:21 crc kubenswrapper[4857]: I1128 14:47:21.991882 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-tf7wv" Nov 28 14:47:26 crc kubenswrapper[4857]: I1128 14:47:26.229148 4857 scope.go:117] "RemoveContainer" containerID="369091f600b21ab0f3801f31e3df4c4871285595a38c22eb5d54131bbe855e7a" Nov 28 14:47:26 crc kubenswrapper[4857]: E1128 14:47:26.230430 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:47:40 crc kubenswrapper[4857]: I1128 14:47:40.233998 4857 scope.go:117] "RemoveContainer" containerID="369091f600b21ab0f3801f31e3df4c4871285595a38c22eb5d54131bbe855e7a" Nov 28 14:47:40 crc kubenswrapper[4857]: E1128 14:47:40.234717 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:47:46 crc kubenswrapper[4857]: I1128 14:47:46.121453 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-dvs9h"] Nov 28 14:47:46 crc kubenswrapper[4857]: E1128 14:47:46.122306 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b46849c6-45f3-415d-9251-8abd7ec6a09c" containerName="storage" Nov 28 14:47:46 crc kubenswrapper[4857]: I1128 14:47:46.122326 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="b46849c6-45f3-415d-9251-8abd7ec6a09c" containerName="storage" Nov 28 14:47:46 crc kubenswrapper[4857]: I1128 14:47:46.122573 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="b46849c6-45f3-415d-9251-8abd7ec6a09c" containerName="storage" Nov 28 14:47:46 crc kubenswrapper[4857]: I1128 14:47:46.124623 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dvs9h" Nov 28 14:47:46 crc kubenswrapper[4857]: I1128 14:47:46.131729 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dvs9h"] Nov 28 14:47:46 crc kubenswrapper[4857]: I1128 14:47:46.253834 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23b84845-e772-4eb9-9259-cdfbd6f496b6-catalog-content\") pod \"redhat-marketplace-dvs9h\" (UID: \"23b84845-e772-4eb9-9259-cdfbd6f496b6\") " pod="openshift-marketplace/redhat-marketplace-dvs9h" Nov 28 14:47:46 crc kubenswrapper[4857]: I1128 14:47:46.253910 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9xq5h\" (UniqueName: \"kubernetes.io/projected/23b84845-e772-4eb9-9259-cdfbd6f496b6-kube-api-access-9xq5h\") pod \"redhat-marketplace-dvs9h\" (UID: \"23b84845-e772-4eb9-9259-cdfbd6f496b6\") " pod="openshift-marketplace/redhat-marketplace-dvs9h" Nov 28 14:47:46 crc kubenswrapper[4857]: I1128 14:47:46.254022 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/23b84845-e772-4eb9-9259-cdfbd6f496b6-utilities\") pod \"redhat-marketplace-dvs9h\" (UID: \"23b84845-e772-4eb9-9259-cdfbd6f496b6\") " pod="openshift-marketplace/redhat-marketplace-dvs9h" Nov 28 14:47:46 crc kubenswrapper[4857]: I1128 14:47:46.355637 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9xq5h\" (UniqueName: \"kubernetes.io/projected/23b84845-e772-4eb9-9259-cdfbd6f496b6-kube-api-access-9xq5h\") pod \"redhat-marketplace-dvs9h\" (UID: \"23b84845-e772-4eb9-9259-cdfbd6f496b6\") " pod="openshift-marketplace/redhat-marketplace-dvs9h" Nov 28 14:47:46 crc kubenswrapper[4857]: I1128 14:47:46.355686 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/23b84845-e772-4eb9-9259-cdfbd6f496b6-utilities\") pod \"redhat-marketplace-dvs9h\" (UID: \"23b84845-e772-4eb9-9259-cdfbd6f496b6\") " pod="openshift-marketplace/redhat-marketplace-dvs9h" Nov 28 14:47:46 crc kubenswrapper[4857]: I1128 14:47:46.355781 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23b84845-e772-4eb9-9259-cdfbd6f496b6-catalog-content\") pod \"redhat-marketplace-dvs9h\" (UID: \"23b84845-e772-4eb9-9259-cdfbd6f496b6\") " pod="openshift-marketplace/redhat-marketplace-dvs9h" Nov 28 14:47:46 crc kubenswrapper[4857]: I1128 14:47:46.356807 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/23b84845-e772-4eb9-9259-cdfbd6f496b6-utilities\") pod \"redhat-marketplace-dvs9h\" (UID: \"23b84845-e772-4eb9-9259-cdfbd6f496b6\") " pod="openshift-marketplace/redhat-marketplace-dvs9h" Nov 28 14:47:46 crc kubenswrapper[4857]: I1128 14:47:46.356843 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23b84845-e772-4eb9-9259-cdfbd6f496b6-catalog-content\") pod \"redhat-marketplace-dvs9h\" (UID: \"23b84845-e772-4eb9-9259-cdfbd6f496b6\") " pod="openshift-marketplace/redhat-marketplace-dvs9h" Nov 28 14:47:46 crc kubenswrapper[4857]: I1128 14:47:46.378361 4857 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-9xq5h\" (UniqueName: \"kubernetes.io/projected/23b84845-e772-4eb9-9259-cdfbd6f496b6-kube-api-access-9xq5h\") pod \"redhat-marketplace-dvs9h\" (UID: \"23b84845-e772-4eb9-9259-cdfbd6f496b6\") " pod="openshift-marketplace/redhat-marketplace-dvs9h" Nov 28 14:47:46 crc kubenswrapper[4857]: I1128 14:47:46.446238 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dvs9h" Nov 28 14:47:46 crc kubenswrapper[4857]: I1128 14:47:46.904631 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dvs9h"] Nov 28 14:47:47 crc kubenswrapper[4857]: I1128 14:47:47.172476 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dvs9h" event={"ID":"23b84845-e772-4eb9-9259-cdfbd6f496b6","Type":"ContainerStarted","Data":"e07c571f24334cf1afb94396f7399314e27374f25c4d30025daaed87bd582e6a"} Nov 28 14:47:48 crc kubenswrapper[4857]: I1128 14:47:48.183872 4857 generic.go:334] "Generic (PLEG): container finished" podID="23b84845-e772-4eb9-9259-cdfbd6f496b6" containerID="a8a1caeb2a920eefc267d1c9acf7d73b4f532466b49176378b347495a3493536" exitCode=0 Nov 28 14:47:48 crc kubenswrapper[4857]: I1128 14:47:48.183923 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dvs9h" event={"ID":"23b84845-e772-4eb9-9259-cdfbd6f496b6","Type":"ContainerDied","Data":"a8a1caeb2a920eefc267d1c9acf7d73b4f532466b49176378b347495a3493536"} Nov 28 14:47:49 crc kubenswrapper[4857]: I1128 14:47:49.194536 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dvs9h" event={"ID":"23b84845-e772-4eb9-9259-cdfbd6f496b6","Type":"ContainerStarted","Data":"dcec7b0df9869dfb672b975f83998bdf06a34b23aeaf2e43f32886680f62d043"} Nov 28 14:47:50 crc kubenswrapper[4857]: I1128 14:47:50.213774 4857 generic.go:334] "Generic (PLEG): container finished" podID="23b84845-e772-4eb9-9259-cdfbd6f496b6" containerID="dcec7b0df9869dfb672b975f83998bdf06a34b23aeaf2e43f32886680f62d043" exitCode=0 Nov 28 14:47:50 crc kubenswrapper[4857]: I1128 14:47:50.213864 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dvs9h" event={"ID":"23b84845-e772-4eb9-9259-cdfbd6f496b6","Type":"ContainerDied","Data":"dcec7b0df9869dfb672b975f83998bdf06a34b23aeaf2e43f32886680f62d043"} Nov 28 14:47:51 crc kubenswrapper[4857]: I1128 14:47:51.224772 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dvs9h" event={"ID":"23b84845-e772-4eb9-9259-cdfbd6f496b6","Type":"ContainerStarted","Data":"3d527b128c90f74ef7cc00e037e6d773b5a76058983f5113cb1b903ce14aaedf"} Nov 28 14:47:51 crc kubenswrapper[4857]: I1128 14:47:51.248632 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-dvs9h" podStartSLOduration=2.733782373 podStartE2EDuration="5.248601166s" podCreationTimestamp="2025-11-28 14:47:46 +0000 UTC" firstStartedPulling="2025-11-28 14:47:48.187251923 +0000 UTC m=+4718.311193360" lastFinishedPulling="2025-11-28 14:47:50.702070716 +0000 UTC m=+4720.826012153" observedRunningTime="2025-11-28 14:47:51.244390474 +0000 UTC m=+4721.368331911" watchObservedRunningTime="2025-11-28 14:47:51.248601166 +0000 UTC m=+4721.372542623" Nov 28 14:47:55 crc kubenswrapper[4857]: I1128 14:47:55.228842 4857 scope.go:117] "RemoveContainer" 
containerID="369091f600b21ab0f3801f31e3df4c4871285595a38c22eb5d54131bbe855e7a" Nov 28 14:47:55 crc kubenswrapper[4857]: E1128 14:47:55.229505 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:47:56 crc kubenswrapper[4857]: I1128 14:47:56.447659 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-dvs9h" Nov 28 14:47:56 crc kubenswrapper[4857]: I1128 14:47:56.448082 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-dvs9h" Nov 28 14:47:56 crc kubenswrapper[4857]: I1128 14:47:56.490300 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-dvs9h" Nov 28 14:47:57 crc kubenswrapper[4857]: I1128 14:47:57.314034 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-dvs9h" Nov 28 14:47:57 crc kubenswrapper[4857]: I1128 14:47:57.365082 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dvs9h"] Nov 28 14:47:59 crc kubenswrapper[4857]: I1128 14:47:59.276151 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-dvs9h" podUID="23b84845-e772-4eb9-9259-cdfbd6f496b6" containerName="registry-server" containerID="cri-o://3d527b128c90f74ef7cc00e037e6d773b5a76058983f5113cb1b903ce14aaedf" gracePeriod=2 Nov 28 14:48:00 crc kubenswrapper[4857]: I1128 14:48:00.190122 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dvs9h" Nov 28 14:48:00 crc kubenswrapper[4857]: I1128 14:48:00.292362 4857 generic.go:334] "Generic (PLEG): container finished" podID="23b84845-e772-4eb9-9259-cdfbd6f496b6" containerID="3d527b128c90f74ef7cc00e037e6d773b5a76058983f5113cb1b903ce14aaedf" exitCode=0 Nov 28 14:48:00 crc kubenswrapper[4857]: I1128 14:48:00.292442 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dvs9h" event={"ID":"23b84845-e772-4eb9-9259-cdfbd6f496b6","Type":"ContainerDied","Data":"3d527b128c90f74ef7cc00e037e6d773b5a76058983f5113cb1b903ce14aaedf"} Nov 28 14:48:00 crc kubenswrapper[4857]: I1128 14:48:00.292477 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dvs9h" event={"ID":"23b84845-e772-4eb9-9259-cdfbd6f496b6","Type":"ContainerDied","Data":"e07c571f24334cf1afb94396f7399314e27374f25c4d30025daaed87bd582e6a"} Nov 28 14:48:00 crc kubenswrapper[4857]: I1128 14:48:00.292662 4857 scope.go:117] "RemoveContainer" containerID="3d527b128c90f74ef7cc00e037e6d773b5a76058983f5113cb1b903ce14aaedf" Nov 28 14:48:00 crc kubenswrapper[4857]: I1128 14:48:00.292641 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dvs9h" Nov 28 14:48:00 crc kubenswrapper[4857]: I1128 14:48:00.319174 4857 scope.go:117] "RemoveContainer" containerID="dcec7b0df9869dfb672b975f83998bdf06a34b23aeaf2e43f32886680f62d043" Nov 28 14:48:00 crc kubenswrapper[4857]: I1128 14:48:00.340443 4857 scope.go:117] "RemoveContainer" containerID="a8a1caeb2a920eefc267d1c9acf7d73b4f532466b49176378b347495a3493536" Nov 28 14:48:00 crc kubenswrapper[4857]: I1128 14:48:00.350632 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/23b84845-e772-4eb9-9259-cdfbd6f496b6-utilities\") pod \"23b84845-e772-4eb9-9259-cdfbd6f496b6\" (UID: \"23b84845-e772-4eb9-9259-cdfbd6f496b6\") " Nov 28 14:48:00 crc kubenswrapper[4857]: I1128 14:48:00.350684 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xq5h\" (UniqueName: \"kubernetes.io/projected/23b84845-e772-4eb9-9259-cdfbd6f496b6-kube-api-access-9xq5h\") pod \"23b84845-e772-4eb9-9259-cdfbd6f496b6\" (UID: \"23b84845-e772-4eb9-9259-cdfbd6f496b6\") " Nov 28 14:48:00 crc kubenswrapper[4857]: I1128 14:48:00.350753 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23b84845-e772-4eb9-9259-cdfbd6f496b6-catalog-content\") pod \"23b84845-e772-4eb9-9259-cdfbd6f496b6\" (UID: \"23b84845-e772-4eb9-9259-cdfbd6f496b6\") " Nov 28 14:48:00 crc kubenswrapper[4857]: I1128 14:48:00.351662 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/23b84845-e772-4eb9-9259-cdfbd6f496b6-utilities" (OuterVolumeSpecName: "utilities") pod "23b84845-e772-4eb9-9259-cdfbd6f496b6" (UID: "23b84845-e772-4eb9-9259-cdfbd6f496b6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:48:00 crc kubenswrapper[4857]: I1128 14:48:00.356633 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/23b84845-e772-4eb9-9259-cdfbd6f496b6-kube-api-access-9xq5h" (OuterVolumeSpecName: "kube-api-access-9xq5h") pod "23b84845-e772-4eb9-9259-cdfbd6f496b6" (UID: "23b84845-e772-4eb9-9259-cdfbd6f496b6"). InnerVolumeSpecName "kube-api-access-9xq5h". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:48:00 crc kubenswrapper[4857]: I1128 14:48:00.365108 4857 scope.go:117] "RemoveContainer" containerID="3d527b128c90f74ef7cc00e037e6d773b5a76058983f5113cb1b903ce14aaedf" Nov 28 14:48:00 crc kubenswrapper[4857]: E1128 14:48:00.365623 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3d527b128c90f74ef7cc00e037e6d773b5a76058983f5113cb1b903ce14aaedf\": container with ID starting with 3d527b128c90f74ef7cc00e037e6d773b5a76058983f5113cb1b903ce14aaedf not found: ID does not exist" containerID="3d527b128c90f74ef7cc00e037e6d773b5a76058983f5113cb1b903ce14aaedf" Nov 28 14:48:00 crc kubenswrapper[4857]: I1128 14:48:00.365667 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3d527b128c90f74ef7cc00e037e6d773b5a76058983f5113cb1b903ce14aaedf"} err="failed to get container status \"3d527b128c90f74ef7cc00e037e6d773b5a76058983f5113cb1b903ce14aaedf\": rpc error: code = NotFound desc = could not find container \"3d527b128c90f74ef7cc00e037e6d773b5a76058983f5113cb1b903ce14aaedf\": container with ID starting with 3d527b128c90f74ef7cc00e037e6d773b5a76058983f5113cb1b903ce14aaedf not found: ID does not exist" Nov 28 14:48:00 crc kubenswrapper[4857]: I1128 14:48:00.365696 4857 scope.go:117] "RemoveContainer" containerID="dcec7b0df9869dfb672b975f83998bdf06a34b23aeaf2e43f32886680f62d043" Nov 28 14:48:00 crc kubenswrapper[4857]: E1128 14:48:00.366095 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dcec7b0df9869dfb672b975f83998bdf06a34b23aeaf2e43f32886680f62d043\": container with ID starting with dcec7b0df9869dfb672b975f83998bdf06a34b23aeaf2e43f32886680f62d043 not found: ID does not exist" containerID="dcec7b0df9869dfb672b975f83998bdf06a34b23aeaf2e43f32886680f62d043" Nov 28 14:48:00 crc kubenswrapper[4857]: I1128 14:48:00.366178 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dcec7b0df9869dfb672b975f83998bdf06a34b23aeaf2e43f32886680f62d043"} err="failed to get container status \"dcec7b0df9869dfb672b975f83998bdf06a34b23aeaf2e43f32886680f62d043\": rpc error: code = NotFound desc = could not find container \"dcec7b0df9869dfb672b975f83998bdf06a34b23aeaf2e43f32886680f62d043\": container with ID starting with dcec7b0df9869dfb672b975f83998bdf06a34b23aeaf2e43f32886680f62d043 not found: ID does not exist" Nov 28 14:48:00 crc kubenswrapper[4857]: I1128 14:48:00.366233 4857 scope.go:117] "RemoveContainer" containerID="a8a1caeb2a920eefc267d1c9acf7d73b4f532466b49176378b347495a3493536" Nov 28 14:48:00 crc kubenswrapper[4857]: E1128 14:48:00.366705 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a8a1caeb2a920eefc267d1c9acf7d73b4f532466b49176378b347495a3493536\": container with ID starting with a8a1caeb2a920eefc267d1c9acf7d73b4f532466b49176378b347495a3493536 not found: ID does not exist" containerID="a8a1caeb2a920eefc267d1c9acf7d73b4f532466b49176378b347495a3493536" Nov 28 14:48:00 crc kubenswrapper[4857]: I1128 14:48:00.366739 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a8a1caeb2a920eefc267d1c9acf7d73b4f532466b49176378b347495a3493536"} err="failed to get container status \"a8a1caeb2a920eefc267d1c9acf7d73b4f532466b49176378b347495a3493536\": rpc error: code = NotFound desc = could not 
find container \"a8a1caeb2a920eefc267d1c9acf7d73b4f532466b49176378b347495a3493536\": container with ID starting with a8a1caeb2a920eefc267d1c9acf7d73b4f532466b49176378b347495a3493536 not found: ID does not exist" Nov 28 14:48:00 crc kubenswrapper[4857]: I1128 14:48:00.372767 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/23b84845-e772-4eb9-9259-cdfbd6f496b6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "23b84845-e772-4eb9-9259-cdfbd6f496b6" (UID: "23b84845-e772-4eb9-9259-cdfbd6f496b6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:48:00 crc kubenswrapper[4857]: I1128 14:48:00.452660 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/23b84845-e772-4eb9-9259-cdfbd6f496b6-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 14:48:00 crc kubenswrapper[4857]: I1128 14:48:00.452708 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/23b84845-e772-4eb9-9259-cdfbd6f496b6-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 14:48:00 crc kubenswrapper[4857]: I1128 14:48:00.452722 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xq5h\" (UniqueName: \"kubernetes.io/projected/23b84845-e772-4eb9-9259-cdfbd6f496b6-kube-api-access-9xq5h\") on node \"crc\" DevicePath \"\"" Nov 28 14:48:00 crc kubenswrapper[4857]: I1128 14:48:00.636449 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dvs9h"] Nov 28 14:48:00 crc kubenswrapper[4857]: I1128 14:48:00.641391 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-dvs9h"] Nov 28 14:48:02 crc kubenswrapper[4857]: I1128 14:48:02.237873 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="23b84845-e772-4eb9-9259-cdfbd6f496b6" path="/var/lib/kubelet/pods/23b84845-e772-4eb9-9259-cdfbd6f496b6/volumes" Nov 28 14:48:08 crc kubenswrapper[4857]: I1128 14:48:08.229348 4857 scope.go:117] "RemoveContainer" containerID="369091f600b21ab0f3801f31e3df4c4871285595a38c22eb5d54131bbe855e7a" Nov 28 14:48:08 crc kubenswrapper[4857]: E1128 14:48:08.230287 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:48:20 crc kubenswrapper[4857]: I1128 14:48:20.234811 4857 scope.go:117] "RemoveContainer" containerID="369091f600b21ab0f3801f31e3df4c4871285595a38c22eb5d54131bbe855e7a" Nov 28 14:48:20 crc kubenswrapper[4857]: I1128 14:48:20.446327 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerStarted","Data":"e069878b8272165d675b3e5b9b4751408dfbaf5d46c3ee50c635da1d18076ac5"} Nov 28 14:48:38 crc kubenswrapper[4857]: I1128 14:48:38.899168 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-t9vb8"] Nov 28 14:48:38 crc kubenswrapper[4857]: E1128 14:48:38.905709 4857 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="23b84845-e772-4eb9-9259-cdfbd6f496b6" containerName="registry-server" Nov 28 14:48:38 crc kubenswrapper[4857]: I1128 14:48:38.905836 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="23b84845-e772-4eb9-9259-cdfbd6f496b6" containerName="registry-server" Nov 28 14:48:38 crc kubenswrapper[4857]: E1128 14:48:38.905965 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23b84845-e772-4eb9-9259-cdfbd6f496b6" containerName="extract-utilities" Nov 28 14:48:38 crc kubenswrapper[4857]: I1128 14:48:38.906077 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="23b84845-e772-4eb9-9259-cdfbd6f496b6" containerName="extract-utilities" Nov 28 14:48:38 crc kubenswrapper[4857]: E1128 14:48:38.906177 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23b84845-e772-4eb9-9259-cdfbd6f496b6" containerName="extract-content" Nov 28 14:48:38 crc kubenswrapper[4857]: I1128 14:48:38.906259 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="23b84845-e772-4eb9-9259-cdfbd6f496b6" containerName="extract-content" Nov 28 14:48:38 crc kubenswrapper[4857]: I1128 14:48:38.906502 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="23b84845-e772-4eb9-9259-cdfbd6f496b6" containerName="registry-server" Nov 28 14:48:38 crc kubenswrapper[4857]: I1128 14:48:38.907923 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-t9vb8" Nov 28 14:48:38 crc kubenswrapper[4857]: I1128 14:48:38.925855 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-t9vb8"] Nov 28 14:48:38 crc kubenswrapper[4857]: I1128 14:48:38.973275 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a34ee09-e02d-4923-a2aa-37b40c06d1e1-catalog-content\") pod \"community-operators-t9vb8\" (UID: \"2a34ee09-e02d-4923-a2aa-37b40c06d1e1\") " pod="openshift-marketplace/community-operators-t9vb8" Nov 28 14:48:38 crc kubenswrapper[4857]: I1128 14:48:38.973637 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a34ee09-e02d-4923-a2aa-37b40c06d1e1-utilities\") pod \"community-operators-t9vb8\" (UID: \"2a34ee09-e02d-4923-a2aa-37b40c06d1e1\") " pod="openshift-marketplace/community-operators-t9vb8" Nov 28 14:48:38 crc kubenswrapper[4857]: I1128 14:48:38.973680 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bg6fk\" (UniqueName: \"kubernetes.io/projected/2a34ee09-e02d-4923-a2aa-37b40c06d1e1-kube-api-access-bg6fk\") pod \"community-operators-t9vb8\" (UID: \"2a34ee09-e02d-4923-a2aa-37b40c06d1e1\") " pod="openshift-marketplace/community-operators-t9vb8" Nov 28 14:48:39 crc kubenswrapper[4857]: I1128 14:48:39.074733 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a34ee09-e02d-4923-a2aa-37b40c06d1e1-catalog-content\") pod \"community-operators-t9vb8\" (UID: \"2a34ee09-e02d-4923-a2aa-37b40c06d1e1\") " pod="openshift-marketplace/community-operators-t9vb8" Nov 28 14:48:39 crc kubenswrapper[4857]: I1128 14:48:39.074790 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a34ee09-e02d-4923-a2aa-37b40c06d1e1-utilities\") pod 
\"community-operators-t9vb8\" (UID: \"2a34ee09-e02d-4923-a2aa-37b40c06d1e1\") " pod="openshift-marketplace/community-operators-t9vb8" Nov 28 14:48:39 crc kubenswrapper[4857]: I1128 14:48:39.074868 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bg6fk\" (UniqueName: \"kubernetes.io/projected/2a34ee09-e02d-4923-a2aa-37b40c06d1e1-kube-api-access-bg6fk\") pod \"community-operators-t9vb8\" (UID: \"2a34ee09-e02d-4923-a2aa-37b40c06d1e1\") " pod="openshift-marketplace/community-operators-t9vb8" Nov 28 14:48:39 crc kubenswrapper[4857]: I1128 14:48:39.075271 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a34ee09-e02d-4923-a2aa-37b40c06d1e1-utilities\") pod \"community-operators-t9vb8\" (UID: \"2a34ee09-e02d-4923-a2aa-37b40c06d1e1\") " pod="openshift-marketplace/community-operators-t9vb8" Nov 28 14:48:39 crc kubenswrapper[4857]: I1128 14:48:39.075421 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a34ee09-e02d-4923-a2aa-37b40c06d1e1-catalog-content\") pod \"community-operators-t9vb8\" (UID: \"2a34ee09-e02d-4923-a2aa-37b40c06d1e1\") " pod="openshift-marketplace/community-operators-t9vb8" Nov 28 14:48:39 crc kubenswrapper[4857]: I1128 14:48:39.114813 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bg6fk\" (UniqueName: \"kubernetes.io/projected/2a34ee09-e02d-4923-a2aa-37b40c06d1e1-kube-api-access-bg6fk\") pod \"community-operators-t9vb8\" (UID: \"2a34ee09-e02d-4923-a2aa-37b40c06d1e1\") " pod="openshift-marketplace/community-operators-t9vb8" Nov 28 14:48:39 crc kubenswrapper[4857]: I1128 14:48:39.242820 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-t9vb8" Nov 28 14:48:39 crc kubenswrapper[4857]: I1128 14:48:39.713329 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-t9vb8"] Nov 28 14:48:39 crc kubenswrapper[4857]: I1128 14:48:39.729472 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t9vb8" event={"ID":"2a34ee09-e02d-4923-a2aa-37b40c06d1e1","Type":"ContainerStarted","Data":"8a09da2f275196780c5f8481b51cb39186f18eb93d44369e5db412fae94722e4"} Nov 28 14:48:40 crc kubenswrapper[4857]: I1128 14:48:40.742077 4857 generic.go:334] "Generic (PLEG): container finished" podID="2a34ee09-e02d-4923-a2aa-37b40c06d1e1" containerID="c3cb02fa5f31aa1aea88c5c5c9e46467548a7b8a80bad801f43c67fdf3940356" exitCode=0 Nov 28 14:48:40 crc kubenswrapper[4857]: I1128 14:48:40.742556 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t9vb8" event={"ID":"2a34ee09-e02d-4923-a2aa-37b40c06d1e1","Type":"ContainerDied","Data":"c3cb02fa5f31aa1aea88c5c5c9e46467548a7b8a80bad801f43c67fdf3940356"} Nov 28 14:48:40 crc kubenswrapper[4857]: I1128 14:48:40.745313 4857 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 14:48:42 crc kubenswrapper[4857]: I1128 14:48:42.757791 4857 generic.go:334] "Generic (PLEG): container finished" podID="2a34ee09-e02d-4923-a2aa-37b40c06d1e1" containerID="e75f076508870fe0fee0b6045c5509de2665160fc15aee5074bc12ec2d5bdd6c" exitCode=0 Nov 28 14:48:42 crc kubenswrapper[4857]: I1128 14:48:42.757836 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t9vb8" event={"ID":"2a34ee09-e02d-4923-a2aa-37b40c06d1e1","Type":"ContainerDied","Data":"e75f076508870fe0fee0b6045c5509de2665160fc15aee5074bc12ec2d5bdd6c"} Nov 28 14:48:43 crc kubenswrapper[4857]: I1128 14:48:43.766389 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t9vb8" event={"ID":"2a34ee09-e02d-4923-a2aa-37b40c06d1e1","Type":"ContainerStarted","Data":"b41938e3fa819f7d2253355ff535e7aa1c5986ede613c2803fe2d84479c5e44a"} Nov 28 14:48:43 crc kubenswrapper[4857]: I1128 14:48:43.789769 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-t9vb8" podStartSLOduration=3.306618012 podStartE2EDuration="5.789743561s" podCreationTimestamp="2025-11-28 14:48:38 +0000 UTC" firstStartedPulling="2025-11-28 14:48:40.745080913 +0000 UTC m=+4770.869022340" lastFinishedPulling="2025-11-28 14:48:43.228206452 +0000 UTC m=+4773.352147889" observedRunningTime="2025-11-28 14:48:43.784548243 +0000 UTC m=+4773.908489690" watchObservedRunningTime="2025-11-28 14:48:43.789743561 +0000 UTC m=+4773.913684998" Nov 28 14:48:49 crc kubenswrapper[4857]: I1128 14:48:49.243185 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-t9vb8" Nov 28 14:48:49 crc kubenswrapper[4857]: I1128 14:48:49.243691 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-t9vb8" Nov 28 14:48:49 crc kubenswrapper[4857]: I1128 14:48:49.282495 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-t9vb8" Nov 28 14:48:49 crc kubenswrapper[4857]: I1128 14:48:49.850838 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openshift-marketplace/community-operators-t9vb8" Nov 28 14:48:49 crc kubenswrapper[4857]: I1128 14:48:49.894469 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-t9vb8"] Nov 28 14:48:51 crc kubenswrapper[4857]: I1128 14:48:51.824738 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-t9vb8" podUID="2a34ee09-e02d-4923-a2aa-37b40c06d1e1" containerName="registry-server" containerID="cri-o://b41938e3fa819f7d2253355ff535e7aa1c5986ede613c2803fe2d84479c5e44a" gracePeriod=2 Nov 28 14:48:52 crc kubenswrapper[4857]: I1128 14:48:52.214125 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-t9vb8" Nov 28 14:48:52 crc kubenswrapper[4857]: I1128 14:48:52.274240 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a34ee09-e02d-4923-a2aa-37b40c06d1e1-utilities\") pod \"2a34ee09-e02d-4923-a2aa-37b40c06d1e1\" (UID: \"2a34ee09-e02d-4923-a2aa-37b40c06d1e1\") " Nov 28 14:48:52 crc kubenswrapper[4857]: I1128 14:48:52.274790 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bg6fk\" (UniqueName: \"kubernetes.io/projected/2a34ee09-e02d-4923-a2aa-37b40c06d1e1-kube-api-access-bg6fk\") pod \"2a34ee09-e02d-4923-a2aa-37b40c06d1e1\" (UID: \"2a34ee09-e02d-4923-a2aa-37b40c06d1e1\") " Nov 28 14:48:52 crc kubenswrapper[4857]: I1128 14:48:52.275010 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a34ee09-e02d-4923-a2aa-37b40c06d1e1-catalog-content\") pod \"2a34ee09-e02d-4923-a2aa-37b40c06d1e1\" (UID: \"2a34ee09-e02d-4923-a2aa-37b40c06d1e1\") " Nov 28 14:48:52 crc kubenswrapper[4857]: I1128 14:48:52.277411 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2a34ee09-e02d-4923-a2aa-37b40c06d1e1-utilities" (OuterVolumeSpecName: "utilities") pod "2a34ee09-e02d-4923-a2aa-37b40c06d1e1" (UID: "2a34ee09-e02d-4923-a2aa-37b40c06d1e1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:48:52 crc kubenswrapper[4857]: I1128 14:48:52.280495 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a34ee09-e02d-4923-a2aa-37b40c06d1e1-kube-api-access-bg6fk" (OuterVolumeSpecName: "kube-api-access-bg6fk") pod "2a34ee09-e02d-4923-a2aa-37b40c06d1e1" (UID: "2a34ee09-e02d-4923-a2aa-37b40c06d1e1"). InnerVolumeSpecName "kube-api-access-bg6fk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:48:52 crc kubenswrapper[4857]: I1128 14:48:52.332381 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2a34ee09-e02d-4923-a2aa-37b40c06d1e1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2a34ee09-e02d-4923-a2aa-37b40c06d1e1" (UID: "2a34ee09-e02d-4923-a2aa-37b40c06d1e1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:48:52 crc kubenswrapper[4857]: I1128 14:48:52.376693 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bg6fk\" (UniqueName: \"kubernetes.io/projected/2a34ee09-e02d-4923-a2aa-37b40c06d1e1-kube-api-access-bg6fk\") on node \"crc\" DevicePath \"\"" Nov 28 14:48:52 crc kubenswrapper[4857]: I1128 14:48:52.376737 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a34ee09-e02d-4923-a2aa-37b40c06d1e1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 14:48:52 crc kubenswrapper[4857]: I1128 14:48:52.376751 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a34ee09-e02d-4923-a2aa-37b40c06d1e1-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 14:48:52 crc kubenswrapper[4857]: I1128 14:48:52.835180 4857 generic.go:334] "Generic (PLEG): container finished" podID="2a34ee09-e02d-4923-a2aa-37b40c06d1e1" containerID="b41938e3fa819f7d2253355ff535e7aa1c5986ede613c2803fe2d84479c5e44a" exitCode=0 Nov 28 14:48:52 crc kubenswrapper[4857]: I1128 14:48:52.835308 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t9vb8" event={"ID":"2a34ee09-e02d-4923-a2aa-37b40c06d1e1","Type":"ContainerDied","Data":"b41938e3fa819f7d2253355ff535e7aa1c5986ede613c2803fe2d84479c5e44a"} Nov 28 14:48:52 crc kubenswrapper[4857]: I1128 14:48:52.835526 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-t9vb8" event={"ID":"2a34ee09-e02d-4923-a2aa-37b40c06d1e1","Type":"ContainerDied","Data":"8a09da2f275196780c5f8481b51cb39186f18eb93d44369e5db412fae94722e4"} Nov 28 14:48:52 crc kubenswrapper[4857]: I1128 14:48:52.835553 4857 scope.go:117] "RemoveContainer" containerID="b41938e3fa819f7d2253355ff535e7aa1c5986ede613c2803fe2d84479c5e44a" Nov 28 14:48:52 crc kubenswrapper[4857]: I1128 14:48:52.835330 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-t9vb8" Nov 28 14:48:52 crc kubenswrapper[4857]: I1128 14:48:52.863859 4857 scope.go:117] "RemoveContainer" containerID="e75f076508870fe0fee0b6045c5509de2665160fc15aee5074bc12ec2d5bdd6c" Nov 28 14:48:52 crc kubenswrapper[4857]: I1128 14:48:52.876766 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-t9vb8"] Nov 28 14:48:52 crc kubenswrapper[4857]: I1128 14:48:52.884992 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-t9vb8"] Nov 28 14:48:52 crc kubenswrapper[4857]: I1128 14:48:52.899615 4857 scope.go:117] "RemoveContainer" containerID="c3cb02fa5f31aa1aea88c5c5c9e46467548a7b8a80bad801f43c67fdf3940356" Nov 28 14:48:52 crc kubenswrapper[4857]: I1128 14:48:52.913891 4857 scope.go:117] "RemoveContainer" containerID="b41938e3fa819f7d2253355ff535e7aa1c5986ede613c2803fe2d84479c5e44a" Nov 28 14:48:52 crc kubenswrapper[4857]: E1128 14:48:52.914295 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b41938e3fa819f7d2253355ff535e7aa1c5986ede613c2803fe2d84479c5e44a\": container with ID starting with b41938e3fa819f7d2253355ff535e7aa1c5986ede613c2803fe2d84479c5e44a not found: ID does not exist" containerID="b41938e3fa819f7d2253355ff535e7aa1c5986ede613c2803fe2d84479c5e44a" Nov 28 14:48:52 crc kubenswrapper[4857]: I1128 14:48:52.914341 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b41938e3fa819f7d2253355ff535e7aa1c5986ede613c2803fe2d84479c5e44a"} err="failed to get container status \"b41938e3fa819f7d2253355ff535e7aa1c5986ede613c2803fe2d84479c5e44a\": rpc error: code = NotFound desc = could not find container \"b41938e3fa819f7d2253355ff535e7aa1c5986ede613c2803fe2d84479c5e44a\": container with ID starting with b41938e3fa819f7d2253355ff535e7aa1c5986ede613c2803fe2d84479c5e44a not found: ID does not exist" Nov 28 14:48:52 crc kubenswrapper[4857]: I1128 14:48:52.914371 4857 scope.go:117] "RemoveContainer" containerID="e75f076508870fe0fee0b6045c5509de2665160fc15aee5074bc12ec2d5bdd6c" Nov 28 14:48:52 crc kubenswrapper[4857]: E1128 14:48:52.914823 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e75f076508870fe0fee0b6045c5509de2665160fc15aee5074bc12ec2d5bdd6c\": container with ID starting with e75f076508870fe0fee0b6045c5509de2665160fc15aee5074bc12ec2d5bdd6c not found: ID does not exist" containerID="e75f076508870fe0fee0b6045c5509de2665160fc15aee5074bc12ec2d5bdd6c" Nov 28 14:48:52 crc kubenswrapper[4857]: I1128 14:48:52.914844 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e75f076508870fe0fee0b6045c5509de2665160fc15aee5074bc12ec2d5bdd6c"} err="failed to get container status \"e75f076508870fe0fee0b6045c5509de2665160fc15aee5074bc12ec2d5bdd6c\": rpc error: code = NotFound desc = could not find container \"e75f076508870fe0fee0b6045c5509de2665160fc15aee5074bc12ec2d5bdd6c\": container with ID starting with e75f076508870fe0fee0b6045c5509de2665160fc15aee5074bc12ec2d5bdd6c not found: ID does not exist" Nov 28 14:48:52 crc kubenswrapper[4857]: I1128 14:48:52.914856 4857 scope.go:117] "RemoveContainer" containerID="c3cb02fa5f31aa1aea88c5c5c9e46467548a7b8a80bad801f43c67fdf3940356" Nov 28 14:48:52 crc kubenswrapper[4857]: E1128 14:48:52.915112 4857 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"c3cb02fa5f31aa1aea88c5c5c9e46467548a7b8a80bad801f43c67fdf3940356\": container with ID starting with c3cb02fa5f31aa1aea88c5c5c9e46467548a7b8a80bad801f43c67fdf3940356 not found: ID does not exist" containerID="c3cb02fa5f31aa1aea88c5c5c9e46467548a7b8a80bad801f43c67fdf3940356" Nov 28 14:48:52 crc kubenswrapper[4857]: I1128 14:48:52.915138 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c3cb02fa5f31aa1aea88c5c5c9e46467548a7b8a80bad801f43c67fdf3940356"} err="failed to get container status \"c3cb02fa5f31aa1aea88c5c5c9e46467548a7b8a80bad801f43c67fdf3940356\": rpc error: code = NotFound desc = could not find container \"c3cb02fa5f31aa1aea88c5c5c9e46467548a7b8a80bad801f43c67fdf3940356\": container with ID starting with c3cb02fa5f31aa1aea88c5c5c9e46467548a7b8a80bad801f43c67fdf3940356 not found: ID does not exist" Nov 28 14:48:54 crc kubenswrapper[4857]: I1128 14:48:54.240566 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a34ee09-e02d-4923-a2aa-37b40c06d1e1" path="/var/lib/kubelet/pods/2a34ee09-e02d-4923-a2aa-37b40c06d1e1/volumes" Nov 28 14:50:34 crc kubenswrapper[4857]: I1128 14:50:34.177587 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5d7b5456f5-szjdl"] Nov 28 14:50:34 crc kubenswrapper[4857]: E1128 14:50:34.178705 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a34ee09-e02d-4923-a2aa-37b40c06d1e1" containerName="registry-server" Nov 28 14:50:34 crc kubenswrapper[4857]: I1128 14:50:34.178722 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a34ee09-e02d-4923-a2aa-37b40c06d1e1" containerName="registry-server" Nov 28 14:50:34 crc kubenswrapper[4857]: E1128 14:50:34.178749 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a34ee09-e02d-4923-a2aa-37b40c06d1e1" containerName="extract-utilities" Nov 28 14:50:34 crc kubenswrapper[4857]: I1128 14:50:34.178756 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a34ee09-e02d-4923-a2aa-37b40c06d1e1" containerName="extract-utilities" Nov 28 14:50:34 crc kubenswrapper[4857]: E1128 14:50:34.178767 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a34ee09-e02d-4923-a2aa-37b40c06d1e1" containerName="extract-content" Nov 28 14:50:34 crc kubenswrapper[4857]: I1128 14:50:34.178773 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a34ee09-e02d-4923-a2aa-37b40c06d1e1" containerName="extract-content" Nov 28 14:50:34 crc kubenswrapper[4857]: I1128 14:50:34.178935 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a34ee09-e02d-4923-a2aa-37b40c06d1e1" containerName="registry-server" Nov 28 14:50:34 crc kubenswrapper[4857]: I1128 14:50:34.179834 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5d7b5456f5-szjdl" Nov 28 14:50:34 crc kubenswrapper[4857]: I1128 14:50:34.182883 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-qblxw" Nov 28 14:50:34 crc kubenswrapper[4857]: I1128 14:50:34.182929 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 28 14:50:34 crc kubenswrapper[4857]: I1128 14:50:34.182972 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 28 14:50:34 crc kubenswrapper[4857]: I1128 14:50:34.188371 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 28 14:50:34 crc kubenswrapper[4857]: I1128 14:50:34.188371 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 28 14:50:34 crc kubenswrapper[4857]: I1128 14:50:34.188763 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5d7b5456f5-szjdl"] Nov 28 14:50:34 crc kubenswrapper[4857]: I1128 14:50:34.290368 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fqf8k\" (UniqueName: \"kubernetes.io/projected/6d048a01-3774-460f-be30-35633e7fca67-kube-api-access-fqf8k\") pod \"dnsmasq-dns-5d7b5456f5-szjdl\" (UID: \"6d048a01-3774-460f-be30-35633e7fca67\") " pod="openstack/dnsmasq-dns-5d7b5456f5-szjdl" Nov 28 14:50:34 crc kubenswrapper[4857]: I1128 14:50:34.290412 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6d048a01-3774-460f-be30-35633e7fca67-dns-svc\") pod \"dnsmasq-dns-5d7b5456f5-szjdl\" (UID: \"6d048a01-3774-460f-be30-35633e7fca67\") " pod="openstack/dnsmasq-dns-5d7b5456f5-szjdl" Nov 28 14:50:34 crc kubenswrapper[4857]: I1128 14:50:34.290509 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d048a01-3774-460f-be30-35633e7fca67-config\") pod \"dnsmasq-dns-5d7b5456f5-szjdl\" (UID: \"6d048a01-3774-460f-be30-35633e7fca67\") " pod="openstack/dnsmasq-dns-5d7b5456f5-szjdl" Nov 28 14:50:34 crc kubenswrapper[4857]: I1128 14:50:34.391801 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d048a01-3774-460f-be30-35633e7fca67-config\") pod \"dnsmasq-dns-5d7b5456f5-szjdl\" (UID: \"6d048a01-3774-460f-be30-35633e7fca67\") " pod="openstack/dnsmasq-dns-5d7b5456f5-szjdl" Nov 28 14:50:34 crc kubenswrapper[4857]: I1128 14:50:34.392137 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fqf8k\" (UniqueName: \"kubernetes.io/projected/6d048a01-3774-460f-be30-35633e7fca67-kube-api-access-fqf8k\") pod \"dnsmasq-dns-5d7b5456f5-szjdl\" (UID: \"6d048a01-3774-460f-be30-35633e7fca67\") " pod="openstack/dnsmasq-dns-5d7b5456f5-szjdl" Nov 28 14:50:34 crc kubenswrapper[4857]: I1128 14:50:34.392238 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6d048a01-3774-460f-be30-35633e7fca67-dns-svc\") pod \"dnsmasq-dns-5d7b5456f5-szjdl\" (UID: \"6d048a01-3774-460f-be30-35633e7fca67\") " pod="openstack/dnsmasq-dns-5d7b5456f5-szjdl" Nov 28 14:50:34 crc kubenswrapper[4857]: I1128 14:50:34.392844 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/6d048a01-3774-460f-be30-35633e7fca67-config\") pod \"dnsmasq-dns-5d7b5456f5-szjdl\" (UID: \"6d048a01-3774-460f-be30-35633e7fca67\") " pod="openstack/dnsmasq-dns-5d7b5456f5-szjdl" Nov 28 14:50:34 crc kubenswrapper[4857]: I1128 14:50:34.393237 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6d048a01-3774-460f-be30-35633e7fca67-dns-svc\") pod \"dnsmasq-dns-5d7b5456f5-szjdl\" (UID: \"6d048a01-3774-460f-be30-35633e7fca67\") " pod="openstack/dnsmasq-dns-5d7b5456f5-szjdl" Nov 28 14:50:34 crc kubenswrapper[4857]: I1128 14:50:34.462886 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-98ddfc8f-mt6hz"] Nov 28 14:50:34 crc kubenswrapper[4857]: I1128 14:50:34.464804 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-98ddfc8f-mt6hz" Nov 28 14:50:34 crc kubenswrapper[4857]: I1128 14:50:34.480553 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-98ddfc8f-mt6hz"] Nov 28 14:50:34 crc kubenswrapper[4857]: I1128 14:50:34.493734 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/725dd93e-beeb-4412-9b51-0748187361f2-dns-svc\") pod \"dnsmasq-dns-98ddfc8f-mt6hz\" (UID: \"725dd93e-beeb-4412-9b51-0748187361f2\") " pod="openstack/dnsmasq-dns-98ddfc8f-mt6hz" Nov 28 14:50:34 crc kubenswrapper[4857]: I1128 14:50:34.493798 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/725dd93e-beeb-4412-9b51-0748187361f2-config\") pod \"dnsmasq-dns-98ddfc8f-mt6hz\" (UID: \"725dd93e-beeb-4412-9b51-0748187361f2\") " pod="openstack/dnsmasq-dns-98ddfc8f-mt6hz" Nov 28 14:50:34 crc kubenswrapper[4857]: I1128 14:50:34.493820 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjf4x\" (UniqueName: \"kubernetes.io/projected/725dd93e-beeb-4412-9b51-0748187361f2-kube-api-access-sjf4x\") pod \"dnsmasq-dns-98ddfc8f-mt6hz\" (UID: \"725dd93e-beeb-4412-9b51-0748187361f2\") " pod="openstack/dnsmasq-dns-98ddfc8f-mt6hz" Nov 28 14:50:34 crc kubenswrapper[4857]: I1128 14:50:34.574104 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fqf8k\" (UniqueName: \"kubernetes.io/projected/6d048a01-3774-460f-be30-35633e7fca67-kube-api-access-fqf8k\") pod \"dnsmasq-dns-5d7b5456f5-szjdl\" (UID: \"6d048a01-3774-460f-be30-35633e7fca67\") " pod="openstack/dnsmasq-dns-5d7b5456f5-szjdl" Nov 28 14:50:34 crc kubenswrapper[4857]: I1128 14:50:34.597001 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjf4x\" (UniqueName: \"kubernetes.io/projected/725dd93e-beeb-4412-9b51-0748187361f2-kube-api-access-sjf4x\") pod \"dnsmasq-dns-98ddfc8f-mt6hz\" (UID: \"725dd93e-beeb-4412-9b51-0748187361f2\") " pod="openstack/dnsmasq-dns-98ddfc8f-mt6hz" Nov 28 14:50:34 crc kubenswrapper[4857]: I1128 14:50:34.597679 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/725dd93e-beeb-4412-9b51-0748187361f2-dns-svc\") pod \"dnsmasq-dns-98ddfc8f-mt6hz\" (UID: \"725dd93e-beeb-4412-9b51-0748187361f2\") " pod="openstack/dnsmasq-dns-98ddfc8f-mt6hz" Nov 28 14:50:34 crc kubenswrapper[4857]: I1128 14:50:34.597838 4857 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/725dd93e-beeb-4412-9b51-0748187361f2-config\") pod \"dnsmasq-dns-98ddfc8f-mt6hz\" (UID: \"725dd93e-beeb-4412-9b51-0748187361f2\") " pod="openstack/dnsmasq-dns-98ddfc8f-mt6hz" Nov 28 14:50:34 crc kubenswrapper[4857]: I1128 14:50:34.598857 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/725dd93e-beeb-4412-9b51-0748187361f2-dns-svc\") pod \"dnsmasq-dns-98ddfc8f-mt6hz\" (UID: \"725dd93e-beeb-4412-9b51-0748187361f2\") " pod="openstack/dnsmasq-dns-98ddfc8f-mt6hz" Nov 28 14:50:34 crc kubenswrapper[4857]: I1128 14:50:34.599625 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/725dd93e-beeb-4412-9b51-0748187361f2-config\") pod \"dnsmasq-dns-98ddfc8f-mt6hz\" (UID: \"725dd93e-beeb-4412-9b51-0748187361f2\") " pod="openstack/dnsmasq-dns-98ddfc8f-mt6hz" Nov 28 14:50:34 crc kubenswrapper[4857]: I1128 14:50:34.620735 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sjf4x\" (UniqueName: \"kubernetes.io/projected/725dd93e-beeb-4412-9b51-0748187361f2-kube-api-access-sjf4x\") pod \"dnsmasq-dns-98ddfc8f-mt6hz\" (UID: \"725dd93e-beeb-4412-9b51-0748187361f2\") " pod="openstack/dnsmasq-dns-98ddfc8f-mt6hz" Nov 28 14:50:34 crc kubenswrapper[4857]: I1128 14:50:34.788822 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-98ddfc8f-mt6hz" Nov 28 14:50:34 crc kubenswrapper[4857]: I1128 14:50:34.799614 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d7b5456f5-szjdl" Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.231334 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5d7b5456f5-szjdl"] Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.314269 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-98ddfc8f-mt6hz"] Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.336406 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.338003 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.342420 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.343007 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-88pth"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.343289 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.343983 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.344325 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.356578 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.408150 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sp8wq\" (UniqueName: \"kubernetes.io/projected/87a4c792-2171-4ac3-a785-497e679f4aff-kube-api-access-sp8wq\") pod \"rabbitmq-server-0\" (UID: \"87a4c792-2171-4ac3-a785-497e679f4aff\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.408222 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/87a4c792-2171-4ac3-a785-497e679f4aff-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"87a4c792-2171-4ac3-a785-497e679f4aff\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.408277 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/87a4c792-2171-4ac3-a785-497e679f4aff-pod-info\") pod \"rabbitmq-server-0\" (UID: \"87a4c792-2171-4ac3-a785-497e679f4aff\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.408315 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/87a4c792-2171-4ac3-a785-497e679f4aff-server-conf\") pod \"rabbitmq-server-0\" (UID: \"87a4c792-2171-4ac3-a785-497e679f4aff\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.408342 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/87a4c792-2171-4ac3-a785-497e679f4aff-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"87a4c792-2171-4ac3-a785-497e679f4aff\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.408383 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-00afcd83-f129-451c-bd44-c1fcd8d77b2f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-00afcd83-f129-451c-bd44-c1fcd8d77b2f\") pod \"rabbitmq-server-0\" (UID: \"87a4c792-2171-4ac3-a785-497e679f4aff\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.408406 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/87a4c792-2171-4ac3-a785-497e679f4aff-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"87a4c792-2171-4ac3-a785-497e679f4aff\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.408443 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/87a4c792-2171-4ac3-a785-497e679f4aff-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"87a4c792-2171-4ac3-a785-497e679f4aff\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.408521 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/87a4c792-2171-4ac3-a785-497e679f4aff-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"87a4c792-2171-4ac3-a785-497e679f4aff\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.510081 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/87a4c792-2171-4ac3-a785-497e679f4aff-server-conf\") pod \"rabbitmq-server-0\" (UID: \"87a4c792-2171-4ac3-a785-497e679f4aff\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.510148 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/87a4c792-2171-4ac3-a785-497e679f4aff-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"87a4c792-2171-4ac3-a785-497e679f4aff\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.510207 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-00afcd83-f129-451c-bd44-c1fcd8d77b2f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-00afcd83-f129-451c-bd44-c1fcd8d77b2f\") pod \"rabbitmq-server-0\" (UID: \"87a4c792-2171-4ac3-a785-497e679f4aff\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.510223 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/87a4c792-2171-4ac3-a785-497e679f4aff-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"87a4c792-2171-4ac3-a785-497e679f4aff\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.510249 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/87a4c792-2171-4ac3-a785-497e679f4aff-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"87a4c792-2171-4ac3-a785-497e679f4aff\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.510267 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/87a4c792-2171-4ac3-a785-497e679f4aff-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"87a4c792-2171-4ac3-a785-497e679f4aff\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.510288 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sp8wq\" (UniqueName: \"kubernetes.io/projected/87a4c792-2171-4ac3-a785-497e679f4aff-kube-api-access-sp8wq\") pod \"rabbitmq-server-0\" (UID: \"87a4c792-2171-4ac3-a785-497e679f4aff\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.510392 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/87a4c792-2171-4ac3-a785-497e679f4aff-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"87a4c792-2171-4ac3-a785-497e679f4aff\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.510443 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/87a4c792-2171-4ac3-a785-497e679f4aff-pod-info\") pod \"rabbitmq-server-0\" (UID: \"87a4c792-2171-4ac3-a785-497e679f4aff\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.511185 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/87a4c792-2171-4ac3-a785-497e679f4aff-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"87a4c792-2171-4ac3-a785-497e679f4aff\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.511430 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/87a4c792-2171-4ac3-a785-497e679f4aff-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"87a4c792-2171-4ac3-a785-497e679f4aff\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.511982 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/87a4c792-2171-4ac3-a785-497e679f4aff-server-conf\") pod \"rabbitmq-server-0\" (UID: \"87a4c792-2171-4ac3-a785-497e679f4aff\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.512284 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/87a4c792-2171-4ac3-a785-497e679f4aff-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"87a4c792-2171-4ac3-a785-497e679f4aff\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.513322 4857 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.513355 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-00afcd83-f129-451c-bd44-c1fcd8d77b2f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-00afcd83-f129-451c-bd44-c1fcd8d77b2f\") pod \"rabbitmq-server-0\" (UID: \"87a4c792-2171-4ac3-a785-497e679f4aff\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/583aeee6825a956232d19ae3704718122e85db39aa0e96108b6210a7edfa3471/globalmount\"" pod="openstack/rabbitmq-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.516440 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/87a4c792-2171-4ac3-a785-497e679f4aff-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"87a4c792-2171-4ac3-a785-497e679f4aff\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.516664 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/87a4c792-2171-4ac3-a785-497e679f4aff-pod-info\") pod \"rabbitmq-server-0\" (UID: \"87a4c792-2171-4ac3-a785-497e679f4aff\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.517047 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/87a4c792-2171-4ac3-a785-497e679f4aff-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"87a4c792-2171-4ac3-a785-497e679f4aff\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.532671 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sp8wq\" (UniqueName: \"kubernetes.io/projected/87a4c792-2171-4ac3-a785-497e679f4aff-kube-api-access-sp8wq\") pod \"rabbitmq-server-0\" (UID: \"87a4c792-2171-4ac3-a785-497e679f4aff\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.559862 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-00afcd83-f129-451c-bd44-c1fcd8d77b2f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-00afcd83-f129-451c-bd44-c1fcd8d77b2f\") pod \"rabbitmq-server-0\" (UID: \"87a4c792-2171-4ac3-a785-497e679f4aff\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.642034 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.643748 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.646412 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.646571 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-2zqjt"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.646606 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.646451 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.646464 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.657388 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.660509 4857 generic.go:334] "Generic (PLEG): container finished" podID="725dd93e-beeb-4412-9b51-0748187361f2" containerID="0228bde86ab877a138530cce61e044978dd329a8cbe27603554ecb79551f5823" exitCode=0
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.660573 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-98ddfc8f-mt6hz" event={"ID":"725dd93e-beeb-4412-9b51-0748187361f2","Type":"ContainerDied","Data":"0228bde86ab877a138530cce61e044978dd329a8cbe27603554ecb79551f5823"}
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.660605 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-98ddfc8f-mt6hz" event={"ID":"725dd93e-beeb-4412-9b51-0748187361f2","Type":"ContainerStarted","Data":"bd54747dcbb2949c8c51c80e6c1df58137c31fc13c94fcd555172f921fc187ef"}
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.662106 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.693187 4857 generic.go:334] "Generic (PLEG): container finished" podID="6d048a01-3774-460f-be30-35633e7fca67" containerID="d9a717ff56151afd8519c47c1c1a5c82d0b9d6b011a3cb854a14d64f5d44f563" exitCode=0
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.693223 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d7b5456f5-szjdl" event={"ID":"6d048a01-3774-460f-be30-35633e7fca67","Type":"ContainerDied","Data":"d9a717ff56151afd8519c47c1c1a5c82d0b9d6b011a3cb854a14d64f5d44f563"}
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.693247 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d7b5456f5-szjdl" event={"ID":"6d048a01-3774-460f-be30-35633e7fca67","Type":"ContainerStarted","Data":"0bc3eb9840d521b87e200bbeda7c1cc46ccd9b2609d28210a8ad7611a1ebe6bc"}
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.824034 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3b571b20-3451-40e3-90dd-100d2d8b7403-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b571b20-3451-40e3-90dd-100d2d8b7403\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.824078 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3b571b20-3451-40e3-90dd-100d2d8b7403-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b571b20-3451-40e3-90dd-100d2d8b7403\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.824102 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3b571b20-3451-40e3-90dd-100d2d8b7403-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b571b20-3451-40e3-90dd-100d2d8b7403\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.824122 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3b571b20-3451-40e3-90dd-100d2d8b7403-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b571b20-3451-40e3-90dd-100d2d8b7403\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.824153 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3b571b20-3451-40e3-90dd-100d2d8b7403-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b571b20-3451-40e3-90dd-100d2d8b7403\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.824179 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3b571b20-3451-40e3-90dd-100d2d8b7403-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b571b20-3451-40e3-90dd-100d2d8b7403\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.824218 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-7713a849-85c1-424e-8f74-404aa0c6f0d0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7713a849-85c1-424e-8f74-404aa0c6f0d0\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b571b20-3451-40e3-90dd-100d2d8b7403\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.824237 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rwgwl\" (UniqueName: \"kubernetes.io/projected/3b571b20-3451-40e3-90dd-100d2d8b7403-kube-api-access-rwgwl\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b571b20-3451-40e3-90dd-100d2d8b7403\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.824268 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3b571b20-3451-40e3-90dd-100d2d8b7403-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b571b20-3451-40e3-90dd-100d2d8b7403\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.926631 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3b571b20-3451-40e3-90dd-100d2d8b7403-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b571b20-3451-40e3-90dd-100d2d8b7403\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.927050 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3b571b20-3451-40e3-90dd-100d2d8b7403-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b571b20-3451-40e3-90dd-100d2d8b7403\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.927079 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3b571b20-3451-40e3-90dd-100d2d8b7403-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b571b20-3451-40e3-90dd-100d2d8b7403\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.927102 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3b571b20-3451-40e3-90dd-100d2d8b7403-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b571b20-3451-40e3-90dd-100d2d8b7403\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.927138 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3b571b20-3451-40e3-90dd-100d2d8b7403-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b571b20-3451-40e3-90dd-100d2d8b7403\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.927169 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3b571b20-3451-40e3-90dd-100d2d8b7403-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b571b20-3451-40e3-90dd-100d2d8b7403\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.927218 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-7713a849-85c1-424e-8f74-404aa0c6f0d0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7713a849-85c1-424e-8f74-404aa0c6f0d0\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b571b20-3451-40e3-90dd-100d2d8b7403\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.927243 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rwgwl\" (UniqueName: \"kubernetes.io/projected/3b571b20-3451-40e3-90dd-100d2d8b7403-kube-api-access-rwgwl\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b571b20-3451-40e3-90dd-100d2d8b7403\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.927271 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3b571b20-3451-40e3-90dd-100d2d8b7403-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b571b20-3451-40e3-90dd-100d2d8b7403\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.928281 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3b571b20-3451-40e3-90dd-100d2d8b7403-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b571b20-3451-40e3-90dd-100d2d8b7403\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.928407 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3b571b20-3451-40e3-90dd-100d2d8b7403-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b571b20-3451-40e3-90dd-100d2d8b7403\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.928639 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3b571b20-3451-40e3-90dd-100d2d8b7403-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b571b20-3451-40e3-90dd-100d2d8b7403\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.929846 4857 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.929872 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-7713a849-85c1-424e-8f74-404aa0c6f0d0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7713a849-85c1-424e-8f74-404aa0c6f0d0\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b571b20-3451-40e3-90dd-100d2d8b7403\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/748cc75642fca8791130b40037ed5ae311d1be97c9be64fd3b675883dd653e88/globalmount\"" pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.931377 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3b571b20-3451-40e3-90dd-100d2d8b7403-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b571b20-3451-40e3-90dd-100d2d8b7403\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.932440 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3b571b20-3451-40e3-90dd-100d2d8b7403-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b571b20-3451-40e3-90dd-100d2d8b7403\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.932555 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3b571b20-3451-40e3-90dd-100d2d8b7403-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b571b20-3451-40e3-90dd-100d2d8b7403\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.932738 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3b571b20-3451-40e3-90dd-100d2d8b7403-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b571b20-3451-40e3-90dd-100d2d8b7403\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.950063 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rwgwl\" (UniqueName: \"kubernetes.io/projected/3b571b20-3451-40e3-90dd-100d2d8b7403-kube-api-access-rwgwl\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b571b20-3451-40e3-90dd-100d2d8b7403\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:50:35 crc kubenswrapper[4857]: I1128 14:50:35.972864 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-7713a849-85c1-424e-8f74-404aa0c6f0d0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7713a849-85c1-424e-8f74-404aa0c6f0d0\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b571b20-3451-40e3-90dd-100d2d8b7403\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:50:36 crc kubenswrapper[4857]: I1128 14:50:36.202701 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 28 14:50:36 crc kubenswrapper[4857]: W1128 14:50:36.206782 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod87a4c792_2171_4ac3_a785_497e679f4aff.slice/crio-c0fb340ec96212a2fe4099aaf307fa20f6048765b806c65f0e29b1bd9afa9f4c WatchSource:0}: Error finding container c0fb340ec96212a2fe4099aaf307fa20f6048765b806c65f0e29b1bd9afa9f4c: Status 404 returned error can't find the container with id c0fb340ec96212a2fe4099aaf307fa20f6048765b806c65f0e29b1bd9afa9f4c
Nov 28 14:50:36 crc kubenswrapper[4857]: I1128 14:50:36.269810 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:50:36 crc kubenswrapper[4857]: I1128 14:50:36.690691 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 28 14:50:36 crc kubenswrapper[4857]: I1128 14:50:36.705605 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d7b5456f5-szjdl" event={"ID":"6d048a01-3774-460f-be30-35633e7fca67","Type":"ContainerStarted","Data":"10baeb9bddd8f29c9b2bf0bfd9b6ed39bfdd56d56a27797c55acb177f24cd068"}
Nov 28 14:50:36 crc kubenswrapper[4857]: I1128 14:50:36.705772 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5d7b5456f5-szjdl"
Nov 28 14:50:36 crc kubenswrapper[4857]: I1128 14:50:36.708288 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-98ddfc8f-mt6hz" event={"ID":"725dd93e-beeb-4412-9b51-0748187361f2","Type":"ContainerStarted","Data":"6b997c2de871a07bbde36fc630efbcbb016972ba7f4035e7daf3ae9a7c5431ae"}
Nov 28 14:50:36 crc kubenswrapper[4857]: I1128 14:50:36.708781 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-98ddfc8f-mt6hz"
Nov 28 14:50:36 crc kubenswrapper[4857]: I1128 14:50:36.709787 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"87a4c792-2171-4ac3-a785-497e679f4aff","Type":"ContainerStarted","Data":"c0fb340ec96212a2fe4099aaf307fa20f6048765b806c65f0e29b1bd9afa9f4c"}
Nov 28 14:50:36 crc kubenswrapper[4857]: I1128 14:50:36.729935 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5d7b5456f5-szjdl" podStartSLOduration=2.729898123 podStartE2EDuration="2.729898123s" podCreationTimestamp="2025-11-28 14:50:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 14:50:36.722495706 +0000 UTC m=+4886.846437163" watchObservedRunningTime="2025-11-28 14:50:36.729898123 +0000 UTC m=+4886.853839560"
Nov 28 14:50:36 crc kubenswrapper[4857]: I1128 14:50:36.750506 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-98ddfc8f-mt6hz" podStartSLOduration=2.750476332 podStartE2EDuration="2.750476332s" podCreationTimestamp="2025-11-28 14:50:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 14:50:36.742832898 +0000 UTC m=+4886.866774375" watchObservedRunningTime="2025-11-28 14:50:36.750476332 +0000 UTC m=+4886.874417769"
Nov 28 14:50:36 crc kubenswrapper[4857]: I1128 14:50:36.813661 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"]
Nov 28 14:50:36 crc kubenswrapper[4857]: I1128 14:50:36.815060 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0"
Nov 28 14:50:36 crc kubenswrapper[4857]: I1128 14:50:36.818017 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc"
Nov 28 14:50:36 crc kubenswrapper[4857]: I1128 14:50:36.818399 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data"
Nov 28 14:50:36 crc kubenswrapper[4857]: I1128 14:50:36.818584 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts"
Nov 28 14:50:36 crc kubenswrapper[4857]: I1128 14:50:36.819973 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-hntx2"
Nov 28 14:50:36 crc kubenswrapper[4857]: I1128 14:50:36.825267 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle"
Nov 28 14:50:36 crc kubenswrapper[4857]: I1128 14:50:36.831641 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"]
Nov 28 14:50:36 crc kubenswrapper[4857]: I1128 14:50:36.943421 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f952e7bb-8570-4caf-bb87-e84cd31506b9-kolla-config\") pod \"openstack-galera-0\" (UID: \"f952e7bb-8570-4caf-bb87-e84cd31506b9\") " pod="openstack/openstack-galera-0"
Nov 28 14:50:36 crc kubenswrapper[4857]: I1128 14:50:36.943482 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/f952e7bb-8570-4caf-bb87-e84cd31506b9-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"f952e7bb-8570-4caf-bb87-e84cd31506b9\") " pod="openstack/openstack-galera-0"
Nov 28 14:50:36 crc kubenswrapper[4857]: I1128 14:50:36.943595 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f952e7bb-8570-4caf-bb87-e84cd31506b9-operator-scripts\") pod \"openstack-galera-0\" (UID: \"f952e7bb-8570-4caf-bb87-e84cd31506b9\") " pod="openstack/openstack-galera-0"
Nov 28 14:50:36 crc kubenswrapper[4857]: I1128 14:50:36.943745 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/f952e7bb-8570-4caf-bb87-e84cd31506b9-config-data-default\") pod \"openstack-galera-0\" (UID: \"f952e7bb-8570-4caf-bb87-e84cd31506b9\") " pod="openstack/openstack-galera-0"
Nov 28 14:50:36 crc kubenswrapper[4857]: I1128 14:50:36.943813 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jsr5v\" (UniqueName: \"kubernetes.io/projected/f952e7bb-8570-4caf-bb87-e84cd31506b9-kube-api-access-jsr5v\") pod \"openstack-galera-0\" (UID: \"f952e7bb-8570-4caf-bb87-e84cd31506b9\") " pod="openstack/openstack-galera-0"
Nov 28 14:50:36 crc kubenswrapper[4857]: I1128 14:50:36.943848 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/f952e7bb-8570-4caf-bb87-e84cd31506b9-config-data-generated\") pod \"openstack-galera-0\" (UID: \"f952e7bb-8570-4caf-bb87-e84cd31506b9\") " pod="openstack/openstack-galera-0"
Nov 28 14:50:36 crc kubenswrapper[4857]: I1128 14:50:36.943901 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-aaf6dd26-fddc-4f73-a1d0-97b8b2e6f914\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-aaf6dd26-fddc-4f73-a1d0-97b8b2e6f914\") pod \"openstack-galera-0\" (UID: \"f952e7bb-8570-4caf-bb87-e84cd31506b9\") " pod="openstack/openstack-galera-0"
Nov 28 14:50:36 crc kubenswrapper[4857]: I1128 14:50:36.943991 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f952e7bb-8570-4caf-bb87-e84cd31506b9-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"f952e7bb-8570-4caf-bb87-e84cd31506b9\") " pod="openstack/openstack-galera-0"
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.045134 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f952e7bb-8570-4caf-bb87-e84cd31506b9-kolla-config\") pod \"openstack-galera-0\" (UID: \"f952e7bb-8570-4caf-bb87-e84cd31506b9\") " pod="openstack/openstack-galera-0"
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.045197 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/f952e7bb-8570-4caf-bb87-e84cd31506b9-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"f952e7bb-8570-4caf-bb87-e84cd31506b9\") " pod="openstack/openstack-galera-0"
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.045224 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f952e7bb-8570-4caf-bb87-e84cd31506b9-operator-scripts\") pod \"openstack-galera-0\" (UID: \"f952e7bb-8570-4caf-bb87-e84cd31506b9\") " pod="openstack/openstack-galera-0"
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.045244 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/f952e7bb-8570-4caf-bb87-e84cd31506b9-config-data-default\") pod \"openstack-galera-0\" (UID: \"f952e7bb-8570-4caf-bb87-e84cd31506b9\") " pod="openstack/openstack-galera-0"
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.045284 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jsr5v\" (UniqueName: \"kubernetes.io/projected/f952e7bb-8570-4caf-bb87-e84cd31506b9-kube-api-access-jsr5v\") pod \"openstack-galera-0\" (UID: \"f952e7bb-8570-4caf-bb87-e84cd31506b9\") " pod="openstack/openstack-galera-0"
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.045311 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/f952e7bb-8570-4caf-bb87-e84cd31506b9-config-data-generated\") pod \"openstack-galera-0\" (UID: \"f952e7bb-8570-4caf-bb87-e84cd31506b9\") " pod="openstack/openstack-galera-0"
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.045361 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-aaf6dd26-fddc-4f73-a1d0-97b8b2e6f914\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-aaf6dd26-fddc-4f73-a1d0-97b8b2e6f914\") pod \"openstack-galera-0\" (UID: \"f952e7bb-8570-4caf-bb87-e84cd31506b9\") " pod="openstack/openstack-galera-0"
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.045406 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f952e7bb-8570-4caf-bb87-e84cd31506b9-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"f952e7bb-8570-4caf-bb87-e84cd31506b9\") " pod="openstack/openstack-galera-0"
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.046332 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/f952e7bb-8570-4caf-bb87-e84cd31506b9-config-data-generated\") pod \"openstack-galera-0\" (UID: \"f952e7bb-8570-4caf-bb87-e84cd31506b9\") " pod="openstack/openstack-galera-0"
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.046615 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f952e7bb-8570-4caf-bb87-e84cd31506b9-kolla-config\") pod \"openstack-galera-0\" (UID: \"f952e7bb-8570-4caf-bb87-e84cd31506b9\") " pod="openstack/openstack-galera-0"
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.046655 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/f952e7bb-8570-4caf-bb87-e84cd31506b9-config-data-default\") pod \"openstack-galera-0\" (UID: \"f952e7bb-8570-4caf-bb87-e84cd31506b9\") " pod="openstack/openstack-galera-0"
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.047897 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f952e7bb-8570-4caf-bb87-e84cd31506b9-operator-scripts\") pod \"openstack-galera-0\" (UID: \"f952e7bb-8570-4caf-bb87-e84cd31506b9\") " pod="openstack/openstack-galera-0"
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.047977 4857 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.048006 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-aaf6dd26-fddc-4f73-a1d0-97b8b2e6f914\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-aaf6dd26-fddc-4f73-a1d0-97b8b2e6f914\") pod \"openstack-galera-0\" (UID: \"f952e7bb-8570-4caf-bb87-e84cd31506b9\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/900ecf0bc13baaa5e50765aed6ecf5dd255ac65a45ef1dbfc6f3159201b15539/globalmount\"" pod="openstack/openstack-galera-0"
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.079561 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/f952e7bb-8570-4caf-bb87-e84cd31506b9-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"f952e7bb-8570-4caf-bb87-e84cd31506b9\") " pod="openstack/openstack-galera-0"
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.079892 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f952e7bb-8570-4caf-bb87-e84cd31506b9-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"f952e7bb-8570-4caf-bb87-e84cd31506b9\") " pod="openstack/openstack-galera-0"
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.096458 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jsr5v\" (UniqueName: \"kubernetes.io/projected/f952e7bb-8570-4caf-bb87-e84cd31506b9-kube-api-access-jsr5v\") pod \"openstack-galera-0\" (UID: \"f952e7bb-8570-4caf-bb87-e84cd31506b9\") " pod="openstack/openstack-galera-0"
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.126638 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-aaf6dd26-fddc-4f73-a1d0-97b8b2e6f914\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-aaf6dd26-fddc-4f73-a1d0-97b8b2e6f914\") pod \"openstack-galera-0\" (UID: \"f952e7bb-8570-4caf-bb87-e84cd31506b9\") " pod="openstack/openstack-galera-0"
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.144467 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0"
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.223387 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"]
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.224448 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0"
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.226455 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-mwz6d"
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.226464 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data"
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.239897 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"]
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.350851 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/55f0c5ec-efb9-4049-82fb-2b33d503e84e-kolla-config\") pod \"memcached-0\" (UID: \"55f0c5ec-efb9-4049-82fb-2b33d503e84e\") " pod="openstack/memcached-0"
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.350963 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/55f0c5ec-efb9-4049-82fb-2b33d503e84e-config-data\") pod \"memcached-0\" (UID: \"55f0c5ec-efb9-4049-82fb-2b33d503e84e\") " pod="openstack/memcached-0"
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.351021 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6mfxj\" (UniqueName: \"kubernetes.io/projected/55f0c5ec-efb9-4049-82fb-2b33d503e84e-kube-api-access-6mfxj\") pod \"memcached-0\" (UID: \"55f0c5ec-efb9-4049-82fb-2b33d503e84e\") " pod="openstack/memcached-0"
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.452351 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/55f0c5ec-efb9-4049-82fb-2b33d503e84e-kolla-config\") pod \"memcached-0\" (UID: \"55f0c5ec-efb9-4049-82fb-2b33d503e84e\") " pod="openstack/memcached-0"
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.452389 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/55f0c5ec-efb9-4049-82fb-2b33d503e84e-config-data\") pod \"memcached-0\" (UID: \"55f0c5ec-efb9-4049-82fb-2b33d503e84e\") " pod="openstack/memcached-0"
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.452444 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6mfxj\" (UniqueName: \"kubernetes.io/projected/55f0c5ec-efb9-4049-82fb-2b33d503e84e-kube-api-access-6mfxj\") pod \"memcached-0\" (UID: \"55f0c5ec-efb9-4049-82fb-2b33d503e84e\") " pod="openstack/memcached-0"
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.453841 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/55f0c5ec-efb9-4049-82fb-2b33d503e84e-config-data\") pod \"memcached-0\" (UID: \"55f0c5ec-efb9-4049-82fb-2b33d503e84e\") " pod="openstack/memcached-0"
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.453886 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/55f0c5ec-efb9-4049-82fb-2b33d503e84e-kolla-config\") pod \"memcached-0\" (UID: \"55f0c5ec-efb9-4049-82fb-2b33d503e84e\") " pod="openstack/memcached-0"
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.475401 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6mfxj\" (UniqueName: \"kubernetes.io/projected/55f0c5ec-efb9-4049-82fb-2b33d503e84e-kube-api-access-6mfxj\") pod \"memcached-0\" (UID: \"55f0c5ec-efb9-4049-82fb-2b33d503e84e\") " pod="openstack/memcached-0"
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.541008 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0"
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.654660 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"]
Nov 28 14:50:37 crc kubenswrapper[4857]: W1128 14:50:37.659353 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf952e7bb_8570_4caf_bb87_e84cd31506b9.slice/crio-1a2a68da27380c99ac64ed80f11be328d66305bbccabb4974be077ba6ffb84ec WatchSource:0}: Error finding container 1a2a68da27380c99ac64ed80f11be328d66305bbccabb4974be077ba6ffb84ec: Status 404 returned error can't find the container with id 1a2a68da27380c99ac64ed80f11be328d66305bbccabb4974be077ba6ffb84ec
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.725378 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"3b571b20-3451-40e3-90dd-100d2d8b7403","Type":"ContainerStarted","Data":"0ed5d24d982efd10f243cf3395ab030760b380b70a0e09b915726c538bbdfd78"}
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.730775 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"f952e7bb-8570-4caf-bb87-e84cd31506b9","Type":"ContainerStarted","Data":"1a2a68da27380c99ac64ed80f11be328d66305bbccabb4974be077ba6ffb84ec"}
Nov 28 14:50:37 crc kubenswrapper[4857]: I1128 14:50:37.733396 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"87a4c792-2171-4ac3-a785-497e679f4aff","Type":"ContainerStarted","Data":"8f950633b444675ab0bb46cc1134c5058135e8aaeae9ae0eae914da200e0ca9b"}
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.012793 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"]
Nov 28 14:50:38 crc kubenswrapper[4857]: W1128 14:50:38.019114 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod55f0c5ec_efb9_4049_82fb_2b33d503e84e.slice/crio-ad49d7eb313bbc522005a946c9c11d3c67a25bbdab0845db4b37a5e8aa711e33 WatchSource:0}: Error finding container ad49d7eb313bbc522005a946c9c11d3c67a25bbdab0845db4b37a5e8aa711e33: Status 404 returned error can't find the container with id ad49d7eb313bbc522005a946c9c11d3c67a25bbdab0845db4b37a5e8aa711e33
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.159692 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"]
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.161150 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0"
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.164733 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data"
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.164772 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-tlcrb"
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.164931 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc"
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.165081 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts"
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.171406 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.275866 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c87lf\" (UniqueName: \"kubernetes.io/projected/30b56f73-5bef-414c-bb50-9b8d7f5afd91-kube-api-access-c87lf\") pod \"openstack-cell1-galera-0\" (UID: \"30b56f73-5bef-414c-bb50-9b8d7f5afd91\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.275969 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/30b56f73-5bef-414c-bb50-9b8d7f5afd91-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"30b56f73-5bef-414c-bb50-9b8d7f5afd91\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.276179 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/30b56f73-5bef-414c-bb50-9b8d7f5afd91-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"30b56f73-5bef-414c-bb50-9b8d7f5afd91\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.276387 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-40dac5a7-fe4f-410e-83a7-3f983e774d94\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-40dac5a7-fe4f-410e-83a7-3f983e774d94\") pod \"openstack-cell1-galera-0\" (UID: \"30b56f73-5bef-414c-bb50-9b8d7f5afd91\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.276422 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/30b56f73-5bef-414c-bb50-9b8d7f5afd91-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"30b56f73-5bef-414c-bb50-9b8d7f5afd91\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.276534 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30b56f73-5bef-414c-bb50-9b8d7f5afd91-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"30b56f73-5bef-414c-bb50-9b8d7f5afd91\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.276825 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/30b56f73-5bef-414c-bb50-9b8d7f5afd91-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"30b56f73-5bef-414c-bb50-9b8d7f5afd91\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.276956 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/30b56f73-5bef-414c-bb50-9b8d7f5afd91-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"30b56f73-5bef-414c-bb50-9b8d7f5afd91\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.378846 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c87lf\" (UniqueName: \"kubernetes.io/projected/30b56f73-5bef-414c-bb50-9b8d7f5afd91-kube-api-access-c87lf\") pod \"openstack-cell1-galera-0\" (UID: \"30b56f73-5bef-414c-bb50-9b8d7f5afd91\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.378925 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/30b56f73-5bef-414c-bb50-9b8d7f5afd91-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"30b56f73-5bef-414c-bb50-9b8d7f5afd91\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.378962 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/30b56f73-5bef-414c-bb50-9b8d7f5afd91-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"30b56f73-5bef-414c-bb50-9b8d7f5afd91\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.379007 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-40dac5a7-fe4f-410e-83a7-3f983e774d94\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-40dac5a7-fe4f-410e-83a7-3f983e774d94\") pod \"openstack-cell1-galera-0\" (UID: \"30b56f73-5bef-414c-bb50-9b8d7f5afd91\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.379028 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/30b56f73-5bef-414c-bb50-9b8d7f5afd91-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"30b56f73-5bef-414c-bb50-9b8d7f5afd91\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.379063 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30b56f73-5bef-414c-bb50-9b8d7f5afd91-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"30b56f73-5bef-414c-bb50-9b8d7f5afd91\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.379105 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/30b56f73-5bef-414c-bb50-9b8d7f5afd91-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"30b56f73-5bef-414c-bb50-9b8d7f5afd91\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.379672 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/30b56f73-5bef-414c-bb50-9b8d7f5afd91-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"30b56f73-5bef-414c-bb50-9b8d7f5afd91\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.380390 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/30b56f73-5bef-414c-bb50-9b8d7f5afd91-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"30b56f73-5bef-414c-bb50-9b8d7f5afd91\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.380640 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/30b56f73-5bef-414c-bb50-9b8d7f5afd91-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"30b56f73-5bef-414c-bb50-9b8d7f5afd91\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.381163 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/30b56f73-5bef-414c-bb50-9b8d7f5afd91-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"30b56f73-5bef-414c-bb50-9b8d7f5afd91\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.381876 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/30b56f73-5bef-414c-bb50-9b8d7f5afd91-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"30b56f73-5bef-414c-bb50-9b8d7f5afd91\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.385496 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30b56f73-5bef-414c-bb50-9b8d7f5afd91-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"30b56f73-5bef-414c-bb50-9b8d7f5afd91\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.386035 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/30b56f73-5bef-414c-bb50-9b8d7f5afd91-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"30b56f73-5bef-414c-bb50-9b8d7f5afd91\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.386589 4857 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.386619 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-40dac5a7-fe4f-410e-83a7-3f983e774d94\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-40dac5a7-fe4f-410e-83a7-3f983e774d94\") pod \"openstack-cell1-galera-0\" (UID: \"30b56f73-5bef-414c-bb50-9b8d7f5afd91\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/5d5ea3b69497f3abc711cdf45a59ee4e30ad0dbfca2e92a808a5a7b6e2397ad2/globalmount\"" pod="openstack/openstack-cell1-galera-0"
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.398526 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c87lf\" (UniqueName: \"kubernetes.io/projected/30b56f73-5bef-414c-bb50-9b8d7f5afd91-kube-api-access-c87lf\") pod \"openstack-cell1-galera-0\" (UID: \"30b56f73-5bef-414c-bb50-9b8d7f5afd91\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.413513 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-40dac5a7-fe4f-410e-83a7-3f983e774d94\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-40dac5a7-fe4f-410e-83a7-3f983e774d94\") pod \"openstack-cell1-galera-0\" (UID: \"30b56f73-5bef-414c-bb50-9b8d7f5afd91\") " pod="openstack/openstack-cell1-galera-0"
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.476557 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0"
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.745432 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"3b571b20-3451-40e3-90dd-100d2d8b7403","Type":"ContainerStarted","Data":"cc66c8d0e197e347dbf30493e0f10508d265c27f001d9ccdbcc87fdbeda7d9d2"}
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.748129 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"f952e7bb-8570-4caf-bb87-e84cd31506b9","Type":"ContainerStarted","Data":"cb0baf10275d4325e2e799ff1c3ec2919a3d1df76f1f6304579caefd33d1eda8"}
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.756618 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"55f0c5ec-efb9-4049-82fb-2b33d503e84e","Type":"ContainerStarted","Data":"0783d96fde439ce52cb5e2ac24eb8c18a9f847b257aef1ae8145b91c94152547"}
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.756646 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"55f0c5ec-efb9-4049-82fb-2b33d503e84e","Type":"ContainerStarted","Data":"ad49d7eb313bbc522005a946c9c11d3c67a25bbdab0845db4b37a5e8aa711e33"}
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.757220 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0"
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.827288 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=1.827239716 podStartE2EDuration="1.827239716s" podCreationTimestamp="2025-11-28 14:50:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 14:50:38.813663125 +0000 UTC m=+4888.937604562" watchObservedRunningTime="2025-11-28 14:50:38.827239716 +0000 UTC m=+4888.951181153"
Nov 28 14:50:38 crc kubenswrapper[4857]: I1128 14:50:38.928558 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Nov 28 14:50:39 crc kubenswrapper[4857]: I1128 14:50:39.766364 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"30b56f73-5bef-414c-bb50-9b8d7f5afd91","Type":"ContainerStarted","Data":"132cf78d908b7bc164a1e455d76bfadcecb4b50558dcfd2fbfc5632dd72941e2"}
Nov 28 14:50:39 crc kubenswrapper[4857]: I1128 14:50:39.766844 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"30b56f73-5bef-414c-bb50-9b8d7f5afd91","Type":"ContainerStarted","Data":"999826fc46dce60cef3676fea253951648feb4e926012b13ad1a607ce1ac1327"}
Nov 28 14:50:41 crc kubenswrapper[4857]: I1128 14:50:41.308479 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 14:50:41 crc kubenswrapper[4857]: I1128 14:50:41.308914 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 14:50:41 crc kubenswrapper[4857]: I1128 14:50:41.784267 4857 generic.go:334] "Generic (PLEG): container finished" podID="f952e7bb-8570-4caf-bb87-e84cd31506b9" containerID="cb0baf10275d4325e2e799ff1c3ec2919a3d1df76f1f6304579caefd33d1eda8" exitCode=0
Nov 28 14:50:41 crc kubenswrapper[4857]: I1128 14:50:41.784331 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"f952e7bb-8570-4caf-bb87-e84cd31506b9","Type":"ContainerDied","Data":"cb0baf10275d4325e2e799ff1c3ec2919a3d1df76f1f6304579caefd33d1eda8"}
Nov 28 14:50:42 crc kubenswrapper[4857]: I1128 14:50:42.796357 4857 generic.go:334] "Generic (PLEG): container finished" podID="30b56f73-5bef-414c-bb50-9b8d7f5afd91" containerID="132cf78d908b7bc164a1e455d76bfadcecb4b50558dcfd2fbfc5632dd72941e2" exitCode=0
Nov 28 14:50:42 crc kubenswrapper[4857]: I1128 14:50:42.796412 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"30b56f73-5bef-414c-bb50-9b8d7f5afd91","Type":"ContainerDied","Data":"132cf78d908b7bc164a1e455d76bfadcecb4b50558dcfd2fbfc5632dd72941e2"}
Nov 28 14:50:42 crc kubenswrapper[4857]: I1128 14:50:42.801912 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"f952e7bb-8570-4caf-bb87-e84cd31506b9","Type":"ContainerStarted","Data":"2c6508f38462a1d56b224a79f82797e2d696547703361abf10b6697ca8813aff"}
Nov 28 14:50:42 crc kubenswrapper[4857]: I1128 14:50:42.863473 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=7.863435229 podStartE2EDuration="7.863435229s" podCreationTimestamp="2025-11-28 14:50:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 14:50:42.85370118 +0000 UTC m=+4892.977642717" watchObservedRunningTime="2025-11-28 14:50:42.863435229 +0000 UTC m=+4892.987376706"
Nov 28 14:50:43 crc kubenswrapper[4857]: I1128 14:50:43.809754 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"30b56f73-5bef-414c-bb50-9b8d7f5afd91","Type":"ContainerStarted","Data":"376c9f4c719cc6091e50447c876919633ce5e1fcbabd7ad9e253deea7283e33e"}
Nov 28 14:50:43 crc kubenswrapper[4857]: I1128 14:50:43.854850 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=6.854809928 podStartE2EDuration="6.854809928s" podCreationTimestamp="2025-11-28 14:50:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 14:50:43.840532218 +0000 UTC m=+4893.964473665" watchObservedRunningTime="2025-11-28 14:50:43.854809928 +0000 UTC m=+4893.978751405"
Nov 28 14:50:44 crc kubenswrapper[4857]: I1128 14:50:44.792260 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-98ddfc8f-mt6hz"
Nov 28 14:50:44 crc kubenswrapper[4857]: I1128 14:50:44.802065 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5d7b5456f5-szjdl"
Nov 28 14:50:44 crc kubenswrapper[4857]: I1128 14:50:44.874527 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5d7b5456f5-szjdl"]
Nov 28 14:50:44 crc kubenswrapper[4857]: I1128 14:50:44.874842 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5d7b5456f5-szjdl" podUID="6d048a01-3774-460f-be30-35633e7fca67" containerName="dnsmasq-dns" containerID="cri-o://10baeb9bddd8f29c9b2bf0bfd9b6ed39bfdd56d56a27797c55acb177f24cd068" gracePeriod=10
Nov 28 14:50:45 crc kubenswrapper[4857]: I1128 14:50:45.407612 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d7b5456f5-szjdl"
Nov 28 14:50:45 crc kubenswrapper[4857]: I1128 14:50:45.498001 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d048a01-3774-460f-be30-35633e7fca67-config\") pod \"6d048a01-3774-460f-be30-35633e7fca67\" (UID: \"6d048a01-3774-460f-be30-35633e7fca67\") "
Nov 28 14:50:45 crc kubenswrapper[4857]: I1128 14:50:45.498408 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6d048a01-3774-460f-be30-35633e7fca67-dns-svc\") pod \"6d048a01-3774-460f-be30-35633e7fca67\" (UID: \"6d048a01-3774-460f-be30-35633e7fca67\") "
Nov 28 14:50:45 crc kubenswrapper[4857]: I1128 14:50:45.498564 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqf8k\" (UniqueName: \"kubernetes.io/projected/6d048a01-3774-460f-be30-35633e7fca67-kube-api-access-fqf8k\") pod \"6d048a01-3774-460f-be30-35633e7fca67\" (UID: \"6d048a01-3774-460f-be30-35633e7fca67\") "
Nov 28 14:50:45 crc kubenswrapper[4857]: I1128 14:50:45.504910 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d048a01-3774-460f-be30-35633e7fca67-kube-api-access-fqf8k" (OuterVolumeSpecName: "kube-api-access-fqf8k") pod "6d048a01-3774-460f-be30-35633e7fca67" (UID: "6d048a01-3774-460f-be30-35633e7fca67"). InnerVolumeSpecName "kube-api-access-fqf8k". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 14:50:45 crc kubenswrapper[4857]: I1128 14:50:45.543586 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d048a01-3774-460f-be30-35633e7fca67-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6d048a01-3774-460f-be30-35633e7fca67" (UID: "6d048a01-3774-460f-be30-35633e7fca67"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 14:50:45 crc kubenswrapper[4857]: I1128 14:50:45.544624 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d048a01-3774-460f-be30-35633e7fca67-config" (OuterVolumeSpecName: "config") pod "6d048a01-3774-460f-be30-35633e7fca67" (UID: "6d048a01-3774-460f-be30-35633e7fca67"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 14:50:45 crc kubenswrapper[4857]: I1128 14:50:45.599857 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6d048a01-3774-460f-be30-35633e7fca67-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 28 14:50:45 crc kubenswrapper[4857]: I1128 14:50:45.600269 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqf8k\" (UniqueName: \"kubernetes.io/projected/6d048a01-3774-460f-be30-35633e7fca67-kube-api-access-fqf8k\") on node \"crc\" DevicePath \"\""
Nov 28 14:50:45 crc kubenswrapper[4857]: I1128 14:50:45.600361 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d048a01-3774-460f-be30-35633e7fca67-config\") on node \"crc\" DevicePath \"\""
Nov 28 14:50:45 crc kubenswrapper[4857]: I1128 14:50:45.833633 4857 generic.go:334] "Generic (PLEG): container finished" podID="6d048a01-3774-460f-be30-35633e7fca67" containerID="10baeb9bddd8f29c9b2bf0bfd9b6ed39bfdd56d56a27797c55acb177f24cd068" exitCode=0
Nov 28 14:50:45 crc kubenswrapper[4857]: I1128 14:50:45.833713 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d7b5456f5-szjdl" event={"ID":"6d048a01-3774-460f-be30-35633e7fca67","Type":"ContainerDied","Data":"10baeb9bddd8f29c9b2bf0bfd9b6ed39bfdd56d56a27797c55acb177f24cd068"}
Nov 28 14:50:45 crc kubenswrapper[4857]: I1128 14:50:45.834519 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d7b5456f5-szjdl" event={"ID":"6d048a01-3774-460f-be30-35633e7fca67","Type":"ContainerDied","Data":"0bc3eb9840d521b87e200bbeda7c1cc46ccd9b2609d28210a8ad7611a1ebe6bc"}
Nov 28 14:50:45 crc kubenswrapper[4857]: I1128 14:50:45.834605 4857 scope.go:117] "RemoveContainer" containerID="10baeb9bddd8f29c9b2bf0bfd9b6ed39bfdd56d56a27797c55acb177f24cd068"
Nov 28 14:50:45 crc kubenswrapper[4857]: I1128 14:50:45.833787 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5d7b5456f5-szjdl" Nov 28 14:50:45 crc kubenswrapper[4857]: I1128 14:50:45.857860 4857 scope.go:117] "RemoveContainer" containerID="d9a717ff56151afd8519c47c1c1a5c82d0b9d6b011a3cb854a14d64f5d44f563" Nov 28 14:50:45 crc kubenswrapper[4857]: I1128 14:50:45.870781 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5d7b5456f5-szjdl"] Nov 28 14:50:45 crc kubenswrapper[4857]: I1128 14:50:45.876671 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5d7b5456f5-szjdl"] Nov 28 14:50:45 crc kubenswrapper[4857]: I1128 14:50:45.899182 4857 scope.go:117] "RemoveContainer" containerID="10baeb9bddd8f29c9b2bf0bfd9b6ed39bfdd56d56a27797c55acb177f24cd068" Nov 28 14:50:45 crc kubenswrapper[4857]: E1128 14:50:45.900589 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"10baeb9bddd8f29c9b2bf0bfd9b6ed39bfdd56d56a27797c55acb177f24cd068\": container with ID starting with 10baeb9bddd8f29c9b2bf0bfd9b6ed39bfdd56d56a27797c55acb177f24cd068 not found: ID does not exist" containerID="10baeb9bddd8f29c9b2bf0bfd9b6ed39bfdd56d56a27797c55acb177f24cd068" Nov 28 14:50:45 crc kubenswrapper[4857]: I1128 14:50:45.900648 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"10baeb9bddd8f29c9b2bf0bfd9b6ed39bfdd56d56a27797c55acb177f24cd068"} err="failed to get container status \"10baeb9bddd8f29c9b2bf0bfd9b6ed39bfdd56d56a27797c55acb177f24cd068\": rpc error: code = NotFound desc = could not find container \"10baeb9bddd8f29c9b2bf0bfd9b6ed39bfdd56d56a27797c55acb177f24cd068\": container with ID starting with 10baeb9bddd8f29c9b2bf0bfd9b6ed39bfdd56d56a27797c55acb177f24cd068 not found: ID does not exist" Nov 28 14:50:45 crc kubenswrapper[4857]: I1128 14:50:45.900689 4857 scope.go:117] "RemoveContainer" containerID="d9a717ff56151afd8519c47c1c1a5c82d0b9d6b011a3cb854a14d64f5d44f563" Nov 28 14:50:45 crc kubenswrapper[4857]: E1128 14:50:45.901234 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d9a717ff56151afd8519c47c1c1a5c82d0b9d6b011a3cb854a14d64f5d44f563\": container with ID starting with d9a717ff56151afd8519c47c1c1a5c82d0b9d6b011a3cb854a14d64f5d44f563 not found: ID does not exist" containerID="d9a717ff56151afd8519c47c1c1a5c82d0b9d6b011a3cb854a14d64f5d44f563" Nov 28 14:50:45 crc kubenswrapper[4857]: I1128 14:50:45.901288 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9a717ff56151afd8519c47c1c1a5c82d0b9d6b011a3cb854a14d64f5d44f563"} err="failed to get container status \"d9a717ff56151afd8519c47c1c1a5c82d0b9d6b011a3cb854a14d64f5d44f563\": rpc error: code = NotFound desc = could not find container \"d9a717ff56151afd8519c47c1c1a5c82d0b9d6b011a3cb854a14d64f5d44f563\": container with ID starting with d9a717ff56151afd8519c47c1c1a5c82d0b9d6b011a3cb854a14d64f5d44f563 not found: ID does not exist" Nov 28 14:50:46 crc kubenswrapper[4857]: I1128 14:50:46.239165 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d048a01-3774-460f-be30-35633e7fca67" path="/var/lib/kubelet/pods/6d048a01-3774-460f-be30-35633e7fca67/volumes" Nov 28 14:50:47 crc kubenswrapper[4857]: I1128 14:50:47.145552 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Nov 28 14:50:47 crc kubenswrapper[4857]: I1128 14:50:47.145615 4857 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Nov 28 14:50:47 crc kubenswrapper[4857]: I1128 14:50:47.542392 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Nov 28 14:50:48 crc kubenswrapper[4857]: I1128 14:50:48.477857 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Nov 28 14:50:48 crc kubenswrapper[4857]: I1128 14:50:48.478481 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Nov 28 14:50:49 crc kubenswrapper[4857]: I1128 14:50:49.329480 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Nov 28 14:50:49 crc kubenswrapper[4857]: I1128 14:50:49.422808 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 28 14:50:51 crc kubenswrapper[4857]: I1128 14:50:51.071555 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Nov 28 14:50:51 crc kubenswrapper[4857]: I1128 14:50:51.152836 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Nov 28 14:51:10 crc kubenswrapper[4857]: I1128 14:51:10.061613 4857 generic.go:334] "Generic (PLEG): container finished" podID="87a4c792-2171-4ac3-a785-497e679f4aff" containerID="8f950633b444675ab0bb46cc1134c5058135e8aaeae9ae0eae914da200e0ca9b" exitCode=0 Nov 28 14:51:10 crc kubenswrapper[4857]: I1128 14:51:10.061689 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"87a4c792-2171-4ac3-a785-497e679f4aff","Type":"ContainerDied","Data":"8f950633b444675ab0bb46cc1134c5058135e8aaeae9ae0eae914da200e0ca9b"} Nov 28 14:51:10 crc kubenswrapper[4857]: I1128 14:51:10.064517 4857 generic.go:334] "Generic (PLEG): container finished" podID="3b571b20-3451-40e3-90dd-100d2d8b7403" containerID="cc66c8d0e197e347dbf30493e0f10508d265c27f001d9ccdbcc87fdbeda7d9d2" exitCode=0 Nov 28 14:51:10 crc kubenswrapper[4857]: I1128 14:51:10.064569 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"3b571b20-3451-40e3-90dd-100d2d8b7403","Type":"ContainerDied","Data":"cc66c8d0e197e347dbf30493e0f10508d265c27f001d9ccdbcc87fdbeda7d9d2"} Nov 28 14:51:11 crc kubenswrapper[4857]: I1128 14:51:11.076111 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"87a4c792-2171-4ac3-a785-497e679f4aff","Type":"ContainerStarted","Data":"3c3029972ff53e26ba382fbe00d3522978329e793f7af37eac559cdf886050c4"} Nov 28 14:51:11 crc kubenswrapper[4857]: I1128 14:51:11.078730 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 28 14:51:11 crc kubenswrapper[4857]: I1128 14:51:11.081338 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"3b571b20-3451-40e3-90dd-100d2d8b7403","Type":"ContainerStarted","Data":"298bf42f39bc260228210ccfa03e51da236eec6a3e28773740665735f8637399"} Nov 28 14:51:11 crc kubenswrapper[4857]: I1128 14:51:11.082482 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 28 14:51:11 crc kubenswrapper[4857]: I1128 14:51:11.104812 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/rabbitmq-server-0" podStartSLOduration=37.104787697 podStartE2EDuration="37.104787697s" podCreationTimestamp="2025-11-28 14:50:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 14:51:11.099813655 +0000 UTC m=+4921.223755092" watchObservedRunningTime="2025-11-28 14:51:11.104787697 +0000 UTC m=+4921.228729134" Nov 28 14:51:11 crc kubenswrapper[4857]: I1128 14:51:11.129464 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=37.129443554 podStartE2EDuration="37.129443554s" podCreationTimestamp="2025-11-28 14:50:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 14:51:11.126233619 +0000 UTC m=+4921.250175066" watchObservedRunningTime="2025-11-28 14:51:11.129443554 +0000 UTC m=+4921.253384981" Nov 28 14:51:11 crc kubenswrapper[4857]: I1128 14:51:11.309042 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 14:51:11 crc kubenswrapper[4857]: I1128 14:51:11.309114 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 14:51:25 crc kubenswrapper[4857]: I1128 14:51:25.664775 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 28 14:51:26 crc kubenswrapper[4857]: I1128 14:51:26.272563 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 28 14:51:31 crc kubenswrapper[4857]: I1128 14:51:31.258485 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5b7946d7b9-h9n7d"] Nov 28 14:51:31 crc kubenswrapper[4857]: E1128 14:51:31.259424 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d048a01-3774-460f-be30-35633e7fca67" containerName="init" Nov 28 14:51:31 crc kubenswrapper[4857]: I1128 14:51:31.259444 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d048a01-3774-460f-be30-35633e7fca67" containerName="init" Nov 28 14:51:31 crc kubenswrapper[4857]: E1128 14:51:31.259476 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d048a01-3774-460f-be30-35633e7fca67" containerName="dnsmasq-dns" Nov 28 14:51:31 crc kubenswrapper[4857]: I1128 14:51:31.259485 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d048a01-3774-460f-be30-35633e7fca67" containerName="dnsmasq-dns" Nov 28 14:51:31 crc kubenswrapper[4857]: I1128 14:51:31.259653 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d048a01-3774-460f-be30-35633e7fca67" containerName="dnsmasq-dns" Nov 28 14:51:31 crc kubenswrapper[4857]: I1128 14:51:31.260662 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b7946d7b9-h9n7d" Nov 28 14:51:31 crc kubenswrapper[4857]: I1128 14:51:31.272824 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b7946d7b9-h9n7d"] Nov 28 14:51:31 crc kubenswrapper[4857]: I1128 14:51:31.318910 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/60abd7c6-2682-47cb-a624-108b876e1988-config\") pod \"dnsmasq-dns-5b7946d7b9-h9n7d\" (UID: \"60abd7c6-2682-47cb-a624-108b876e1988\") " pod="openstack/dnsmasq-dns-5b7946d7b9-h9n7d" Nov 28 14:51:31 crc kubenswrapper[4857]: I1128 14:51:31.319007 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/60abd7c6-2682-47cb-a624-108b876e1988-dns-svc\") pod \"dnsmasq-dns-5b7946d7b9-h9n7d\" (UID: \"60abd7c6-2682-47cb-a624-108b876e1988\") " pod="openstack/dnsmasq-dns-5b7946d7b9-h9n7d" Nov 28 14:51:31 crc kubenswrapper[4857]: I1128 14:51:31.319049 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-llqfg\" (UniqueName: \"kubernetes.io/projected/60abd7c6-2682-47cb-a624-108b876e1988-kube-api-access-llqfg\") pod \"dnsmasq-dns-5b7946d7b9-h9n7d\" (UID: \"60abd7c6-2682-47cb-a624-108b876e1988\") " pod="openstack/dnsmasq-dns-5b7946d7b9-h9n7d" Nov 28 14:51:31 crc kubenswrapper[4857]: I1128 14:51:31.420432 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/60abd7c6-2682-47cb-a624-108b876e1988-dns-svc\") pod \"dnsmasq-dns-5b7946d7b9-h9n7d\" (UID: \"60abd7c6-2682-47cb-a624-108b876e1988\") " pod="openstack/dnsmasq-dns-5b7946d7b9-h9n7d" Nov 28 14:51:31 crc kubenswrapper[4857]: I1128 14:51:31.420540 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-llqfg\" (UniqueName: \"kubernetes.io/projected/60abd7c6-2682-47cb-a624-108b876e1988-kube-api-access-llqfg\") pod \"dnsmasq-dns-5b7946d7b9-h9n7d\" (UID: \"60abd7c6-2682-47cb-a624-108b876e1988\") " pod="openstack/dnsmasq-dns-5b7946d7b9-h9n7d" Nov 28 14:51:31 crc kubenswrapper[4857]: I1128 14:51:31.420671 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/60abd7c6-2682-47cb-a624-108b876e1988-config\") pod \"dnsmasq-dns-5b7946d7b9-h9n7d\" (UID: \"60abd7c6-2682-47cb-a624-108b876e1988\") " pod="openstack/dnsmasq-dns-5b7946d7b9-h9n7d" Nov 28 14:51:31 crc kubenswrapper[4857]: I1128 14:51:31.421864 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/60abd7c6-2682-47cb-a624-108b876e1988-dns-svc\") pod \"dnsmasq-dns-5b7946d7b9-h9n7d\" (UID: \"60abd7c6-2682-47cb-a624-108b876e1988\") " pod="openstack/dnsmasq-dns-5b7946d7b9-h9n7d" Nov 28 14:51:31 crc kubenswrapper[4857]: I1128 14:51:31.421890 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/60abd7c6-2682-47cb-a624-108b876e1988-config\") pod \"dnsmasq-dns-5b7946d7b9-h9n7d\" (UID: \"60abd7c6-2682-47cb-a624-108b876e1988\") " pod="openstack/dnsmasq-dns-5b7946d7b9-h9n7d" Nov 28 14:51:31 crc kubenswrapper[4857]: I1128 14:51:31.444041 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-llqfg\" (UniqueName: 
\"kubernetes.io/projected/60abd7c6-2682-47cb-a624-108b876e1988-kube-api-access-llqfg\") pod \"dnsmasq-dns-5b7946d7b9-h9n7d\" (UID: \"60abd7c6-2682-47cb-a624-108b876e1988\") " pod="openstack/dnsmasq-dns-5b7946d7b9-h9n7d" Nov 28 14:51:31 crc kubenswrapper[4857]: I1128 14:51:31.584302 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b7946d7b9-h9n7d" Nov 28 14:51:31 crc kubenswrapper[4857]: I1128 14:51:31.874763 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b7946d7b9-h9n7d"] Nov 28 14:51:32 crc kubenswrapper[4857]: I1128 14:51:32.093979 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 14:51:32 crc kubenswrapper[4857]: I1128 14:51:32.250462 4857 generic.go:334] "Generic (PLEG): container finished" podID="60abd7c6-2682-47cb-a624-108b876e1988" containerID="3456c92fa9ac98bb0eda08d107ff5ab1deabca3d28be4b613174c15a28c7f295" exitCode=0 Nov 28 14:51:32 crc kubenswrapper[4857]: I1128 14:51:32.250524 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b7946d7b9-h9n7d" event={"ID":"60abd7c6-2682-47cb-a624-108b876e1988","Type":"ContainerDied","Data":"3456c92fa9ac98bb0eda08d107ff5ab1deabca3d28be4b613174c15a28c7f295"} Nov 28 14:51:32 crc kubenswrapper[4857]: I1128 14:51:32.250562 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b7946d7b9-h9n7d" event={"ID":"60abd7c6-2682-47cb-a624-108b876e1988","Type":"ContainerStarted","Data":"581af98d37924bd20f175afe979b9a1cecd3ccac0bd665a4644c9df039d88263"} Nov 28 14:51:32 crc kubenswrapper[4857]: I1128 14:51:32.677519 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 14:51:33 crc kubenswrapper[4857]: I1128 14:51:33.260178 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b7946d7b9-h9n7d" event={"ID":"60abd7c6-2682-47cb-a624-108b876e1988","Type":"ContainerStarted","Data":"3fa40ccadd6faab302ff4e7393bac4e81cf391fa652aa15d59c8c82ef1df6d71"} Nov 28 14:51:33 crc kubenswrapper[4857]: I1128 14:51:33.260334 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5b7946d7b9-h9n7d" Nov 28 14:51:33 crc kubenswrapper[4857]: I1128 14:51:33.290233 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5b7946d7b9-h9n7d" podStartSLOduration=2.290206669 podStartE2EDuration="2.290206669s" podCreationTimestamp="2025-11-28 14:51:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 14:51:33.285487463 +0000 UTC m=+4943.409428920" watchObservedRunningTime="2025-11-28 14:51:33.290206669 +0000 UTC m=+4943.414148106" Nov 28 14:51:34 crc kubenswrapper[4857]: I1128 14:51:34.241718 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="87a4c792-2171-4ac3-a785-497e679f4aff" containerName="rabbitmq" containerID="cri-o://3c3029972ff53e26ba382fbe00d3522978329e793f7af37eac559cdf886050c4" gracePeriod=604798 Nov 28 14:51:34 crc kubenswrapper[4857]: I1128 14:51:34.512414 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="3b571b20-3451-40e3-90dd-100d2d8b7403" containerName="rabbitmq" containerID="cri-o://298bf42f39bc260228210ccfa03e51da236eec6a3e28773740665735f8637399" gracePeriod=604799 Nov 28 14:51:35 crc 
kubenswrapper[4857]: I1128 14:51:35.663933 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="87a4c792-2171-4ac3-a785-497e679f4aff" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.241:5672: connect: connection refused" Nov 28 14:51:36 crc kubenswrapper[4857]: I1128 14:51:36.271758 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="3b571b20-3451-40e3-90dd-100d2d8b7403" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.242:5672: connect: connection refused" Nov 28 14:51:40 crc kubenswrapper[4857]: I1128 14:51:40.865914 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.042226 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/87a4c792-2171-4ac3-a785-497e679f4aff-rabbitmq-plugins\") pod \"87a4c792-2171-4ac3-a785-497e679f4aff\" (UID: \"87a4c792-2171-4ac3-a785-497e679f4aff\") " Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.042342 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/87a4c792-2171-4ac3-a785-497e679f4aff-pod-info\") pod \"87a4c792-2171-4ac3-a785-497e679f4aff\" (UID: \"87a4c792-2171-4ac3-a785-497e679f4aff\") " Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.042393 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/87a4c792-2171-4ac3-a785-497e679f4aff-erlang-cookie-secret\") pod \"87a4c792-2171-4ac3-a785-497e679f4aff\" (UID: \"87a4c792-2171-4ac3-a785-497e679f4aff\") " Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.042434 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/87a4c792-2171-4ac3-a785-497e679f4aff-rabbitmq-confd\") pod \"87a4c792-2171-4ac3-a785-497e679f4aff\" (UID: \"87a4c792-2171-4ac3-a785-497e679f4aff\") " Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.042560 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/87a4c792-2171-4ac3-a785-497e679f4aff-server-conf\") pod \"87a4c792-2171-4ac3-a785-497e679f4aff\" (UID: \"87a4c792-2171-4ac3-a785-497e679f4aff\") " Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.042588 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/87a4c792-2171-4ac3-a785-497e679f4aff-rabbitmq-erlang-cookie\") pod \"87a4c792-2171-4ac3-a785-497e679f4aff\" (UID: \"87a4c792-2171-4ac3-a785-497e679f4aff\") " Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.043053 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-00afcd83-f129-451c-bd44-c1fcd8d77b2f\") pod \"87a4c792-2171-4ac3-a785-497e679f4aff\" (UID: \"87a4c792-2171-4ac3-a785-497e679f4aff\") " Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.043150 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sp8wq\" (UniqueName: 
\"kubernetes.io/projected/87a4c792-2171-4ac3-a785-497e679f4aff-kube-api-access-sp8wq\") pod \"87a4c792-2171-4ac3-a785-497e679f4aff\" (UID: \"87a4c792-2171-4ac3-a785-497e679f4aff\") " Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.043177 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/87a4c792-2171-4ac3-a785-497e679f4aff-plugins-conf\") pod \"87a4c792-2171-4ac3-a785-497e679f4aff\" (UID: \"87a4c792-2171-4ac3-a785-497e679f4aff\") " Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.046089 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/87a4c792-2171-4ac3-a785-497e679f4aff-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "87a4c792-2171-4ac3-a785-497e679f4aff" (UID: "87a4c792-2171-4ac3-a785-497e679f4aff"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.046135 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87a4c792-2171-4ac3-a785-497e679f4aff-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "87a4c792-2171-4ac3-a785-497e679f4aff" (UID: "87a4c792-2171-4ac3-a785-497e679f4aff"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.046310 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/87a4c792-2171-4ac3-a785-497e679f4aff-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "87a4c792-2171-4ac3-a785-497e679f4aff" (UID: "87a4c792-2171-4ac3-a785-497e679f4aff"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.050878 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87a4c792-2171-4ac3-a785-497e679f4aff-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "87a4c792-2171-4ac3-a785-497e679f4aff" (UID: "87a4c792-2171-4ac3-a785-497e679f4aff"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.051117 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87a4c792-2171-4ac3-a785-497e679f4aff-kube-api-access-sp8wq" (OuterVolumeSpecName: "kube-api-access-sp8wq") pod "87a4c792-2171-4ac3-a785-497e679f4aff" (UID: "87a4c792-2171-4ac3-a785-497e679f4aff"). InnerVolumeSpecName "kube-api-access-sp8wq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.062650 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-00afcd83-f129-451c-bd44-c1fcd8d77b2f" (OuterVolumeSpecName: "persistence") pod "87a4c792-2171-4ac3-a785-497e679f4aff" (UID: "87a4c792-2171-4ac3-a785-497e679f4aff"). InnerVolumeSpecName "pvc-00afcd83-f129-451c-bd44-c1fcd8d77b2f". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.067353 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/87a4c792-2171-4ac3-a785-497e679f4aff-pod-info" (OuterVolumeSpecName: "pod-info") pod "87a4c792-2171-4ac3-a785-497e679f4aff" (UID: "87a4c792-2171-4ac3-a785-497e679f4aff"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.095500 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87a4c792-2171-4ac3-a785-497e679f4aff-server-conf" (OuterVolumeSpecName: "server-conf") pod "87a4c792-2171-4ac3-a785-497e679f4aff" (UID: "87a4c792-2171-4ac3-a785-497e679f4aff"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.107093 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.145643 4857 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/87a4c792-2171-4ac3-a785-497e679f4aff-server-conf\") on node \"crc\" DevicePath \"\"" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.145682 4857 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/87a4c792-2171-4ac3-a785-497e679f4aff-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.145722 4857 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-00afcd83-f129-451c-bd44-c1fcd8d77b2f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-00afcd83-f129-451c-bd44-c1fcd8d77b2f\") on node \"crc\" " Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.145741 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sp8wq\" (UniqueName: \"kubernetes.io/projected/87a4c792-2171-4ac3-a785-497e679f4aff-kube-api-access-sp8wq\") on node \"crc\" DevicePath \"\"" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.145752 4857 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/87a4c792-2171-4ac3-a785-497e679f4aff-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.145763 4857 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/87a4c792-2171-4ac3-a785-497e679f4aff-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.145773 4857 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/87a4c792-2171-4ac3-a785-497e679f4aff-pod-info\") on node \"crc\" DevicePath \"\"" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.145784 4857 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/87a4c792-2171-4ac3-a785-497e679f4aff-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.156608 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87a4c792-2171-4ac3-a785-497e679f4aff-rabbitmq-confd" 
(OuterVolumeSpecName: "rabbitmq-confd") pod "87a4c792-2171-4ac3-a785-497e679f4aff" (UID: "87a4c792-2171-4ac3-a785-497e679f4aff"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.173436 4857 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.173623 4857 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-00afcd83-f129-451c-bd44-c1fcd8d77b2f" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-00afcd83-f129-451c-bd44-c1fcd8d77b2f") on node "crc" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.246274 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3b571b20-3451-40e3-90dd-100d2d8b7403-rabbitmq-plugins\") pod \"3b571b20-3451-40e3-90dd-100d2d8b7403\" (UID: \"3b571b20-3451-40e3-90dd-100d2d8b7403\") " Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.246343 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rwgwl\" (UniqueName: \"kubernetes.io/projected/3b571b20-3451-40e3-90dd-100d2d8b7403-kube-api-access-rwgwl\") pod \"3b571b20-3451-40e3-90dd-100d2d8b7403\" (UID: \"3b571b20-3451-40e3-90dd-100d2d8b7403\") " Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.246384 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3b571b20-3451-40e3-90dd-100d2d8b7403-rabbitmq-confd\") pod \"3b571b20-3451-40e3-90dd-100d2d8b7403\" (UID: \"3b571b20-3451-40e3-90dd-100d2d8b7403\") " Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.246451 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3b571b20-3451-40e3-90dd-100d2d8b7403-server-conf\") pod \"3b571b20-3451-40e3-90dd-100d2d8b7403\" (UID: \"3b571b20-3451-40e3-90dd-100d2d8b7403\") " Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.246510 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3b571b20-3451-40e3-90dd-100d2d8b7403-rabbitmq-erlang-cookie\") pod \"3b571b20-3451-40e3-90dd-100d2d8b7403\" (UID: \"3b571b20-3451-40e3-90dd-100d2d8b7403\") " Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.246534 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3b571b20-3451-40e3-90dd-100d2d8b7403-erlang-cookie-secret\") pod \"3b571b20-3451-40e3-90dd-100d2d8b7403\" (UID: \"3b571b20-3451-40e3-90dd-100d2d8b7403\") " Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.246551 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3b571b20-3451-40e3-90dd-100d2d8b7403-plugins-conf\") pod \"3b571b20-3451-40e3-90dd-100d2d8b7403\" (UID: \"3b571b20-3451-40e3-90dd-100d2d8b7403\") " Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.246588 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3b571b20-3451-40e3-90dd-100d2d8b7403-pod-info\") pod \"3b571b20-3451-40e3-90dd-100d2d8b7403\" (UID: 
\"3b571b20-3451-40e3-90dd-100d2d8b7403\") " Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.246714 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7713a849-85c1-424e-8f74-404aa0c6f0d0\") pod \"3b571b20-3451-40e3-90dd-100d2d8b7403\" (UID: \"3b571b20-3451-40e3-90dd-100d2d8b7403\") " Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.247019 4857 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/87a4c792-2171-4ac3-a785-497e679f4aff-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.247039 4857 reconciler_common.go:293] "Volume detached for volume \"pvc-00afcd83-f129-451c-bd44-c1fcd8d77b2f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-00afcd83-f129-451c-bd44-c1fcd8d77b2f\") on node \"crc\" DevicePath \"\"" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.247176 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3b571b20-3451-40e3-90dd-100d2d8b7403-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "3b571b20-3451-40e3-90dd-100d2d8b7403" (UID: "3b571b20-3451-40e3-90dd-100d2d8b7403"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.247285 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3b571b20-3451-40e3-90dd-100d2d8b7403-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "3b571b20-3451-40e3-90dd-100d2d8b7403" (UID: "3b571b20-3451-40e3-90dd-100d2d8b7403"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.248452 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3b571b20-3451-40e3-90dd-100d2d8b7403-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "3b571b20-3451-40e3-90dd-100d2d8b7403" (UID: "3b571b20-3451-40e3-90dd-100d2d8b7403"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.250742 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/3b571b20-3451-40e3-90dd-100d2d8b7403-pod-info" (OuterVolumeSpecName: "pod-info") pod "3b571b20-3451-40e3-90dd-100d2d8b7403" (UID: "3b571b20-3451-40e3-90dd-100d2d8b7403"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.254351 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3b571b20-3451-40e3-90dd-100d2d8b7403-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "3b571b20-3451-40e3-90dd-100d2d8b7403" (UID: "3b571b20-3451-40e3-90dd-100d2d8b7403"). InnerVolumeSpecName "erlang-cookie-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.255877 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3b571b20-3451-40e3-90dd-100d2d8b7403-kube-api-access-rwgwl" (OuterVolumeSpecName: "kube-api-access-rwgwl") pod "3b571b20-3451-40e3-90dd-100d2d8b7403" (UID: "3b571b20-3451-40e3-90dd-100d2d8b7403"). InnerVolumeSpecName "kube-api-access-rwgwl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.259327 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7713a849-85c1-424e-8f74-404aa0c6f0d0" (OuterVolumeSpecName: "persistence") pod "3b571b20-3451-40e3-90dd-100d2d8b7403" (UID: "3b571b20-3451-40e3-90dd-100d2d8b7403"). InnerVolumeSpecName "pvc-7713a849-85c1-424e-8f74-404aa0c6f0d0". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.264622 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3b571b20-3451-40e3-90dd-100d2d8b7403-server-conf" (OuterVolumeSpecName: "server-conf") pod "3b571b20-3451-40e3-90dd-100d2d8b7403" (UID: "3b571b20-3451-40e3-90dd-100d2d8b7403"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.308860 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.308928 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.309006 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.309663 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e069878b8272165d675b3e5b9b4751408dfbaf5d46c3ee50c635da1d18076ac5"} pod="openshift-machine-config-operator/machine-config-daemon-dshsf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.309732 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" containerID="cri-o://e069878b8272165d675b3e5b9b4751408dfbaf5d46c3ee50c635da1d18076ac5" gracePeriod=600 Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.320661 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3b571b20-3451-40e3-90dd-100d2d8b7403-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "3b571b20-3451-40e3-90dd-100d2d8b7403" (UID: "3b571b20-3451-40e3-90dd-100d2d8b7403"). 
InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.341628 4857 generic.go:334] "Generic (PLEG): container finished" podID="87a4c792-2171-4ac3-a785-497e679f4aff" containerID="3c3029972ff53e26ba382fbe00d3522978329e793f7af37eac559cdf886050c4" exitCode=0 Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.341693 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"87a4c792-2171-4ac3-a785-497e679f4aff","Type":"ContainerDied","Data":"3c3029972ff53e26ba382fbe00d3522978329e793f7af37eac559cdf886050c4"} Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.341785 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"87a4c792-2171-4ac3-a785-497e679f4aff","Type":"ContainerDied","Data":"c0fb340ec96212a2fe4099aaf307fa20f6048765b806c65f0e29b1bd9afa9f4c"} Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.341812 4857 scope.go:117] "RemoveContainer" containerID="3c3029972ff53e26ba382fbe00d3522978329e793f7af37eac559cdf886050c4" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.341884 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.344395 4857 generic.go:334] "Generic (PLEG): container finished" podID="3b571b20-3451-40e3-90dd-100d2d8b7403" containerID="298bf42f39bc260228210ccfa03e51da236eec6a3e28773740665735f8637399" exitCode=0 Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.344437 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"3b571b20-3451-40e3-90dd-100d2d8b7403","Type":"ContainerDied","Data":"298bf42f39bc260228210ccfa03e51da236eec6a3e28773740665735f8637399"} Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.344464 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.344474 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"3b571b20-3451-40e3-90dd-100d2d8b7403","Type":"ContainerDied","Data":"0ed5d24d982efd10f243cf3395ab030760b380b70a0e09b915726c538bbdfd78"} Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.349304 4857 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3b571b20-3451-40e3-90dd-100d2d8b7403-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.349333 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rwgwl\" (UniqueName: \"kubernetes.io/projected/3b571b20-3451-40e3-90dd-100d2d8b7403-kube-api-access-rwgwl\") on node \"crc\" DevicePath \"\"" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.349348 4857 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3b571b20-3451-40e3-90dd-100d2d8b7403-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.349361 4857 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3b571b20-3451-40e3-90dd-100d2d8b7403-server-conf\") on node \"crc\" DevicePath \"\"" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.349370 4857 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3b571b20-3451-40e3-90dd-100d2d8b7403-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.349379 4857 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3b571b20-3451-40e3-90dd-100d2d8b7403-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.349388 4857 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3b571b20-3451-40e3-90dd-100d2d8b7403-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.349399 4857 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3b571b20-3451-40e3-90dd-100d2d8b7403-pod-info\") on node \"crc\" DevicePath \"\"" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.349432 4857 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-7713a849-85c1-424e-8f74-404aa0c6f0d0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7713a849-85c1-424e-8f74-404aa0c6f0d0\") on node \"crc\" " Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.363740 4857 scope.go:117] "RemoveContainer" containerID="8f950633b444675ab0bb46cc1134c5058135e8aaeae9ae0eae914da200e0ca9b" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.371539 4857 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.371739 4857 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-7713a849-85c1-424e-8f74-404aa0c6f0d0" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7713a849-85c1-424e-8f74-404aa0c6f0d0") on node "crc" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.384098 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.394127 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.406740 4857 scope.go:117] "RemoveContainer" containerID="3c3029972ff53e26ba382fbe00d3522978329e793f7af37eac559cdf886050c4" Nov 28 14:51:41 crc kubenswrapper[4857]: E1128 14:51:41.409656 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c3029972ff53e26ba382fbe00d3522978329e793f7af37eac559cdf886050c4\": container with ID starting with 3c3029972ff53e26ba382fbe00d3522978329e793f7af37eac559cdf886050c4 not found: ID does not exist" containerID="3c3029972ff53e26ba382fbe00d3522978329e793f7af37eac559cdf886050c4" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.409697 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c3029972ff53e26ba382fbe00d3522978329e793f7af37eac559cdf886050c4"} err="failed to get container status \"3c3029972ff53e26ba382fbe00d3522978329e793f7af37eac559cdf886050c4\": rpc error: code = NotFound desc = could not find container \"3c3029972ff53e26ba382fbe00d3522978329e793f7af37eac559cdf886050c4\": container with ID starting with 3c3029972ff53e26ba382fbe00d3522978329e793f7af37eac559cdf886050c4 not found: ID does not exist" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.409722 4857 scope.go:117] "RemoveContainer" containerID="8f950633b444675ab0bb46cc1134c5058135e8aaeae9ae0eae914da200e0ca9b" Nov 28 14:51:41 crc kubenswrapper[4857]: E1128 14:51:41.411772 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8f950633b444675ab0bb46cc1134c5058135e8aaeae9ae0eae914da200e0ca9b\": container with ID starting with 8f950633b444675ab0bb46cc1134c5058135e8aaeae9ae0eae914da200e0ca9b not found: ID does not exist" containerID="8f950633b444675ab0bb46cc1134c5058135e8aaeae9ae0eae914da200e0ca9b" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.411804 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f950633b444675ab0bb46cc1134c5058135e8aaeae9ae0eae914da200e0ca9b"} err="failed to get container status \"8f950633b444675ab0bb46cc1134c5058135e8aaeae9ae0eae914da200e0ca9b\": rpc error: code = NotFound desc = could not find container \"8f950633b444675ab0bb46cc1134c5058135e8aaeae9ae0eae914da200e0ca9b\": container with ID starting with 8f950633b444675ab0bb46cc1134c5058135e8aaeae9ae0eae914da200e0ca9b not found: ID does not exist" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.411823 4857 scope.go:117] "RemoveContainer" containerID="298bf42f39bc260228210ccfa03e51da236eec6a3e28773740665735f8637399" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.419133 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.439968 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.446089 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 14:51:41 crc kubenswrapper[4857]: E1128 14:51:41.446478 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b571b20-3451-40e3-90dd-100d2d8b7403" containerName="rabbitmq" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.446495 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b571b20-3451-40e3-90dd-100d2d8b7403" containerName="rabbitmq" Nov 28 14:51:41 crc kubenswrapper[4857]: E1128 14:51:41.446519 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87a4c792-2171-4ac3-a785-497e679f4aff" containerName="setup-container" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.446529 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="87a4c792-2171-4ac3-a785-497e679f4aff" containerName="setup-container" Nov 28 14:51:41 crc kubenswrapper[4857]: E1128 14:51:41.446554 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87a4c792-2171-4ac3-a785-497e679f4aff" containerName="rabbitmq" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.446562 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="87a4c792-2171-4ac3-a785-497e679f4aff" containerName="rabbitmq" Nov 28 14:51:41 crc kubenswrapper[4857]: E1128 14:51:41.446576 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b571b20-3451-40e3-90dd-100d2d8b7403" containerName="setup-container" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.446586 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b571b20-3451-40e3-90dd-100d2d8b7403" containerName="setup-container" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.446803 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="3b571b20-3451-40e3-90dd-100d2d8b7403" containerName="rabbitmq" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.446822 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="87a4c792-2171-4ac3-a785-497e679f4aff" containerName="rabbitmq" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.447931 4857 util.go:30] "No sandbox for pod can be found. 
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.451279 4857 scope.go:117] "RemoveContainer" containerID="cc66c8d0e197e347dbf30493e0f10508d265c27f001d9ccdbcc87fdbeda7d9d2"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.451378 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.451548 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.451673 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.451870 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.452083 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-88pth"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.454766 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.463891 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.465217 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.468604 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-2zqjt"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.468829 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.469019 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.469216 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.469382 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.470249 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.475326 4857 reconciler_common.go:293] "Volume detached for volume \"pvc-7713a849-85c1-424e-8f74-404aa0c6f0d0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7713a849-85c1-424e-8f74-404aa0c6f0d0\") on node \"crc\" DevicePath \"\""
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.484318 4857 scope.go:117] "RemoveContainer" containerID="298bf42f39bc260228210ccfa03e51da236eec6a3e28773740665735f8637399"
Nov 28 14:51:41 crc kubenswrapper[4857]: E1128 14:51:41.485007 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"298bf42f39bc260228210ccfa03e51da236eec6a3e28773740665735f8637399\": container with ID starting with 298bf42f39bc260228210ccfa03e51da236eec6a3e28773740665735f8637399 not found: ID does not exist" containerID="298bf42f39bc260228210ccfa03e51da236eec6a3e28773740665735f8637399"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.485066 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"298bf42f39bc260228210ccfa03e51da236eec6a3e28773740665735f8637399"} err="failed to get container status \"298bf42f39bc260228210ccfa03e51da236eec6a3e28773740665735f8637399\": rpc error: code = NotFound desc = could not find container \"298bf42f39bc260228210ccfa03e51da236eec6a3e28773740665735f8637399\": container with ID starting with 298bf42f39bc260228210ccfa03e51da236eec6a3e28773740665735f8637399 not found: ID does not exist"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.485102 4857 scope.go:117] "RemoveContainer" containerID="cc66c8d0e197e347dbf30493e0f10508d265c27f001d9ccdbcc87fdbeda7d9d2"
Nov 28 14:51:41 crc kubenswrapper[4857]: E1128 14:51:41.489699 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cc66c8d0e197e347dbf30493e0f10508d265c27f001d9ccdbcc87fdbeda7d9d2\": container with ID starting with cc66c8d0e197e347dbf30493e0f10508d265c27f001d9ccdbcc87fdbeda7d9d2 not found: ID does not exist" containerID="cc66c8d0e197e347dbf30493e0f10508d265c27f001d9ccdbcc87fdbeda7d9d2"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.489763 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cc66c8d0e197e347dbf30493e0f10508d265c27f001d9ccdbcc87fdbeda7d9d2"} err="failed to get container status \"cc66c8d0e197e347dbf30493e0f10508d265c27f001d9ccdbcc87fdbeda7d9d2\": rpc error: code = NotFound desc = could not find container \"cc66c8d0e197e347dbf30493e0f10508d265c27f001d9ccdbcc87fdbeda7d9d2\": container with ID starting with cc66c8d0e197e347dbf30493e0f10508d265c27f001d9ccdbcc87fdbeda7d9d2 not found: ID does not exist"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.576205 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f-pod-info\") pod \"rabbitmq-server-0\" (UID: \"28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.576544 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7rt7\" (UniqueName: \"kubernetes.io/projected/28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f-kube-api-access-w7rt7\") pod \"rabbitmq-server-0\" (UID: \"28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.576617 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.576699 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-00afcd83-f129-451c-bd44-c1fcd8d77b2f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-00afcd83-f129-451c-bd44-c1fcd8d77b2f\") pod \"rabbitmq-server-0\" (UID: \"28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.576721 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.576742 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.576768 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f-server-conf\") pod \"rabbitmq-server-0\" (UID: \"28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.576805 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.576823 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.587093 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5b7946d7b9-h9n7d"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.640032 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-98ddfc8f-mt6hz"]
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.640295 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-98ddfc8f-mt6hz" podUID="725dd93e-beeb-4412-9b51-0748187361f2" containerName="dnsmasq-dns" containerID="cri-o://6b997c2de871a07bbde36fc630efbcbb016972ba7f4035e7daf3ae9a7c5431ae" gracePeriod=10
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.678340 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.678401 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/bf2f5ade-a981-4a9f-891c-83fd9bb77414-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"bf2f5ade-a981-4a9f-891c-83fd9bb77414\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.678432 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.678472 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/bf2f5ade-a981-4a9f-891c-83fd9bb77414-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"bf2f5ade-a981-4a9f-891c-83fd9bb77414\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.678496 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f-pod-info\") pod \"rabbitmq-server-0\" (UID: \"28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.678625 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7rt7\" (UniqueName: \"kubernetes.io/projected/28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f-kube-api-access-w7rt7\") pod \"rabbitmq-server-0\" (UID: \"28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.678780 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-7713a849-85c1-424e-8f74-404aa0c6f0d0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7713a849-85c1-424e-8f74-404aa0c6f0d0\") pod \"rabbitmq-cell1-server-0\" (UID: \"bf2f5ade-a981-4a9f-891c-83fd9bb77414\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.678858 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.678890 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h54tt\" (UniqueName: \"kubernetes.io/projected/bf2f5ade-a981-4a9f-891c-83fd9bb77414-kube-api-access-h54tt\") pod \"rabbitmq-cell1-server-0\" (UID: \"bf2f5ade-a981-4a9f-891c-83fd9bb77414\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.678992 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/bf2f5ade-a981-4a9f-891c-83fd9bb77414-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"bf2f5ade-a981-4a9f-891c-83fd9bb77414\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.679044 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-00afcd83-f129-451c-bd44-c1fcd8d77b2f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-00afcd83-f129-451c-bd44-c1fcd8d77b2f\") pod \"rabbitmq-server-0\" (UID: \"28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.679074 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.679101 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/bf2f5ade-a981-4a9f-891c-83fd9bb77414-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"bf2f5ade-a981-4a9f-891c-83fd9bb77414\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.679144 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/bf2f5ade-a981-4a9f-891c-83fd9bb77414-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"bf2f5ade-a981-4a9f-891c-83fd9bb77414\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.679172 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.679239 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f-server-conf\") pod \"rabbitmq-server-0\" (UID: \"28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.679298 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/bf2f5ade-a981-4a9f-891c-83fd9bb77414-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"bf2f5ade-a981-4a9f-891c-83fd9bb77414\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.679990 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/bf2f5ade-a981-4a9f-891c-83fd9bb77414-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"bf2f5ade-a981-4a9f-891c-83fd9bb77414\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.680157 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.680987 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f\") " pod="openstack/rabbitmq-server-0"
Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.681544 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f\") " pod="openstack/rabbitmq-server-0"
\"28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f\") " pod="openstack/rabbitmq-server-0" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.682604 4857 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.682633 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-00afcd83-f129-451c-bd44-c1fcd8d77b2f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-00afcd83-f129-451c-bd44-c1fcd8d77b2f\") pod \"rabbitmq-server-0\" (UID: \"28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/583aeee6825a956232d19ae3704718122e85db39aa0e96108b6210a7edfa3471/globalmount\"" pod="openstack/rabbitmq-server-0" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.682679 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f-server-conf\") pod \"rabbitmq-server-0\" (UID: \"28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f\") " pod="openstack/rabbitmq-server-0" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.689904 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f-pod-info\") pod \"rabbitmq-server-0\" (UID: \"28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f\") " pod="openstack/rabbitmq-server-0" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.696122 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f\") " pod="openstack/rabbitmq-server-0" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.697508 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w7rt7\" (UniqueName: \"kubernetes.io/projected/28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f-kube-api-access-w7rt7\") pod \"rabbitmq-server-0\" (UID: \"28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f\") " pod="openstack/rabbitmq-server-0" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.697579 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f\") " pod="openstack/rabbitmq-server-0" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.720692 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-00afcd83-f129-451c-bd44-c1fcd8d77b2f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-00afcd83-f129-451c-bd44-c1fcd8d77b2f\") pod \"rabbitmq-server-0\" (UID: \"28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f\") " pod="openstack/rabbitmq-server-0" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.783922 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/bf2f5ade-a981-4a9f-891c-83fd9bb77414-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"bf2f5ade-a981-4a9f-891c-83fd9bb77414\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.784001 4857 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/bf2f5ade-a981-4a9f-891c-83fd9bb77414-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"bf2f5ade-a981-4a9f-891c-83fd9bb77414\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.784064 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-7713a849-85c1-424e-8f74-404aa0c6f0d0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7713a849-85c1-424e-8f74-404aa0c6f0d0\") pod \"rabbitmq-cell1-server-0\" (UID: \"bf2f5ade-a981-4a9f-891c-83fd9bb77414\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.784100 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h54tt\" (UniqueName: \"kubernetes.io/projected/bf2f5ade-a981-4a9f-891c-83fd9bb77414-kube-api-access-h54tt\") pod \"rabbitmq-cell1-server-0\" (UID: \"bf2f5ade-a981-4a9f-891c-83fd9bb77414\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.784128 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/bf2f5ade-a981-4a9f-891c-83fd9bb77414-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"bf2f5ade-a981-4a9f-891c-83fd9bb77414\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.784165 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/bf2f5ade-a981-4a9f-891c-83fd9bb77414-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"bf2f5ade-a981-4a9f-891c-83fd9bb77414\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.784183 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/bf2f5ade-a981-4a9f-891c-83fd9bb77414-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"bf2f5ade-a981-4a9f-891c-83fd9bb77414\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.784225 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/bf2f5ade-a981-4a9f-891c-83fd9bb77414-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"bf2f5ade-a981-4a9f-891c-83fd9bb77414\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.784245 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/bf2f5ade-a981-4a9f-891c-83fd9bb77414-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"bf2f5ade-a981-4a9f-891c-83fd9bb77414\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.786013 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/bf2f5ade-a981-4a9f-891c-83fd9bb77414-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"bf2f5ade-a981-4a9f-891c-83fd9bb77414\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.786625 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: 
\"kubernetes.io/empty-dir/bf2f5ade-a981-4a9f-891c-83fd9bb77414-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"bf2f5ade-a981-4a9f-891c-83fd9bb77414\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.786860 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/bf2f5ade-a981-4a9f-891c-83fd9bb77414-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"bf2f5ade-a981-4a9f-891c-83fd9bb77414\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.787654 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/bf2f5ade-a981-4a9f-891c-83fd9bb77414-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"bf2f5ade-a981-4a9f-891c-83fd9bb77414\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.788559 4857 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.791609 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-7713a849-85c1-424e-8f74-404aa0c6f0d0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7713a849-85c1-424e-8f74-404aa0c6f0d0\") pod \"rabbitmq-cell1-server-0\" (UID: \"bf2f5ade-a981-4a9f-891c-83fd9bb77414\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/748cc75642fca8791130b40037ed5ae311d1be97c9be64fd3b675883dd653e88/globalmount\"" pod="openstack/rabbitmq-cell1-server-0" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.790029 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/bf2f5ade-a981-4a9f-891c-83fd9bb77414-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"bf2f5ade-a981-4a9f-891c-83fd9bb77414\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.790118 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/bf2f5ade-a981-4a9f-891c-83fd9bb77414-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"bf2f5ade-a981-4a9f-891c-83fd9bb77414\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.792606 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/bf2f5ade-a981-4a9f-891c-83fd9bb77414-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"bf2f5ade-a981-4a9f-891c-83fd9bb77414\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.807596 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h54tt\" (UniqueName: \"kubernetes.io/projected/bf2f5ade-a981-4a9f-891c-83fd9bb77414-kube-api-access-h54tt\") pod \"rabbitmq-cell1-server-0\" (UID: \"bf2f5ade-a981-4a9f-891c-83fd9bb77414\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.823047 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-7713a849-85c1-424e-8f74-404aa0c6f0d0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7713a849-85c1-424e-8f74-404aa0c6f0d0\") pod 
\"rabbitmq-cell1-server-0\" (UID: \"bf2f5ade-a981-4a9f-891c-83fd9bb77414\") " pod="openstack/rabbitmq-cell1-server-0" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.881701 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 28 14:51:41 crc kubenswrapper[4857]: I1128 14:51:41.894330 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 28 14:51:42 crc kubenswrapper[4857]: I1128 14:51:42.071350 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-98ddfc8f-mt6hz" Nov 28 14:51:42 crc kubenswrapper[4857]: I1128 14:51:42.193037 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/725dd93e-beeb-4412-9b51-0748187361f2-dns-svc\") pod \"725dd93e-beeb-4412-9b51-0748187361f2\" (UID: \"725dd93e-beeb-4412-9b51-0748187361f2\") " Nov 28 14:51:42 crc kubenswrapper[4857]: I1128 14:51:42.193291 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sjf4x\" (UniqueName: \"kubernetes.io/projected/725dd93e-beeb-4412-9b51-0748187361f2-kube-api-access-sjf4x\") pod \"725dd93e-beeb-4412-9b51-0748187361f2\" (UID: \"725dd93e-beeb-4412-9b51-0748187361f2\") " Nov 28 14:51:42 crc kubenswrapper[4857]: I1128 14:51:42.193355 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/725dd93e-beeb-4412-9b51-0748187361f2-config\") pod \"725dd93e-beeb-4412-9b51-0748187361f2\" (UID: \"725dd93e-beeb-4412-9b51-0748187361f2\") " Nov 28 14:51:42 crc kubenswrapper[4857]: I1128 14:51:42.198122 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/725dd93e-beeb-4412-9b51-0748187361f2-kube-api-access-sjf4x" (OuterVolumeSpecName: "kube-api-access-sjf4x") pod "725dd93e-beeb-4412-9b51-0748187361f2" (UID: "725dd93e-beeb-4412-9b51-0748187361f2"). InnerVolumeSpecName "kube-api-access-sjf4x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:51:42 crc kubenswrapper[4857]: I1128 14:51:42.227771 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/725dd93e-beeb-4412-9b51-0748187361f2-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "725dd93e-beeb-4412-9b51-0748187361f2" (UID: "725dd93e-beeb-4412-9b51-0748187361f2"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 14:51:42 crc kubenswrapper[4857]: I1128 14:51:42.233618 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/725dd93e-beeb-4412-9b51-0748187361f2-config" (OuterVolumeSpecName: "config") pod "725dd93e-beeb-4412-9b51-0748187361f2" (UID: "725dd93e-beeb-4412-9b51-0748187361f2"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 14:51:42 crc kubenswrapper[4857]: I1128 14:51:42.238925 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3b571b20-3451-40e3-90dd-100d2d8b7403" path="/var/lib/kubelet/pods/3b571b20-3451-40e3-90dd-100d2d8b7403/volumes" Nov 28 14:51:42 crc kubenswrapper[4857]: I1128 14:51:42.239792 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87a4c792-2171-4ac3-a785-497e679f4aff" path="/var/lib/kubelet/pods/87a4c792-2171-4ac3-a785-497e679f4aff/volumes" Nov 28 14:51:42 crc kubenswrapper[4857]: I1128 14:51:42.296007 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sjf4x\" (UniqueName: \"kubernetes.io/projected/725dd93e-beeb-4412-9b51-0748187361f2-kube-api-access-sjf4x\") on node \"crc\" DevicePath \"\"" Nov 28 14:51:42 crc kubenswrapper[4857]: I1128 14:51:42.296046 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/725dd93e-beeb-4412-9b51-0748187361f2-config\") on node \"crc\" DevicePath \"\"" Nov 28 14:51:42 crc kubenswrapper[4857]: I1128 14:51:42.296060 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/725dd93e-beeb-4412-9b51-0748187361f2-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 14:51:42 crc kubenswrapper[4857]: I1128 14:51:42.356974 4857 generic.go:334] "Generic (PLEG): container finished" podID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerID="e069878b8272165d675b3e5b9b4751408dfbaf5d46c3ee50c635da1d18076ac5" exitCode=0 Nov 28 14:51:42 crc kubenswrapper[4857]: I1128 14:51:42.357074 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerDied","Data":"e069878b8272165d675b3e5b9b4751408dfbaf5d46c3ee50c635da1d18076ac5"} Nov 28 14:51:42 crc kubenswrapper[4857]: I1128 14:51:42.357118 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerStarted","Data":"3e9d36f7893d9f1cc6753c2afab65986137c59e8ac6eb66a5f0a1735633d260e"} Nov 28 14:51:42 crc kubenswrapper[4857]: I1128 14:51:42.357141 4857 scope.go:117] "RemoveContainer" containerID="369091f600b21ab0f3801f31e3df4c4871285595a38c22eb5d54131bbe855e7a" Nov 28 14:51:42 crc kubenswrapper[4857]: I1128 14:51:42.363065 4857 generic.go:334] "Generic (PLEG): container finished" podID="725dd93e-beeb-4412-9b51-0748187361f2" containerID="6b997c2de871a07bbde36fc630efbcbb016972ba7f4035e7daf3ae9a7c5431ae" exitCode=0 Nov 28 14:51:42 crc kubenswrapper[4857]: I1128 14:51:42.363158 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-98ddfc8f-mt6hz" event={"ID":"725dd93e-beeb-4412-9b51-0748187361f2","Type":"ContainerDied","Data":"6b997c2de871a07bbde36fc630efbcbb016972ba7f4035e7daf3ae9a7c5431ae"} Nov 28 14:51:42 crc kubenswrapper[4857]: I1128 14:51:42.363193 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-98ddfc8f-mt6hz" event={"ID":"725dd93e-beeb-4412-9b51-0748187361f2","Type":"ContainerDied","Data":"bd54747dcbb2949c8c51c80e6c1df58137c31fc13c94fcd555172f921fc187ef"} Nov 28 14:51:42 crc kubenswrapper[4857]: I1128 14:51:42.363309 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-98ddfc8f-mt6hz" Nov 28 14:51:42 crc kubenswrapper[4857]: I1128 14:51:42.403873 4857 scope.go:117] "RemoveContainer" containerID="6b997c2de871a07bbde36fc630efbcbb016972ba7f4035e7daf3ae9a7c5431ae" Nov 28 14:51:42 crc kubenswrapper[4857]: W1128 14:51:42.407965 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod28c5e6ac_e77c_4bc0_8d28_5f323f7fcb6f.slice/crio-2ab950f5cedf688b6f480e3938f617555d4fa51def03c6051c349b84dcf3a564 WatchSource:0}: Error finding container 2ab950f5cedf688b6f480e3938f617555d4fa51def03c6051c349b84dcf3a564: Status 404 returned error can't find the container with id 2ab950f5cedf688b6f480e3938f617555d4fa51def03c6051c349b84dcf3a564 Nov 28 14:51:42 crc kubenswrapper[4857]: I1128 14:51:42.408308 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 28 14:51:42 crc kubenswrapper[4857]: I1128 14:51:42.414466 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-98ddfc8f-mt6hz"] Nov 28 14:51:42 crc kubenswrapper[4857]: I1128 14:51:42.419860 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-98ddfc8f-mt6hz"] Nov 28 14:51:42 crc kubenswrapper[4857]: I1128 14:51:42.421180 4857 scope.go:117] "RemoveContainer" containerID="0228bde86ab877a138530cce61e044978dd329a8cbe27603554ecb79551f5823" Nov 28 14:51:42 crc kubenswrapper[4857]: I1128 14:51:42.438202 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 28 14:51:42 crc kubenswrapper[4857]: I1128 14:51:42.444691 4857 scope.go:117] "RemoveContainer" containerID="6b997c2de871a07bbde36fc630efbcbb016972ba7f4035e7daf3ae9a7c5431ae" Nov 28 14:51:42 crc kubenswrapper[4857]: E1128 14:51:42.445131 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6b997c2de871a07bbde36fc630efbcbb016972ba7f4035e7daf3ae9a7c5431ae\": container with ID starting with 6b997c2de871a07bbde36fc630efbcbb016972ba7f4035e7daf3ae9a7c5431ae not found: ID does not exist" containerID="6b997c2de871a07bbde36fc630efbcbb016972ba7f4035e7daf3ae9a7c5431ae" Nov 28 14:51:42 crc kubenswrapper[4857]: I1128 14:51:42.445169 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6b997c2de871a07bbde36fc630efbcbb016972ba7f4035e7daf3ae9a7c5431ae"} err="failed to get container status \"6b997c2de871a07bbde36fc630efbcbb016972ba7f4035e7daf3ae9a7c5431ae\": rpc error: code = NotFound desc = could not find container \"6b997c2de871a07bbde36fc630efbcbb016972ba7f4035e7daf3ae9a7c5431ae\": container with ID starting with 6b997c2de871a07bbde36fc630efbcbb016972ba7f4035e7daf3ae9a7c5431ae not found: ID does not exist" Nov 28 14:51:42 crc kubenswrapper[4857]: I1128 14:51:42.445197 4857 scope.go:117] "RemoveContainer" containerID="0228bde86ab877a138530cce61e044978dd329a8cbe27603554ecb79551f5823" Nov 28 14:51:42 crc kubenswrapper[4857]: E1128 14:51:42.446152 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0228bde86ab877a138530cce61e044978dd329a8cbe27603554ecb79551f5823\": container with ID starting with 0228bde86ab877a138530cce61e044978dd329a8cbe27603554ecb79551f5823 not found: ID does not exist" containerID="0228bde86ab877a138530cce61e044978dd329a8cbe27603554ecb79551f5823" Nov 28 14:51:42 crc kubenswrapper[4857]: I1128 14:51:42.446194 4857 
Nov 28 14:51:42 crc kubenswrapper[4857]: W1128 14:51:42.460049 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbf2f5ade_a981_4a9f_891c_83fd9bb77414.slice/crio-9cf4086725fe8ace5d53ac58762382608e65342f27ac731ab7934a2ce73588b0 WatchSource:0}: Error finding container 9cf4086725fe8ace5d53ac58762382608e65342f27ac731ab7934a2ce73588b0: Status 404 returned error can't find the container with id 9cf4086725fe8ace5d53ac58762382608e65342f27ac731ab7934a2ce73588b0
Nov 28 14:51:43 crc kubenswrapper[4857]: I1128 14:51:43.375452 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f","Type":"ContainerStarted","Data":"2ab950f5cedf688b6f480e3938f617555d4fa51def03c6051c349b84dcf3a564"}
Nov 28 14:51:43 crc kubenswrapper[4857]: I1128 14:51:43.381002 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"bf2f5ade-a981-4a9f-891c-83fd9bb77414","Type":"ContainerStarted","Data":"9cf4086725fe8ace5d53ac58762382608e65342f27ac731ab7934a2ce73588b0"}
Nov 28 14:51:44 crc kubenswrapper[4857]: I1128 14:51:44.247170 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="725dd93e-beeb-4412-9b51-0748187361f2" path="/var/lib/kubelet/pods/725dd93e-beeb-4412-9b51-0748187361f2/volumes"
Nov 28 14:51:44 crc kubenswrapper[4857]: I1128 14:51:44.394495 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f","Type":"ContainerStarted","Data":"0ed40963952240a11e40a0109a22ac4cc0e89ada5fc13191758de6bfd26d77fd"}
Nov 28 14:51:44 crc kubenswrapper[4857]: I1128 14:51:44.396723 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"bf2f5ade-a981-4a9f-891c-83fd9bb77414","Type":"ContainerStarted","Data":"4b095a010da7b8728e0615799b1f01b79be40a67ca80ad425be2d92a27d9a0dc"}
Nov 28 14:52:17 crc kubenswrapper[4857]: I1128 14:52:17.710218 4857 generic.go:334] "Generic (PLEG): container finished" podID="bf2f5ade-a981-4a9f-891c-83fd9bb77414" containerID="4b095a010da7b8728e0615799b1f01b79be40a67ca80ad425be2d92a27d9a0dc" exitCode=0
Nov 28 14:52:17 crc kubenswrapper[4857]: I1128 14:52:17.710324 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"bf2f5ade-a981-4a9f-891c-83fd9bb77414","Type":"ContainerDied","Data":"4b095a010da7b8728e0615799b1f01b79be40a67ca80ad425be2d92a27d9a0dc"}
Nov 28 14:52:17 crc kubenswrapper[4857]: I1128 14:52:17.712995 4857 generic.go:334] "Generic (PLEG): container finished" podID="28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f" containerID="0ed40963952240a11e40a0109a22ac4cc0e89ada5fc13191758de6bfd26d77fd" exitCode=0
Nov 28 14:52:17 crc kubenswrapper[4857]: I1128 14:52:17.713055 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f","Type":"ContainerDied","Data":"0ed40963952240a11e40a0109a22ac4cc0e89ada5fc13191758de6bfd26d77fd"}
Nov 28 14:52:18 crc kubenswrapper[4857]: I1128 14:52:18.726978 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"bf2f5ade-a981-4a9f-891c-83fd9bb77414","Type":"ContainerStarted","Data":"c62c298a13874d8c5e942bd3c25ace12afb2398777519f9ab8d8df41e738e5d0"}
Nov 28 14:52:18 crc kubenswrapper[4857]: I1128 14:52:18.728096 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:52:18 crc kubenswrapper[4857]: I1128 14:52:18.729028 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f","Type":"ContainerStarted","Data":"1b2d20f48837e3471bda3fad5c8d3cd95032cb72feb380080f95a334ce8b9635"}
Nov 28 14:52:18 crc kubenswrapper[4857]: I1128 14:52:18.729209 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0"
Nov 28 14:52:18 crc kubenswrapper[4857]: I1128 14:52:18.762725 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=37.762695819 podStartE2EDuration="37.762695819s" podCreationTimestamp="2025-11-28 14:51:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 14:52:18.757968853 +0000 UTC m=+4988.881910290" watchObservedRunningTime="2025-11-28 14:52:18.762695819 +0000 UTC m=+4988.886637286"
Nov 28 14:52:18 crc kubenswrapper[4857]: I1128 14:52:18.799024 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=37.798991896 podStartE2EDuration="37.798991896s" podCreationTimestamp="2025-11-28 14:51:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 14:52:18.778799118 +0000 UTC m=+4988.902740585" watchObservedRunningTime="2025-11-28 14:52:18.798991896 +0000 UTC m=+4988.922933373"
Nov 28 14:52:31 crc kubenswrapper[4857]: I1128 14:52:31.885661 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0"
Nov 28 14:52:31 crc kubenswrapper[4857]: I1128 14:52:31.897111 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0"
Nov 28 14:52:39 crc kubenswrapper[4857]: I1128 14:52:39.359205 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-1-default"]
Nov 28 14:52:39 crc kubenswrapper[4857]: E1128 14:52:39.360612 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="725dd93e-beeb-4412-9b51-0748187361f2" containerName="dnsmasq-dns"
Nov 28 14:52:39 crc kubenswrapper[4857]: I1128 14:52:39.360636 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="725dd93e-beeb-4412-9b51-0748187361f2" containerName="dnsmasq-dns"
Nov 28 14:52:39 crc kubenswrapper[4857]: E1128 14:52:39.360705 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="725dd93e-beeb-4412-9b51-0748187361f2" containerName="init"
Nov 28 14:52:39 crc kubenswrapper[4857]: I1128 14:52:39.360719 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="725dd93e-beeb-4412-9b51-0748187361f2" containerName="init"
Nov 28 14:52:39 crc kubenswrapper[4857]: I1128 14:52:39.361019 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="725dd93e-beeb-4412-9b51-0748187361f2" containerName="dnsmasq-dns"
Nov 28 14:52:39 crc kubenswrapper[4857]: I1128 14:52:39.361889 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1-default"
Nov 28 14:52:39 crc kubenswrapper[4857]: I1128 14:52:39.367326 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-trntl"
Nov 28 14:52:39 crc kubenswrapper[4857]: I1128 14:52:39.378101 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1-default"]
Nov 28 14:52:39 crc kubenswrapper[4857]: I1128 14:52:39.496432 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jhrkq\" (UniqueName: \"kubernetes.io/projected/46df0bc3-0c50-4a95-89d3-7576f5475e7e-kube-api-access-jhrkq\") pod \"mariadb-client-1-default\" (UID: \"46df0bc3-0c50-4a95-89d3-7576f5475e7e\") " pod="openstack/mariadb-client-1-default"
Nov 28 14:52:39 crc kubenswrapper[4857]: I1128 14:52:39.598372 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jhrkq\" (UniqueName: \"kubernetes.io/projected/46df0bc3-0c50-4a95-89d3-7576f5475e7e-kube-api-access-jhrkq\") pod \"mariadb-client-1-default\" (UID: \"46df0bc3-0c50-4a95-89d3-7576f5475e7e\") " pod="openstack/mariadb-client-1-default"
Nov 28 14:52:39 crc kubenswrapper[4857]: I1128 14:52:39.621842 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jhrkq\" (UniqueName: \"kubernetes.io/projected/46df0bc3-0c50-4a95-89d3-7576f5475e7e-kube-api-access-jhrkq\") pod \"mariadb-client-1-default\" (UID: \"46df0bc3-0c50-4a95-89d3-7576f5475e7e\") " pod="openstack/mariadb-client-1-default"
Nov 28 14:52:39 crc kubenswrapper[4857]: I1128 14:52:39.693585 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1-default"
Nov 28 14:52:40 crc kubenswrapper[4857]: I1128 14:52:40.296445 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1-default"]
Nov 28 14:52:40 crc kubenswrapper[4857]: I1128 14:52:40.950212 4857 generic.go:334] "Generic (PLEG): container finished" podID="46df0bc3-0c50-4a95-89d3-7576f5475e7e" containerID="1227345c6c1b250b7d629c71ef4fd08a23ea0f619a4f20395322d31771f71c6b" exitCode=0
Nov 28 14:52:40 crc kubenswrapper[4857]: I1128 14:52:40.950301 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1-default" event={"ID":"46df0bc3-0c50-4a95-89d3-7576f5475e7e","Type":"ContainerDied","Data":"1227345c6c1b250b7d629c71ef4fd08a23ea0f619a4f20395322d31771f71c6b"}
Nov 28 14:52:40 crc kubenswrapper[4857]: I1128 14:52:40.950365 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1-default" event={"ID":"46df0bc3-0c50-4a95-89d3-7576f5475e7e","Type":"ContainerStarted","Data":"7626ce0d70e2a76b645c808dc7ca9d05ade27f48afea48e2ea26fd4f2cda295e"}
Nov 28 14:52:42 crc kubenswrapper[4857]: I1128 14:52:42.403918 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1-default"
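The pod_startup_latency_tracker entries a few lines up are internally consistent: with no image pulling recorded (both pull timestamps are the zero time), podStartSLOduration equals watchObservedRunningTime minus podCreationTimestamp. A tiny Go check of the cell1 numbers:

package main

import (
	"fmt"
	"time"
)

func main() {
	created, _ := time.Parse(time.RFC3339, "2025-11-28T14:51:41Z")
	observed, _ := time.Parse(time.RFC3339Nano, "2025-11-28T14:52:18.762695819Z")
	// 14:52:18.762695819 - 14:51:41 = 37.762695819s, matching podStartSLOduration above.
	fmt.Println(observed.Sub(created).Seconds())
}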
Need to start a new one" pod="openstack/mariadb-client-1-default" Nov 28 14:52:42 crc kubenswrapper[4857]: I1128 14:52:42.433286 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-1-default_46df0bc3-0c50-4a95-89d3-7576f5475e7e/mariadb-client-1-default/0.log" Nov 28 14:52:42 crc kubenswrapper[4857]: I1128 14:52:42.461210 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-1-default"] Nov 28 14:52:42 crc kubenswrapper[4857]: I1128 14:52:42.466878 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-1-default"] Nov 28 14:52:42 crc kubenswrapper[4857]: I1128 14:52:42.549767 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhrkq\" (UniqueName: \"kubernetes.io/projected/46df0bc3-0c50-4a95-89d3-7576f5475e7e-kube-api-access-jhrkq\") pod \"46df0bc3-0c50-4a95-89d3-7576f5475e7e\" (UID: \"46df0bc3-0c50-4a95-89d3-7576f5475e7e\") " Nov 28 14:52:42 crc kubenswrapper[4857]: I1128 14:52:42.562462 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/46df0bc3-0c50-4a95-89d3-7576f5475e7e-kube-api-access-jhrkq" (OuterVolumeSpecName: "kube-api-access-jhrkq") pod "46df0bc3-0c50-4a95-89d3-7576f5475e7e" (UID: "46df0bc3-0c50-4a95-89d3-7576f5475e7e"). InnerVolumeSpecName "kube-api-access-jhrkq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:52:42 crc kubenswrapper[4857]: I1128 14:52:42.651158 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhrkq\" (UniqueName: \"kubernetes.io/projected/46df0bc3-0c50-4a95-89d3-7576f5475e7e-kube-api-access-jhrkq\") on node \"crc\" DevicePath \"\"" Nov 28 14:52:42 crc kubenswrapper[4857]: I1128 14:52:42.960659 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-2-default"] Nov 28 14:52:42 crc kubenswrapper[4857]: E1128 14:52:42.961229 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46df0bc3-0c50-4a95-89d3-7576f5475e7e" containerName="mariadb-client-1-default" Nov 28 14:52:42 crc kubenswrapper[4857]: I1128 14:52:42.961248 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="46df0bc3-0c50-4a95-89d3-7576f5475e7e" containerName="mariadb-client-1-default" Nov 28 14:52:42 crc kubenswrapper[4857]: I1128 14:52:42.961488 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="46df0bc3-0c50-4a95-89d3-7576f5475e7e" containerName="mariadb-client-1-default" Nov 28 14:52:42 crc kubenswrapper[4857]: I1128 14:52:42.962811 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2-default" Nov 28 14:52:42 crc kubenswrapper[4857]: I1128 14:52:42.969371 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7626ce0d70e2a76b645c808dc7ca9d05ade27f48afea48e2ea26fd4f2cda295e" Nov 28 14:52:42 crc kubenswrapper[4857]: I1128 14:52:42.969518 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-1-default" Nov 28 14:52:42 crc kubenswrapper[4857]: I1128 14:52:42.973380 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2-default"] Nov 28 14:52:43 crc kubenswrapper[4857]: I1128 14:52:43.057999 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8brgx\" (UniqueName: \"kubernetes.io/projected/2d1b7e8f-8356-43f7-ae7f-8da4d3ca2974-kube-api-access-8brgx\") pod \"mariadb-client-2-default\" (UID: \"2d1b7e8f-8356-43f7-ae7f-8da4d3ca2974\") " pod="openstack/mariadb-client-2-default" Nov 28 14:52:43 crc kubenswrapper[4857]: I1128 14:52:43.160364 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8brgx\" (UniqueName: \"kubernetes.io/projected/2d1b7e8f-8356-43f7-ae7f-8da4d3ca2974-kube-api-access-8brgx\") pod \"mariadb-client-2-default\" (UID: \"2d1b7e8f-8356-43f7-ae7f-8da4d3ca2974\") " pod="openstack/mariadb-client-2-default" Nov 28 14:52:43 crc kubenswrapper[4857]: I1128 14:52:43.186326 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8brgx\" (UniqueName: \"kubernetes.io/projected/2d1b7e8f-8356-43f7-ae7f-8da4d3ca2974-kube-api-access-8brgx\") pod \"mariadb-client-2-default\" (UID: \"2d1b7e8f-8356-43f7-ae7f-8da4d3ca2974\") " pod="openstack/mariadb-client-2-default" Nov 28 14:52:43 crc kubenswrapper[4857]: I1128 14:52:43.313912 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2-default" Nov 28 14:52:43 crc kubenswrapper[4857]: I1128 14:52:43.902662 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2-default"] Nov 28 14:52:43 crc kubenswrapper[4857]: W1128 14:52:43.908163 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2d1b7e8f_8356_43f7_ae7f_8da4d3ca2974.slice/crio-16011341081d3acf47c3e76cbf90aa4dcd40ceceacf42ce04155ee5156ee8134 WatchSource:0}: Error finding container 16011341081d3acf47c3e76cbf90aa4dcd40ceceacf42ce04155ee5156ee8134: Status 404 returned error can't find the container with id 16011341081d3acf47c3e76cbf90aa4dcd40ceceacf42ce04155ee5156ee8134 Nov 28 14:52:43 crc kubenswrapper[4857]: I1128 14:52:43.983057 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2-default" event={"ID":"2d1b7e8f-8356-43f7-ae7f-8da4d3ca2974","Type":"ContainerStarted","Data":"16011341081d3acf47c3e76cbf90aa4dcd40ceceacf42ce04155ee5156ee8134"} Nov 28 14:52:44 crc kubenswrapper[4857]: I1128 14:52:44.259658 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="46df0bc3-0c50-4a95-89d3-7576f5475e7e" path="/var/lib/kubelet/pods/46df0bc3-0c50-4a95-89d3-7576f5475e7e/volumes" Nov 28 14:52:44 crc kubenswrapper[4857]: I1128 14:52:44.996263 4857 generic.go:334] "Generic (PLEG): container finished" podID="2d1b7e8f-8356-43f7-ae7f-8da4d3ca2974" containerID="3329be992d61209ed52de6fbe7ef61e67bf87720819f8e7d908b083c09c80607" exitCode=1 Nov 28 14:52:44 crc kubenswrapper[4857]: I1128 14:52:44.996342 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2-default" event={"ID":"2d1b7e8f-8356-43f7-ae7f-8da4d3ca2974","Type":"ContainerDied","Data":"3329be992d61209ed52de6fbe7ef61e67bf87720819f8e7d908b083c09c80607"} Nov 28 14:52:46 crc kubenswrapper[4857]: I1128 14:52:46.409062 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-2-default" Nov 28 14:52:46 crc kubenswrapper[4857]: I1128 14:52:46.430920 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-2-default_2d1b7e8f-8356-43f7-ae7f-8da4d3ca2974/mariadb-client-2-default/0.log" Nov 28 14:52:46 crc kubenswrapper[4857]: I1128 14:52:46.460050 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-2-default"] Nov 28 14:52:46 crc kubenswrapper[4857]: I1128 14:52:46.470648 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-2-default"] Nov 28 14:52:46 crc kubenswrapper[4857]: I1128 14:52:46.516736 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8brgx\" (UniqueName: \"kubernetes.io/projected/2d1b7e8f-8356-43f7-ae7f-8da4d3ca2974-kube-api-access-8brgx\") pod \"2d1b7e8f-8356-43f7-ae7f-8da4d3ca2974\" (UID: \"2d1b7e8f-8356-43f7-ae7f-8da4d3ca2974\") " Nov 28 14:52:46 crc kubenswrapper[4857]: I1128 14:52:46.523835 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2d1b7e8f-8356-43f7-ae7f-8da4d3ca2974-kube-api-access-8brgx" (OuterVolumeSpecName: "kube-api-access-8brgx") pod "2d1b7e8f-8356-43f7-ae7f-8da4d3ca2974" (UID: "2d1b7e8f-8356-43f7-ae7f-8da4d3ca2974"). InnerVolumeSpecName "kube-api-access-8brgx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:52:46 crc kubenswrapper[4857]: I1128 14:52:46.618682 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8brgx\" (UniqueName: \"kubernetes.io/projected/2d1b7e8f-8356-43f7-ae7f-8da4d3ca2974-kube-api-access-8brgx\") on node \"crc\" DevicePath \"\"" Nov 28 14:52:46 crc kubenswrapper[4857]: I1128 14:52:46.929961 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-1"] Nov 28 14:52:46 crc kubenswrapper[4857]: E1128 14:52:46.930308 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d1b7e8f-8356-43f7-ae7f-8da4d3ca2974" containerName="mariadb-client-2-default" Nov 28 14:52:46 crc kubenswrapper[4857]: I1128 14:52:46.930323 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d1b7e8f-8356-43f7-ae7f-8da4d3ca2974" containerName="mariadb-client-2-default" Nov 28 14:52:46 crc kubenswrapper[4857]: I1128 14:52:46.930450 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d1b7e8f-8356-43f7-ae7f-8da4d3ca2974" containerName="mariadb-client-2-default" Nov 28 14:52:46 crc kubenswrapper[4857]: I1128 14:52:46.930912 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1" Nov 28 14:52:46 crc kubenswrapper[4857]: I1128 14:52:46.939343 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1"] Nov 28 14:52:47 crc kubenswrapper[4857]: I1128 14:52:47.016537 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="16011341081d3acf47c3e76cbf90aa4dcd40ceceacf42ce04155ee5156ee8134" Nov 28 14:52:47 crc kubenswrapper[4857]: I1128 14:52:47.016604 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-2-default" Nov 28 14:52:47 crc kubenswrapper[4857]: I1128 14:52:47.024351 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dhq6k\" (UniqueName: \"kubernetes.io/projected/3091f454-69e6-420d-849e-f35fe9e918c5-kube-api-access-dhq6k\") pod \"mariadb-client-1\" (UID: \"3091f454-69e6-420d-849e-f35fe9e918c5\") " pod="openstack/mariadb-client-1" Nov 28 14:52:47 crc kubenswrapper[4857]: I1128 14:52:47.125552 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dhq6k\" (UniqueName: \"kubernetes.io/projected/3091f454-69e6-420d-849e-f35fe9e918c5-kube-api-access-dhq6k\") pod \"mariadb-client-1\" (UID: \"3091f454-69e6-420d-849e-f35fe9e918c5\") " pod="openstack/mariadb-client-1" Nov 28 14:52:47 crc kubenswrapper[4857]: I1128 14:52:47.147388 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dhq6k\" (UniqueName: \"kubernetes.io/projected/3091f454-69e6-420d-849e-f35fe9e918c5-kube-api-access-dhq6k\") pod \"mariadb-client-1\" (UID: \"3091f454-69e6-420d-849e-f35fe9e918c5\") " pod="openstack/mariadb-client-1" Nov 28 14:52:47 crc kubenswrapper[4857]: I1128 14:52:47.255074 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1" Nov 28 14:52:47 crc kubenswrapper[4857]: I1128 14:52:47.596487 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1"] Nov 28 14:52:47 crc kubenswrapper[4857]: W1128 14:52:47.601462 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3091f454_69e6_420d_849e_f35fe9e918c5.slice/crio-04cf7800c0031380dcdf2ff63c4dd8503f10cc7ad18543c112eae9dc62f68fdf WatchSource:0}: Error finding container 04cf7800c0031380dcdf2ff63c4dd8503f10cc7ad18543c112eae9dc62f68fdf: Status 404 returned error can't find the container with id 04cf7800c0031380dcdf2ff63c4dd8503f10cc7ad18543c112eae9dc62f68fdf Nov 28 14:52:48 crc kubenswrapper[4857]: I1128 14:52:48.024364 4857 generic.go:334] "Generic (PLEG): container finished" podID="3091f454-69e6-420d-849e-f35fe9e918c5" containerID="4571ef6083c534717395fddb9b91748050f9b5d47af431a336aaec82d893ec6e" exitCode=0 Nov 28 14:52:48 crc kubenswrapper[4857]: I1128 14:52:48.024408 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1" event={"ID":"3091f454-69e6-420d-849e-f35fe9e918c5","Type":"ContainerDied","Data":"4571ef6083c534717395fddb9b91748050f9b5d47af431a336aaec82d893ec6e"} Nov 28 14:52:48 crc kubenswrapper[4857]: I1128 14:52:48.024432 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1" event={"ID":"3091f454-69e6-420d-849e-f35fe9e918c5","Type":"ContainerStarted","Data":"04cf7800c0031380dcdf2ff63c4dd8503f10cc7ad18543c112eae9dc62f68fdf"} Nov 28 14:52:48 crc kubenswrapper[4857]: I1128 14:52:48.247362 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2d1b7e8f-8356-43f7-ae7f-8da4d3ca2974" path="/var/lib/kubelet/pods/2d1b7e8f-8356-43f7-ae7f-8da4d3ca2974/volumes" Nov 28 14:52:49 crc kubenswrapper[4857]: I1128 14:52:49.455111 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-1" Nov 28 14:52:49 crc kubenswrapper[4857]: I1128 14:52:49.478704 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-1_3091f454-69e6-420d-849e-f35fe9e918c5/mariadb-client-1/0.log" Nov 28 14:52:49 crc kubenswrapper[4857]: I1128 14:52:49.512995 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-1"] Nov 28 14:52:49 crc kubenswrapper[4857]: I1128 14:52:49.519212 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-1"] Nov 28 14:52:49 crc kubenswrapper[4857]: I1128 14:52:49.570214 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dhq6k\" (UniqueName: \"kubernetes.io/projected/3091f454-69e6-420d-849e-f35fe9e918c5-kube-api-access-dhq6k\") pod \"3091f454-69e6-420d-849e-f35fe9e918c5\" (UID: \"3091f454-69e6-420d-849e-f35fe9e918c5\") " Nov 28 14:52:49 crc kubenswrapper[4857]: I1128 14:52:49.580267 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3091f454-69e6-420d-849e-f35fe9e918c5-kube-api-access-dhq6k" (OuterVolumeSpecName: "kube-api-access-dhq6k") pod "3091f454-69e6-420d-849e-f35fe9e918c5" (UID: "3091f454-69e6-420d-849e-f35fe9e918c5"). InnerVolumeSpecName "kube-api-access-dhq6k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:52:49 crc kubenswrapper[4857]: I1128 14:52:49.672096 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dhq6k\" (UniqueName: \"kubernetes.io/projected/3091f454-69e6-420d-849e-f35fe9e918c5-kube-api-access-dhq6k\") on node \"crc\" DevicePath \"\"" Nov 28 14:52:50 crc kubenswrapper[4857]: I1128 14:52:50.005116 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-4-default"] Nov 28 14:52:50 crc kubenswrapper[4857]: E1128 14:52:50.005438 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3091f454-69e6-420d-849e-f35fe9e918c5" containerName="mariadb-client-1" Nov 28 14:52:50 crc kubenswrapper[4857]: I1128 14:52:50.005454 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="3091f454-69e6-420d-849e-f35fe9e918c5" containerName="mariadb-client-1" Nov 28 14:52:50 crc kubenswrapper[4857]: I1128 14:52:50.005625 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="3091f454-69e6-420d-849e-f35fe9e918c5" containerName="mariadb-client-1" Nov 28 14:52:50 crc kubenswrapper[4857]: I1128 14:52:50.006163 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-4-default" Nov 28 14:52:50 crc kubenswrapper[4857]: I1128 14:52:50.011636 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-4-default"] Nov 28 14:52:50 crc kubenswrapper[4857]: I1128 14:52:50.041899 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="04cf7800c0031380dcdf2ff63c4dd8503f10cc7ad18543c112eae9dc62f68fdf" Nov 28 14:52:50 crc kubenswrapper[4857]: I1128 14:52:50.042000 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-1" Nov 28 14:52:50 crc kubenswrapper[4857]: I1128 14:52:50.178917 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wwjtd\" (UniqueName: \"kubernetes.io/projected/bdfa29a8-96b1-4579-b08e-8bfb920eb5a9-kube-api-access-wwjtd\") pod \"mariadb-client-4-default\" (UID: \"bdfa29a8-96b1-4579-b08e-8bfb920eb5a9\") " pod="openstack/mariadb-client-4-default" Nov 28 14:52:50 crc kubenswrapper[4857]: I1128 14:52:50.241353 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3091f454-69e6-420d-849e-f35fe9e918c5" path="/var/lib/kubelet/pods/3091f454-69e6-420d-849e-f35fe9e918c5/volumes" Nov 28 14:52:50 crc kubenswrapper[4857]: I1128 14:52:50.280661 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wwjtd\" (UniqueName: \"kubernetes.io/projected/bdfa29a8-96b1-4579-b08e-8bfb920eb5a9-kube-api-access-wwjtd\") pod \"mariadb-client-4-default\" (UID: \"bdfa29a8-96b1-4579-b08e-8bfb920eb5a9\") " pod="openstack/mariadb-client-4-default" Nov 28 14:52:50 crc kubenswrapper[4857]: I1128 14:52:50.301283 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wwjtd\" (UniqueName: \"kubernetes.io/projected/bdfa29a8-96b1-4579-b08e-8bfb920eb5a9-kube-api-access-wwjtd\") pod \"mariadb-client-4-default\" (UID: \"bdfa29a8-96b1-4579-b08e-8bfb920eb5a9\") " pod="openstack/mariadb-client-4-default" Nov 28 14:52:50 crc kubenswrapper[4857]: I1128 14:52:50.328463 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-4-default" Nov 28 14:52:51 crc kubenswrapper[4857]: I1128 14:52:51.377955 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-4-default"] Nov 28 14:52:51 crc kubenswrapper[4857]: W1128 14:52:51.388044 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbdfa29a8_96b1_4579_b08e_8bfb920eb5a9.slice/crio-6e0fad71fb8ab710f23ded1d33169551e25eabb3c7a23817f2a6184b9921e546 WatchSource:0}: Error finding container 6e0fad71fb8ab710f23ded1d33169551e25eabb3c7a23817f2a6184b9921e546: Status 404 returned error can't find the container with id 6e0fad71fb8ab710f23ded1d33169551e25eabb3c7a23817f2a6184b9921e546 Nov 28 14:52:52 crc kubenswrapper[4857]: I1128 14:52:52.055343 4857 generic.go:334] "Generic (PLEG): container finished" podID="bdfa29a8-96b1-4579-b08e-8bfb920eb5a9" containerID="34b6c471388ade7c1ca12f7030a3c4a2762c97e2aa0fcfd2d39276f604dc6eea" exitCode=0 Nov 28 14:52:52 crc kubenswrapper[4857]: I1128 14:52:52.055386 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-4-default" event={"ID":"bdfa29a8-96b1-4579-b08e-8bfb920eb5a9","Type":"ContainerDied","Data":"34b6c471388ade7c1ca12f7030a3c4a2762c97e2aa0fcfd2d39276f604dc6eea"} Nov 28 14:52:52 crc kubenswrapper[4857]: I1128 14:52:52.055412 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-4-default" event={"ID":"bdfa29a8-96b1-4579-b08e-8bfb920eb5a9","Type":"ContainerStarted","Data":"6e0fad71fb8ab710f23ded1d33169551e25eabb3c7a23817f2a6184b9921e546"} Nov 28 14:52:53 crc kubenswrapper[4857]: I1128 14:52:53.419244 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-4-default" Nov 28 14:52:53 crc kubenswrapper[4857]: I1128 14:52:53.439543 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-4-default_bdfa29a8-96b1-4579-b08e-8bfb920eb5a9/mariadb-client-4-default/0.log" Nov 28 14:52:53 crc kubenswrapper[4857]: I1128 14:52:53.473672 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-4-default"] Nov 28 14:52:53 crc kubenswrapper[4857]: I1128 14:52:53.483583 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-4-default"] Nov 28 14:52:53 crc kubenswrapper[4857]: I1128 14:52:53.535891 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wwjtd\" (UniqueName: \"kubernetes.io/projected/bdfa29a8-96b1-4579-b08e-8bfb920eb5a9-kube-api-access-wwjtd\") pod \"bdfa29a8-96b1-4579-b08e-8bfb920eb5a9\" (UID: \"bdfa29a8-96b1-4579-b08e-8bfb920eb5a9\") " Nov 28 14:52:53 crc kubenswrapper[4857]: I1128 14:52:53.544139 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bdfa29a8-96b1-4579-b08e-8bfb920eb5a9-kube-api-access-wwjtd" (OuterVolumeSpecName: "kube-api-access-wwjtd") pod "bdfa29a8-96b1-4579-b08e-8bfb920eb5a9" (UID: "bdfa29a8-96b1-4579-b08e-8bfb920eb5a9"). InnerVolumeSpecName "kube-api-access-wwjtd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:52:53 crc kubenswrapper[4857]: I1128 14:52:53.638023 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wwjtd\" (UniqueName: \"kubernetes.io/projected/bdfa29a8-96b1-4579-b08e-8bfb920eb5a9-kube-api-access-wwjtd\") on node \"crc\" DevicePath \"\"" Nov 28 14:52:54 crc kubenswrapper[4857]: I1128 14:52:54.073104 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6e0fad71fb8ab710f23ded1d33169551e25eabb3c7a23817f2a6184b9921e546" Nov 28 14:52:54 crc kubenswrapper[4857]: I1128 14:52:54.073158 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-4-default" Nov 28 14:52:54 crc kubenswrapper[4857]: I1128 14:52:54.241318 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bdfa29a8-96b1-4579-b08e-8bfb920eb5a9" path="/var/lib/kubelet/pods/bdfa29a8-96b1-4579-b08e-8bfb920eb5a9/volumes" Nov 28 14:52:57 crc kubenswrapper[4857]: I1128 14:52:57.279535 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-5-default"] Nov 28 14:52:57 crc kubenswrapper[4857]: E1128 14:52:57.280133 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bdfa29a8-96b1-4579-b08e-8bfb920eb5a9" containerName="mariadb-client-4-default" Nov 28 14:52:57 crc kubenswrapper[4857]: I1128 14:52:57.280146 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="bdfa29a8-96b1-4579-b08e-8bfb920eb5a9" containerName="mariadb-client-4-default" Nov 28 14:52:57 crc kubenswrapper[4857]: I1128 14:52:57.280310 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="bdfa29a8-96b1-4579-b08e-8bfb920eb5a9" containerName="mariadb-client-4-default" Nov 28 14:52:57 crc kubenswrapper[4857]: I1128 14:52:57.280819 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-5-default" Nov 28 14:52:57 crc kubenswrapper[4857]: I1128 14:52:57.284026 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-trntl" Nov 28 14:52:57 crc kubenswrapper[4857]: I1128 14:52:57.298508 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-5-default"] Nov 28 14:52:57 crc kubenswrapper[4857]: I1128 14:52:57.314730 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n67jr\" (UniqueName: \"kubernetes.io/projected/f34bf29f-daac-4032-acc8-686d297d817f-kube-api-access-n67jr\") pod \"mariadb-client-5-default\" (UID: \"f34bf29f-daac-4032-acc8-686d297d817f\") " pod="openstack/mariadb-client-5-default" Nov 28 14:52:57 crc kubenswrapper[4857]: I1128 14:52:57.416776 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n67jr\" (UniqueName: \"kubernetes.io/projected/f34bf29f-daac-4032-acc8-686d297d817f-kube-api-access-n67jr\") pod \"mariadb-client-5-default\" (UID: \"f34bf29f-daac-4032-acc8-686d297d817f\") " pod="openstack/mariadb-client-5-default" Nov 28 14:52:57 crc kubenswrapper[4857]: I1128 14:52:57.441902 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n67jr\" (UniqueName: \"kubernetes.io/projected/f34bf29f-daac-4032-acc8-686d297d817f-kube-api-access-n67jr\") pod \"mariadb-client-5-default\" (UID: \"f34bf29f-daac-4032-acc8-686d297d817f\") " pod="openstack/mariadb-client-5-default" Nov 28 14:52:57 crc kubenswrapper[4857]: I1128 14:52:57.638879 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-5-default" Nov 28 14:52:58 crc kubenswrapper[4857]: I1128 14:52:58.133119 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-5-default"] Nov 28 14:52:59 crc kubenswrapper[4857]: I1128 14:52:59.112989 4857 generic.go:334] "Generic (PLEG): container finished" podID="f34bf29f-daac-4032-acc8-686d297d817f" containerID="12e4b9aec4e908a5b324876cc69c4e82352b942ca1dc187454dc666c72af60ec" exitCode=0 Nov 28 14:52:59 crc kubenswrapper[4857]: I1128 14:52:59.113098 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-5-default" event={"ID":"f34bf29f-daac-4032-acc8-686d297d817f","Type":"ContainerDied","Data":"12e4b9aec4e908a5b324876cc69c4e82352b942ca1dc187454dc666c72af60ec"} Nov 28 14:52:59 crc kubenswrapper[4857]: I1128 14:52:59.113368 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-5-default" event={"ID":"f34bf29f-daac-4032-acc8-686d297d817f","Type":"ContainerStarted","Data":"f17f73c7894eeb2a0ecb5ebe001760b24074b840c15631eb1ae2a0febf695a85"} Nov 28 14:53:00 crc kubenswrapper[4857]: I1128 14:53:00.539229 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-5-default" Nov 28 14:53:00 crc kubenswrapper[4857]: I1128 14:53:00.565150 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-5-default_f34bf29f-daac-4032-acc8-686d297d817f/mariadb-client-5-default/0.log" Nov 28 14:53:00 crc kubenswrapper[4857]: I1128 14:53:00.593171 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-5-default"] Nov 28 14:53:00 crc kubenswrapper[4857]: I1128 14:53:00.598775 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-5-default"] Nov 28 14:53:00 crc kubenswrapper[4857]: I1128 14:53:00.668984 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n67jr\" (UniqueName: \"kubernetes.io/projected/f34bf29f-daac-4032-acc8-686d297d817f-kube-api-access-n67jr\") pod \"f34bf29f-daac-4032-acc8-686d297d817f\" (UID: \"f34bf29f-daac-4032-acc8-686d297d817f\") " Nov 28 14:53:00 crc kubenswrapper[4857]: I1128 14:53:00.682248 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f34bf29f-daac-4032-acc8-686d297d817f-kube-api-access-n67jr" (OuterVolumeSpecName: "kube-api-access-n67jr") pod "f34bf29f-daac-4032-acc8-686d297d817f" (UID: "f34bf29f-daac-4032-acc8-686d297d817f"). InnerVolumeSpecName "kube-api-access-n67jr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:53:00 crc kubenswrapper[4857]: I1128 14:53:00.750803 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-6-default"] Nov 28 14:53:00 crc kubenswrapper[4857]: E1128 14:53:00.751318 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f34bf29f-daac-4032-acc8-686d297d817f" containerName="mariadb-client-5-default" Nov 28 14:53:00 crc kubenswrapper[4857]: I1128 14:53:00.751339 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f34bf29f-daac-4032-acc8-686d297d817f" containerName="mariadb-client-5-default" Nov 28 14:53:00 crc kubenswrapper[4857]: I1128 14:53:00.751565 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f34bf29f-daac-4032-acc8-686d297d817f" containerName="mariadb-client-5-default" Nov 28 14:53:00 crc kubenswrapper[4857]: I1128 14:53:00.752386 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-6-default" Nov 28 14:53:00 crc kubenswrapper[4857]: I1128 14:53:00.761520 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-6-default"] Nov 28 14:53:00 crc kubenswrapper[4857]: I1128 14:53:00.771700 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n67jr\" (UniqueName: \"kubernetes.io/projected/f34bf29f-daac-4032-acc8-686d297d817f-kube-api-access-n67jr\") on node \"crc\" DevicePath \"\"" Nov 28 14:53:00 crc kubenswrapper[4857]: I1128 14:53:00.873925 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-74rqh\" (UniqueName: \"kubernetes.io/projected/fc2e3124-8fde-43ee-ad52-e055d494fc2f-kube-api-access-74rqh\") pod \"mariadb-client-6-default\" (UID: \"fc2e3124-8fde-43ee-ad52-e055d494fc2f\") " pod="openstack/mariadb-client-6-default" Nov 28 14:53:00 crc kubenswrapper[4857]: I1128 14:53:00.976346 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-74rqh\" (UniqueName: \"kubernetes.io/projected/fc2e3124-8fde-43ee-ad52-e055d494fc2f-kube-api-access-74rqh\") pod \"mariadb-client-6-default\" (UID: \"fc2e3124-8fde-43ee-ad52-e055d494fc2f\") " pod="openstack/mariadb-client-6-default" Nov 28 14:53:00 crc kubenswrapper[4857]: I1128 14:53:00.998420 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-74rqh\" (UniqueName: \"kubernetes.io/projected/fc2e3124-8fde-43ee-ad52-e055d494fc2f-kube-api-access-74rqh\") pod \"mariadb-client-6-default\" (UID: \"fc2e3124-8fde-43ee-ad52-e055d494fc2f\") " pod="openstack/mariadb-client-6-default" Nov 28 14:53:01 crc kubenswrapper[4857]: I1128 14:53:01.069329 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-6-default" Nov 28 14:53:01 crc kubenswrapper[4857]: I1128 14:53:01.133499 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f17f73c7894eeb2a0ecb5ebe001760b24074b840c15631eb1ae2a0febf695a85" Nov 28 14:53:01 crc kubenswrapper[4857]: I1128 14:53:01.133659 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-5-default" Nov 28 14:53:01 crc kubenswrapper[4857]: I1128 14:53:01.623022 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-6-default"] Nov 28 14:53:01 crc kubenswrapper[4857]: W1128 14:53:01.628222 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfc2e3124_8fde_43ee_ad52_e055d494fc2f.slice/crio-d8e62d8f0528ea43c5f43379c730822b1af312e9a416c06fbc98f95907f10660 WatchSource:0}: Error finding container d8e62d8f0528ea43c5f43379c730822b1af312e9a416c06fbc98f95907f10660: Status 404 returned error can't find the container with id d8e62d8f0528ea43c5f43379c730822b1af312e9a416c06fbc98f95907f10660 Nov 28 14:53:02 crc kubenswrapper[4857]: I1128 14:53:02.145593 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-6-default" event={"ID":"fc2e3124-8fde-43ee-ad52-e055d494fc2f","Type":"ContainerStarted","Data":"c68952e32acee964b334e8293f6946c4c75aaf6ed121b14feb50d1035595703d"} Nov 28 14:53:02 crc kubenswrapper[4857]: I1128 14:53:02.145645 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-6-default" event={"ID":"fc2e3124-8fde-43ee-ad52-e055d494fc2f","Type":"ContainerStarted","Data":"d8e62d8f0528ea43c5f43379c730822b1af312e9a416c06fbc98f95907f10660"} Nov 28 14:53:02 crc kubenswrapper[4857]: I1128 14:53:02.172778 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mariadb-client-6-default" podStartSLOduration=2.172755908 podStartE2EDuration="2.172755908s" podCreationTimestamp="2025-11-28 14:53:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 14:53:02.162571136 +0000 UTC m=+5032.286512613" watchObservedRunningTime="2025-11-28 14:53:02.172755908 +0000 UTC m=+5032.296697345" Nov 28 14:53:02 crc kubenswrapper[4857]: I1128 14:53:02.250873 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f34bf29f-daac-4032-acc8-686d297d817f" path="/var/lib/kubelet/pods/f34bf29f-daac-4032-acc8-686d297d817f/volumes" Nov 28 14:53:03 crc kubenswrapper[4857]: I1128 14:53:03.158417 4857 generic.go:334] "Generic (PLEG): container finished" podID="fc2e3124-8fde-43ee-ad52-e055d494fc2f" containerID="c68952e32acee964b334e8293f6946c4c75aaf6ed121b14feb50d1035595703d" exitCode=1 Nov 28 14:53:03 crc kubenswrapper[4857]: I1128 14:53:03.158463 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-6-default" event={"ID":"fc2e3124-8fde-43ee-ad52-e055d494fc2f","Type":"ContainerDied","Data":"c68952e32acee964b334e8293f6946c4c75aaf6ed121b14feb50d1035595703d"} Nov 28 14:53:04 crc kubenswrapper[4857]: I1128 14:53:04.548207 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-6-default" Nov 28 14:53:04 crc kubenswrapper[4857]: I1128 14:53:04.595051 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-6-default"] Nov 28 14:53:04 crc kubenswrapper[4857]: I1128 14:53:04.601920 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-6-default"] Nov 28 14:53:04 crc kubenswrapper[4857]: I1128 14:53:04.660551 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-74rqh\" (UniqueName: \"kubernetes.io/projected/fc2e3124-8fde-43ee-ad52-e055d494fc2f-kube-api-access-74rqh\") pod \"fc2e3124-8fde-43ee-ad52-e055d494fc2f\" (UID: \"fc2e3124-8fde-43ee-ad52-e055d494fc2f\") " Nov 28 14:53:04 crc kubenswrapper[4857]: I1128 14:53:04.667000 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc2e3124-8fde-43ee-ad52-e055d494fc2f-kube-api-access-74rqh" (OuterVolumeSpecName: "kube-api-access-74rqh") pod "fc2e3124-8fde-43ee-ad52-e055d494fc2f" (UID: "fc2e3124-8fde-43ee-ad52-e055d494fc2f"). InnerVolumeSpecName "kube-api-access-74rqh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:53:04 crc kubenswrapper[4857]: I1128 14:53:04.744677 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-7-default"] Nov 28 14:53:04 crc kubenswrapper[4857]: E1128 14:53:04.744986 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc2e3124-8fde-43ee-ad52-e055d494fc2f" containerName="mariadb-client-6-default" Nov 28 14:53:04 crc kubenswrapper[4857]: I1128 14:53:04.745002 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc2e3124-8fde-43ee-ad52-e055d494fc2f" containerName="mariadb-client-6-default" Nov 28 14:53:04 crc kubenswrapper[4857]: I1128 14:53:04.745162 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc2e3124-8fde-43ee-ad52-e055d494fc2f" containerName="mariadb-client-6-default" Nov 28 14:53:04 crc kubenswrapper[4857]: I1128 14:53:04.745711 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-7-default" Nov 28 14:53:04 crc kubenswrapper[4857]: I1128 14:53:04.759697 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-7-default"] Nov 28 14:53:04 crc kubenswrapper[4857]: I1128 14:53:04.762597 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bxtmg\" (UniqueName: \"kubernetes.io/projected/440a5434-e29e-4d81-bbea-f985bafd8bec-kube-api-access-bxtmg\") pod \"mariadb-client-7-default\" (UID: \"440a5434-e29e-4d81-bbea-f985bafd8bec\") " pod="openstack/mariadb-client-7-default" Nov 28 14:53:04 crc kubenswrapper[4857]: I1128 14:53:04.762663 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-74rqh\" (UniqueName: \"kubernetes.io/projected/fc2e3124-8fde-43ee-ad52-e055d494fc2f-kube-api-access-74rqh\") on node \"crc\" DevicePath \"\"" Nov 28 14:53:04 crc kubenswrapper[4857]: I1128 14:53:04.863557 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bxtmg\" (UniqueName: \"kubernetes.io/projected/440a5434-e29e-4d81-bbea-f985bafd8bec-kube-api-access-bxtmg\") pod \"mariadb-client-7-default\" (UID: \"440a5434-e29e-4d81-bbea-f985bafd8bec\") " pod="openstack/mariadb-client-7-default" Nov 28 14:53:04 crc kubenswrapper[4857]: I1128 14:53:04.880714 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bxtmg\" (UniqueName: \"kubernetes.io/projected/440a5434-e29e-4d81-bbea-f985bafd8bec-kube-api-access-bxtmg\") pod \"mariadb-client-7-default\" (UID: \"440a5434-e29e-4d81-bbea-f985bafd8bec\") " pod="openstack/mariadb-client-7-default" Nov 28 14:53:05 crc kubenswrapper[4857]: I1128 14:53:05.063710 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-7-default" Nov 28 14:53:05 crc kubenswrapper[4857]: I1128 14:53:05.186372 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d8e62d8f0528ea43c5f43379c730822b1af312e9a416c06fbc98f95907f10660" Nov 28 14:53:05 crc kubenswrapper[4857]: I1128 14:53:05.186438 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-6-default" Nov 28 14:53:05 crc kubenswrapper[4857]: I1128 14:53:05.625831 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-7-default"] Nov 28 14:53:06 crc kubenswrapper[4857]: I1128 14:53:06.195711 4857 generic.go:334] "Generic (PLEG): container finished" podID="440a5434-e29e-4d81-bbea-f985bafd8bec" containerID="bf3886ebf17a80e05fc67649b5e4d9d402f101838495f5623b8d7c742ca9d1c6" exitCode=0 Nov 28 14:53:06 crc kubenswrapper[4857]: I1128 14:53:06.195790 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-7-default" event={"ID":"440a5434-e29e-4d81-bbea-f985bafd8bec","Type":"ContainerDied","Data":"bf3886ebf17a80e05fc67649b5e4d9d402f101838495f5623b8d7c742ca9d1c6"} Nov 28 14:53:06 crc kubenswrapper[4857]: I1128 14:53:06.195858 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-7-default" event={"ID":"440a5434-e29e-4d81-bbea-f985bafd8bec","Type":"ContainerStarted","Data":"b21c993c327f87a23a54e807b0dd87ff5645e6defcbaf2db8deadab1370ce911"} Nov 28 14:53:06 crc kubenswrapper[4857]: I1128 14:53:06.238367 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fc2e3124-8fde-43ee-ad52-e055d494fc2f" path="/var/lib/kubelet/pods/fc2e3124-8fde-43ee-ad52-e055d494fc2f/volumes" Nov 28 14:53:07 crc kubenswrapper[4857]: I1128 14:53:07.551705 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-7-default" Nov 28 14:53:07 crc kubenswrapper[4857]: I1128 14:53:07.569863 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-7-default_440a5434-e29e-4d81-bbea-f985bafd8bec/mariadb-client-7-default/0.log" Nov 28 14:53:07 crc kubenswrapper[4857]: I1128 14:53:07.594257 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-7-default"] Nov 28 14:53:07 crc kubenswrapper[4857]: I1128 14:53:07.601319 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bxtmg\" (UniqueName: \"kubernetes.io/projected/440a5434-e29e-4d81-bbea-f985bafd8bec-kube-api-access-bxtmg\") pod \"440a5434-e29e-4d81-bbea-f985bafd8bec\" (UID: \"440a5434-e29e-4d81-bbea-f985bafd8bec\") " Nov 28 14:53:07 crc kubenswrapper[4857]: I1128 14:53:07.604639 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-7-default"] Nov 28 14:53:07 crc kubenswrapper[4857]: I1128 14:53:07.609847 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/440a5434-e29e-4d81-bbea-f985bafd8bec-kube-api-access-bxtmg" (OuterVolumeSpecName: "kube-api-access-bxtmg") pod "440a5434-e29e-4d81-bbea-f985bafd8bec" (UID: "440a5434-e29e-4d81-bbea-f985bafd8bec"). InnerVolumeSpecName "kube-api-access-bxtmg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:53:07 crc kubenswrapper[4857]: I1128 14:53:07.704046 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bxtmg\" (UniqueName: \"kubernetes.io/projected/440a5434-e29e-4d81-bbea-f985bafd8bec-kube-api-access-bxtmg\") on node \"crc\" DevicePath \"\"" Nov 28 14:53:07 crc kubenswrapper[4857]: I1128 14:53:07.725434 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-2"] Nov 28 14:53:07 crc kubenswrapper[4857]: E1128 14:53:07.725750 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="440a5434-e29e-4d81-bbea-f985bafd8bec" containerName="mariadb-client-7-default" Nov 28 14:53:07 crc kubenswrapper[4857]: I1128 14:53:07.725761 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="440a5434-e29e-4d81-bbea-f985bafd8bec" containerName="mariadb-client-7-default" Nov 28 14:53:07 crc kubenswrapper[4857]: I1128 14:53:07.725915 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="440a5434-e29e-4d81-bbea-f985bafd8bec" containerName="mariadb-client-7-default" Nov 28 14:53:07 crc kubenswrapper[4857]: I1128 14:53:07.726429 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2" Nov 28 14:53:07 crc kubenswrapper[4857]: I1128 14:53:07.740977 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2"] Nov 28 14:53:07 crc kubenswrapper[4857]: I1128 14:53:07.805881 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lrqd5\" (UniqueName: \"kubernetes.io/projected/2178ad15-b27a-4f51-ad92-f6dbb8f8b1d6-kube-api-access-lrqd5\") pod \"mariadb-client-2\" (UID: \"2178ad15-b27a-4f51-ad92-f6dbb8f8b1d6\") " pod="openstack/mariadb-client-2" Nov 28 14:53:07 crc kubenswrapper[4857]: I1128 14:53:07.907371 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lrqd5\" (UniqueName: \"kubernetes.io/projected/2178ad15-b27a-4f51-ad92-f6dbb8f8b1d6-kube-api-access-lrqd5\") pod \"mariadb-client-2\" (UID: \"2178ad15-b27a-4f51-ad92-f6dbb8f8b1d6\") " pod="openstack/mariadb-client-2" Nov 28 14:53:07 crc kubenswrapper[4857]: I1128 14:53:07.939487 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lrqd5\" (UniqueName: \"kubernetes.io/projected/2178ad15-b27a-4f51-ad92-f6dbb8f8b1d6-kube-api-access-lrqd5\") pod \"mariadb-client-2\" (UID: \"2178ad15-b27a-4f51-ad92-f6dbb8f8b1d6\") " pod="openstack/mariadb-client-2" Nov 28 14:53:08 crc kubenswrapper[4857]: I1128 14:53:08.058786 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2" Nov 28 14:53:08 crc kubenswrapper[4857]: I1128 14:53:08.218121 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b21c993c327f87a23a54e807b0dd87ff5645e6defcbaf2db8deadab1370ce911" Nov 28 14:53:08 crc kubenswrapper[4857]: I1128 14:53:08.218327 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-7-default" Nov 28 14:53:08 crc kubenswrapper[4857]: I1128 14:53:08.241898 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="440a5434-e29e-4d81-bbea-f985bafd8bec" path="/var/lib/kubelet/pods/440a5434-e29e-4d81-bbea-f985bafd8bec/volumes" Nov 28 14:53:08 crc kubenswrapper[4857]: I1128 14:53:08.604275 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2"] Nov 28 14:53:08 crc kubenswrapper[4857]: W1128 14:53:08.609988 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2178ad15_b27a_4f51_ad92_f6dbb8f8b1d6.slice/crio-1d1aaa3eeafddc29e14382ec91fd4e57aee9c8a68d29f1d202b98eae15bf2c4b WatchSource:0}: Error finding container 1d1aaa3eeafddc29e14382ec91fd4e57aee9c8a68d29f1d202b98eae15bf2c4b: Status 404 returned error can't find the container with id 1d1aaa3eeafddc29e14382ec91fd4e57aee9c8a68d29f1d202b98eae15bf2c4b Nov 28 14:53:09 crc kubenswrapper[4857]: I1128 14:53:09.229273 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2" event={"ID":"2178ad15-b27a-4f51-ad92-f6dbb8f8b1d6","Type":"ContainerDied","Data":"3569a6d8b64c49c9fd321db1e257107dabed5eabd94282c2d4681ebf9bca59b1"} Nov 28 14:53:09 crc kubenswrapper[4857]: I1128 14:53:09.229074 4857 generic.go:334] "Generic (PLEG): container finished" podID="2178ad15-b27a-4f51-ad92-f6dbb8f8b1d6" containerID="3569a6d8b64c49c9fd321db1e257107dabed5eabd94282c2d4681ebf9bca59b1" exitCode=0 Nov 28 14:53:09 crc kubenswrapper[4857]: I1128 14:53:09.229810 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2" event={"ID":"2178ad15-b27a-4f51-ad92-f6dbb8f8b1d6","Type":"ContainerStarted","Data":"1d1aaa3eeafddc29e14382ec91fd4e57aee9c8a68d29f1d202b98eae15bf2c4b"} Nov 28 14:53:10 crc kubenswrapper[4857]: I1128 14:53:10.650850 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2" Nov 28 14:53:10 crc kubenswrapper[4857]: I1128 14:53:10.674857 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-2_2178ad15-b27a-4f51-ad92-f6dbb8f8b1d6/mariadb-client-2/0.log" Nov 28 14:53:10 crc kubenswrapper[4857]: I1128 14:53:10.702601 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-2"] Nov 28 14:53:10 crc kubenswrapper[4857]: I1128 14:53:10.709483 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-2"] Nov 28 14:53:10 crc kubenswrapper[4857]: I1128 14:53:10.852231 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lrqd5\" (UniqueName: \"kubernetes.io/projected/2178ad15-b27a-4f51-ad92-f6dbb8f8b1d6-kube-api-access-lrqd5\") pod \"2178ad15-b27a-4f51-ad92-f6dbb8f8b1d6\" (UID: \"2178ad15-b27a-4f51-ad92-f6dbb8f8b1d6\") " Nov 28 14:53:10 crc kubenswrapper[4857]: I1128 14:53:10.860325 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2178ad15-b27a-4f51-ad92-f6dbb8f8b1d6-kube-api-access-lrqd5" (OuterVolumeSpecName: "kube-api-access-lrqd5") pod "2178ad15-b27a-4f51-ad92-f6dbb8f8b1d6" (UID: "2178ad15-b27a-4f51-ad92-f6dbb8f8b1d6"). InnerVolumeSpecName "kube-api-access-lrqd5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:53:10 crc kubenswrapper[4857]: I1128 14:53:10.954866 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lrqd5\" (UniqueName: \"kubernetes.io/projected/2178ad15-b27a-4f51-ad92-f6dbb8f8b1d6-kube-api-access-lrqd5\") on node \"crc\" DevicePath \"\"" Nov 28 14:53:11 crc kubenswrapper[4857]: I1128 14:53:11.267387 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1d1aaa3eeafddc29e14382ec91fd4e57aee9c8a68d29f1d202b98eae15bf2c4b" Nov 28 14:53:11 crc kubenswrapper[4857]: I1128 14:53:11.267441 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2" Nov 28 14:53:12 crc kubenswrapper[4857]: I1128 14:53:12.244323 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2178ad15-b27a-4f51-ad92-f6dbb8f8b1d6" path="/var/lib/kubelet/pods/2178ad15-b27a-4f51-ad92-f6dbb8f8b1d6/volumes" Nov 28 14:53:16 crc kubenswrapper[4857]: I1128 14:53:16.980975 4857 scope.go:117] "RemoveContainer" containerID="279c36dd31ab1160978dc5f82feae94386b0e55002249d92a7beb2f8a77bd67d" Nov 28 14:53:41 crc kubenswrapper[4857]: I1128 14:53:41.308498 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 14:53:41 crc kubenswrapper[4857]: I1128 14:53:41.309078 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 14:54:11 crc kubenswrapper[4857]: I1128 14:54:11.309013 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 14:54:11 crc kubenswrapper[4857]: I1128 14:54:11.309628 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 14:54:24 crc kubenswrapper[4857]: I1128 14:54:24.206403 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-v8824"] Nov 28 14:54:24 crc kubenswrapper[4857]: E1128 14:54:24.207686 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2178ad15-b27a-4f51-ad92-f6dbb8f8b1d6" containerName="mariadb-client-2" Nov 28 14:54:24 crc kubenswrapper[4857]: I1128 14:54:24.207709 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="2178ad15-b27a-4f51-ad92-f6dbb8f8b1d6" containerName="mariadb-client-2" Nov 28 14:54:24 crc kubenswrapper[4857]: I1128 14:54:24.208067 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="2178ad15-b27a-4f51-ad92-f6dbb8f8b1d6" containerName="mariadb-client-2" Nov 28 14:54:24 crc kubenswrapper[4857]: I1128 14:54:24.210305 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-v8824" Nov 28 14:54:24 crc kubenswrapper[4857]: I1128 14:54:24.221593 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-v8824"] Nov 28 14:54:24 crc kubenswrapper[4857]: I1128 14:54:24.381771 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b880881-a332-41d7-95ae-83d7b37e753b-utilities\") pod \"redhat-operators-v8824\" (UID: \"8b880881-a332-41d7-95ae-83d7b37e753b\") " pod="openshift-marketplace/redhat-operators-v8824" Nov 28 14:54:24 crc kubenswrapper[4857]: I1128 14:54:24.381828 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b880881-a332-41d7-95ae-83d7b37e753b-catalog-content\") pod \"redhat-operators-v8824\" (UID: \"8b880881-a332-41d7-95ae-83d7b37e753b\") " pod="openshift-marketplace/redhat-operators-v8824" Nov 28 14:54:24 crc kubenswrapper[4857]: I1128 14:54:24.381854 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p2q5k\" (UniqueName: \"kubernetes.io/projected/8b880881-a332-41d7-95ae-83d7b37e753b-kube-api-access-p2q5k\") pod \"redhat-operators-v8824\" (UID: \"8b880881-a332-41d7-95ae-83d7b37e753b\") " pod="openshift-marketplace/redhat-operators-v8824" Nov 28 14:54:24 crc kubenswrapper[4857]: I1128 14:54:24.501635 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b880881-a332-41d7-95ae-83d7b37e753b-utilities\") pod \"redhat-operators-v8824\" (UID: \"8b880881-a332-41d7-95ae-83d7b37e753b\") " pod="openshift-marketplace/redhat-operators-v8824" Nov 28 14:54:24 crc kubenswrapper[4857]: I1128 14:54:24.501696 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b880881-a332-41d7-95ae-83d7b37e753b-catalog-content\") pod \"redhat-operators-v8824\" (UID: \"8b880881-a332-41d7-95ae-83d7b37e753b\") " pod="openshift-marketplace/redhat-operators-v8824" Nov 28 14:54:24 crc kubenswrapper[4857]: I1128 14:54:24.501736 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p2q5k\" (UniqueName: \"kubernetes.io/projected/8b880881-a332-41d7-95ae-83d7b37e753b-kube-api-access-p2q5k\") pod \"redhat-operators-v8824\" (UID: \"8b880881-a332-41d7-95ae-83d7b37e753b\") " pod="openshift-marketplace/redhat-operators-v8824" Nov 28 14:54:24 crc kubenswrapper[4857]: I1128 14:54:24.503112 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b880881-a332-41d7-95ae-83d7b37e753b-catalog-content\") pod \"redhat-operators-v8824\" (UID: \"8b880881-a332-41d7-95ae-83d7b37e753b\") " pod="openshift-marketplace/redhat-operators-v8824" Nov 28 14:54:24 crc kubenswrapper[4857]: I1128 14:54:24.503264 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b880881-a332-41d7-95ae-83d7b37e753b-utilities\") pod \"redhat-operators-v8824\" (UID: \"8b880881-a332-41d7-95ae-83d7b37e753b\") " pod="openshift-marketplace/redhat-operators-v8824" Nov 28 14:54:24 crc kubenswrapper[4857]: I1128 14:54:24.525561 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-p2q5k\" (UniqueName: \"kubernetes.io/projected/8b880881-a332-41d7-95ae-83d7b37e753b-kube-api-access-p2q5k\") pod \"redhat-operators-v8824\" (UID: \"8b880881-a332-41d7-95ae-83d7b37e753b\") " pod="openshift-marketplace/redhat-operators-v8824" Nov 28 14:54:24 crc kubenswrapper[4857]: I1128 14:54:24.545099 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-v8824" Nov 28 14:54:25 crc kubenswrapper[4857]: I1128 14:54:25.013047 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-v8824"] Nov 28 14:54:25 crc kubenswrapper[4857]: I1128 14:54:25.925286 4857 generic.go:334] "Generic (PLEG): container finished" podID="8b880881-a332-41d7-95ae-83d7b37e753b" containerID="de88ad76c3d94c2a20b3cd72baeb35addf33fa682d5aeb13961483b159841df4" exitCode=0 Nov 28 14:54:25 crc kubenswrapper[4857]: I1128 14:54:25.925358 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v8824" event={"ID":"8b880881-a332-41d7-95ae-83d7b37e753b","Type":"ContainerDied","Data":"de88ad76c3d94c2a20b3cd72baeb35addf33fa682d5aeb13961483b159841df4"} Nov 28 14:54:25 crc kubenswrapper[4857]: I1128 14:54:25.925444 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v8824" event={"ID":"8b880881-a332-41d7-95ae-83d7b37e753b","Type":"ContainerStarted","Data":"9a824ca2bb5fc1bdb67b9845b3227533f36d0e0812938eb6c7a51a612e0b4a63"} Nov 28 14:54:25 crc kubenswrapper[4857]: I1128 14:54:25.927964 4857 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 14:54:29 crc kubenswrapper[4857]: I1128 14:54:29.960413 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v8824" event={"ID":"8b880881-a332-41d7-95ae-83d7b37e753b","Type":"ContainerStarted","Data":"c063fb2bb34fb88699eaa5caed4d0bdab853d14fb3b2f4ec284be5e89ed58936"} Nov 28 14:54:30 crc kubenswrapper[4857]: I1128 14:54:30.976213 4857 generic.go:334] "Generic (PLEG): container finished" podID="8b880881-a332-41d7-95ae-83d7b37e753b" containerID="c063fb2bb34fb88699eaa5caed4d0bdab853d14fb3b2f4ec284be5e89ed58936" exitCode=0 Nov 28 14:54:30 crc kubenswrapper[4857]: I1128 14:54:30.976294 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v8824" event={"ID":"8b880881-a332-41d7-95ae-83d7b37e753b","Type":"ContainerDied","Data":"c063fb2bb34fb88699eaa5caed4d0bdab853d14fb3b2f4ec284be5e89ed58936"} Nov 28 14:54:31 crc kubenswrapper[4857]: I1128 14:54:31.986902 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v8824" event={"ID":"8b880881-a332-41d7-95ae-83d7b37e753b","Type":"ContainerStarted","Data":"3c37e8ccf2a1a059792f1f34b94d4510ae0b1022153c579da87bb12b86330b23"} Nov 28 14:54:32 crc kubenswrapper[4857]: I1128 14:54:32.006306 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-v8824" podStartSLOduration=2.533426888 podStartE2EDuration="8.006289254s" podCreationTimestamp="2025-11-28 14:54:24 +0000 UTC" firstStartedPulling="2025-11-28 14:54:25.92763321 +0000 UTC m=+5116.051574647" lastFinishedPulling="2025-11-28 14:54:31.400495566 +0000 UTC m=+5121.524437013" observedRunningTime="2025-11-28 14:54:32.000745696 +0000 UTC m=+5122.124687123" watchObservedRunningTime="2025-11-28 14:54:32.006289254 +0000 UTC m=+5122.130230691" Nov 28 14:54:34 crc 
kubenswrapper[4857]: I1128 14:54:34.545799 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-v8824" Nov 28 14:54:34 crc kubenswrapper[4857]: I1128 14:54:34.546128 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-v8824" Nov 28 14:54:35 crc kubenswrapper[4857]: I1128 14:54:35.601989 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-v8824" podUID="8b880881-a332-41d7-95ae-83d7b37e753b" containerName="registry-server" probeResult="failure" output=< Nov 28 14:54:35 crc kubenswrapper[4857]: timeout: failed to connect service ":50051" within 1s Nov 28 14:54:35 crc kubenswrapper[4857]: > Nov 28 14:54:41 crc kubenswrapper[4857]: I1128 14:54:41.308935 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 14:54:41 crc kubenswrapper[4857]: I1128 14:54:41.310715 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 14:54:41 crc kubenswrapper[4857]: I1128 14:54:41.310823 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 14:54:41 crc kubenswrapper[4857]: I1128 14:54:41.311466 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3e9d36f7893d9f1cc6753c2afab65986137c59e8ac6eb66a5f0a1735633d260e"} pod="openshift-machine-config-operator/machine-config-daemon-dshsf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 14:54:41 crc kubenswrapper[4857]: I1128 14:54:41.311598 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" containerID="cri-o://3e9d36f7893d9f1cc6753c2afab65986137c59e8ac6eb66a5f0a1735633d260e" gracePeriod=600 Nov 28 14:54:43 crc kubenswrapper[4857]: I1128 14:54:43.096271 4857 generic.go:334] "Generic (PLEG): container finished" podID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerID="3e9d36f7893d9f1cc6753c2afab65986137c59e8ac6eb66a5f0a1735633d260e" exitCode=0 Nov 28 14:54:43 crc kubenswrapper[4857]: I1128 14:54:43.096342 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerDied","Data":"3e9d36f7893d9f1cc6753c2afab65986137c59e8ac6eb66a5f0a1735633d260e"} Nov 28 14:54:43 crc kubenswrapper[4857]: I1128 14:54:43.097004 4857 scope.go:117] "RemoveContainer" containerID="e069878b8272165d675b3e5b9b4751408dfbaf5d46c3ee50c635da1d18076ac5" Nov 28 14:54:43 crc kubenswrapper[4857]: E1128 14:54:43.269725 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting 
failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:54:44 crc kubenswrapper[4857]: I1128 14:54:44.112301 4857 scope.go:117] "RemoveContainer" containerID="3e9d36f7893d9f1cc6753c2afab65986137c59e8ac6eb66a5f0a1735633d260e" Nov 28 14:54:44 crc kubenswrapper[4857]: E1128 14:54:44.112802 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:54:44 crc kubenswrapper[4857]: I1128 14:54:44.603572 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-v8824" Nov 28 14:54:44 crc kubenswrapper[4857]: I1128 14:54:44.673097 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-v8824" Nov 28 14:54:44 crc kubenswrapper[4857]: I1128 14:54:44.840845 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-v8824"] Nov 28 14:54:46 crc kubenswrapper[4857]: I1128 14:54:46.127336 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-v8824" podUID="8b880881-a332-41d7-95ae-83d7b37e753b" containerName="registry-server" containerID="cri-o://3c37e8ccf2a1a059792f1f34b94d4510ae0b1022153c579da87bb12b86330b23" gracePeriod=2 Nov 28 14:54:46 crc kubenswrapper[4857]: I1128 14:54:46.760884 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-v8824" Nov 28 14:54:46 crc kubenswrapper[4857]: I1128 14:54:46.923642 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p2q5k\" (UniqueName: \"kubernetes.io/projected/8b880881-a332-41d7-95ae-83d7b37e753b-kube-api-access-p2q5k\") pod \"8b880881-a332-41d7-95ae-83d7b37e753b\" (UID: \"8b880881-a332-41d7-95ae-83d7b37e753b\") " Nov 28 14:54:46 crc kubenswrapper[4857]: I1128 14:54:46.923711 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b880881-a332-41d7-95ae-83d7b37e753b-catalog-content\") pod \"8b880881-a332-41d7-95ae-83d7b37e753b\" (UID: \"8b880881-a332-41d7-95ae-83d7b37e753b\") " Nov 28 14:54:46 crc kubenswrapper[4857]: I1128 14:54:46.923780 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b880881-a332-41d7-95ae-83d7b37e753b-utilities\") pod \"8b880881-a332-41d7-95ae-83d7b37e753b\" (UID: \"8b880881-a332-41d7-95ae-83d7b37e753b\") " Nov 28 14:54:46 crc kubenswrapper[4857]: I1128 14:54:46.924860 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8b880881-a332-41d7-95ae-83d7b37e753b-utilities" (OuterVolumeSpecName: "utilities") pod "8b880881-a332-41d7-95ae-83d7b37e753b" (UID: "8b880881-a332-41d7-95ae-83d7b37e753b"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:54:46 crc kubenswrapper[4857]: I1128 14:54:46.931371 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b880881-a332-41d7-95ae-83d7b37e753b-kube-api-access-p2q5k" (OuterVolumeSpecName: "kube-api-access-p2q5k") pod "8b880881-a332-41d7-95ae-83d7b37e753b" (UID: "8b880881-a332-41d7-95ae-83d7b37e753b"). InnerVolumeSpecName "kube-api-access-p2q5k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:54:47 crc kubenswrapper[4857]: I1128 14:54:47.025656 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p2q5k\" (UniqueName: \"kubernetes.io/projected/8b880881-a332-41d7-95ae-83d7b37e753b-kube-api-access-p2q5k\") on node \"crc\" DevicePath \"\"" Nov 28 14:54:47 crc kubenswrapper[4857]: I1128 14:54:47.026098 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8b880881-a332-41d7-95ae-83d7b37e753b-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 14:54:47 crc kubenswrapper[4857]: I1128 14:54:47.051688 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8b880881-a332-41d7-95ae-83d7b37e753b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8b880881-a332-41d7-95ae-83d7b37e753b" (UID: "8b880881-a332-41d7-95ae-83d7b37e753b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:54:47 crc kubenswrapper[4857]: I1128 14:54:47.127361 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8b880881-a332-41d7-95ae-83d7b37e753b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 14:54:47 crc kubenswrapper[4857]: I1128 14:54:47.136348 4857 generic.go:334] "Generic (PLEG): container finished" podID="8b880881-a332-41d7-95ae-83d7b37e753b" containerID="3c37e8ccf2a1a059792f1f34b94d4510ae0b1022153c579da87bb12b86330b23" exitCode=0 Nov 28 14:54:47 crc kubenswrapper[4857]: I1128 14:54:47.136384 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v8824" event={"ID":"8b880881-a332-41d7-95ae-83d7b37e753b","Type":"ContainerDied","Data":"3c37e8ccf2a1a059792f1f34b94d4510ae0b1022153c579da87bb12b86330b23"} Nov 28 14:54:47 crc kubenswrapper[4857]: I1128 14:54:47.136426 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-v8824" Nov 28 14:54:47 crc kubenswrapper[4857]: I1128 14:54:47.136437 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v8824" event={"ID":"8b880881-a332-41d7-95ae-83d7b37e753b","Type":"ContainerDied","Data":"9a824ca2bb5fc1bdb67b9845b3227533f36d0e0812938eb6c7a51a612e0b4a63"} Nov 28 14:54:47 crc kubenswrapper[4857]: I1128 14:54:47.136459 4857 scope.go:117] "RemoveContainer" containerID="3c37e8ccf2a1a059792f1f34b94d4510ae0b1022153c579da87bb12b86330b23" Nov 28 14:54:47 crc kubenswrapper[4857]: I1128 14:54:47.186397 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-v8824"] Nov 28 14:54:47 crc kubenswrapper[4857]: I1128 14:54:47.189996 4857 scope.go:117] "RemoveContainer" containerID="c063fb2bb34fb88699eaa5caed4d0bdab853d14fb3b2f4ec284be5e89ed58936" Nov 28 14:54:47 crc kubenswrapper[4857]: I1128 14:54:47.196855 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-v8824"] Nov 28 14:54:47 crc kubenswrapper[4857]: I1128 14:54:47.213270 4857 scope.go:117] "RemoveContainer" containerID="de88ad76c3d94c2a20b3cd72baeb35addf33fa682d5aeb13961483b159841df4" Nov 28 14:54:47 crc kubenswrapper[4857]: I1128 14:54:47.245676 4857 scope.go:117] "RemoveContainer" containerID="3c37e8ccf2a1a059792f1f34b94d4510ae0b1022153c579da87bb12b86330b23" Nov 28 14:54:47 crc kubenswrapper[4857]: E1128 14:54:47.246292 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c37e8ccf2a1a059792f1f34b94d4510ae0b1022153c579da87bb12b86330b23\": container with ID starting with 3c37e8ccf2a1a059792f1f34b94d4510ae0b1022153c579da87bb12b86330b23 not found: ID does not exist" containerID="3c37e8ccf2a1a059792f1f34b94d4510ae0b1022153c579da87bb12b86330b23" Nov 28 14:54:47 crc kubenswrapper[4857]: I1128 14:54:47.246337 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c37e8ccf2a1a059792f1f34b94d4510ae0b1022153c579da87bb12b86330b23"} err="failed to get container status \"3c37e8ccf2a1a059792f1f34b94d4510ae0b1022153c579da87bb12b86330b23\": rpc error: code = NotFound desc = could not find container \"3c37e8ccf2a1a059792f1f34b94d4510ae0b1022153c579da87bb12b86330b23\": container with ID starting with 3c37e8ccf2a1a059792f1f34b94d4510ae0b1022153c579da87bb12b86330b23 not found: ID does not exist" Nov 28 14:54:47 crc kubenswrapper[4857]: I1128 14:54:47.246364 4857 scope.go:117] "RemoveContainer" containerID="c063fb2bb34fb88699eaa5caed4d0bdab853d14fb3b2f4ec284be5e89ed58936" Nov 28 14:54:47 crc kubenswrapper[4857]: E1128 14:54:47.246789 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c063fb2bb34fb88699eaa5caed4d0bdab853d14fb3b2f4ec284be5e89ed58936\": container with ID starting with c063fb2bb34fb88699eaa5caed4d0bdab853d14fb3b2f4ec284be5e89ed58936 not found: ID does not exist" containerID="c063fb2bb34fb88699eaa5caed4d0bdab853d14fb3b2f4ec284be5e89ed58936" Nov 28 14:54:47 crc kubenswrapper[4857]: I1128 14:54:47.246807 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c063fb2bb34fb88699eaa5caed4d0bdab853d14fb3b2f4ec284be5e89ed58936"} err="failed to get container status \"c063fb2bb34fb88699eaa5caed4d0bdab853d14fb3b2f4ec284be5e89ed58936\": rpc error: code = NotFound desc = could not find container 
\"c063fb2bb34fb88699eaa5caed4d0bdab853d14fb3b2f4ec284be5e89ed58936\": container with ID starting with c063fb2bb34fb88699eaa5caed4d0bdab853d14fb3b2f4ec284be5e89ed58936 not found: ID does not exist" Nov 28 14:54:47 crc kubenswrapper[4857]: I1128 14:54:47.246818 4857 scope.go:117] "RemoveContainer" containerID="de88ad76c3d94c2a20b3cd72baeb35addf33fa682d5aeb13961483b159841df4" Nov 28 14:54:47 crc kubenswrapper[4857]: E1128 14:54:47.247133 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"de88ad76c3d94c2a20b3cd72baeb35addf33fa682d5aeb13961483b159841df4\": container with ID starting with de88ad76c3d94c2a20b3cd72baeb35addf33fa682d5aeb13961483b159841df4 not found: ID does not exist" containerID="de88ad76c3d94c2a20b3cd72baeb35addf33fa682d5aeb13961483b159841df4" Nov 28 14:54:47 crc kubenswrapper[4857]: I1128 14:54:47.247184 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de88ad76c3d94c2a20b3cd72baeb35addf33fa682d5aeb13961483b159841df4"} err="failed to get container status \"de88ad76c3d94c2a20b3cd72baeb35addf33fa682d5aeb13961483b159841df4\": rpc error: code = NotFound desc = could not find container \"de88ad76c3d94c2a20b3cd72baeb35addf33fa682d5aeb13961483b159841df4\": container with ID starting with de88ad76c3d94c2a20b3cd72baeb35addf33fa682d5aeb13961483b159841df4 not found: ID does not exist" Nov 28 14:54:48 crc kubenswrapper[4857]: I1128 14:54:48.237436 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8b880881-a332-41d7-95ae-83d7b37e753b" path="/var/lib/kubelet/pods/8b880881-a332-41d7-95ae-83d7b37e753b/volumes" Nov 28 14:54:55 crc kubenswrapper[4857]: I1128 14:54:55.722641 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-sldrc"] Nov 28 14:54:55 crc kubenswrapper[4857]: E1128 14:54:55.723719 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b880881-a332-41d7-95ae-83d7b37e753b" containerName="extract-content" Nov 28 14:54:55 crc kubenswrapper[4857]: I1128 14:54:55.723735 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b880881-a332-41d7-95ae-83d7b37e753b" containerName="extract-content" Nov 28 14:54:55 crc kubenswrapper[4857]: E1128 14:54:55.723747 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b880881-a332-41d7-95ae-83d7b37e753b" containerName="registry-server" Nov 28 14:54:55 crc kubenswrapper[4857]: I1128 14:54:55.723753 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b880881-a332-41d7-95ae-83d7b37e753b" containerName="registry-server" Nov 28 14:54:55 crc kubenswrapper[4857]: E1128 14:54:55.723775 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b880881-a332-41d7-95ae-83d7b37e753b" containerName="extract-utilities" Nov 28 14:54:55 crc kubenswrapper[4857]: I1128 14:54:55.723781 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b880881-a332-41d7-95ae-83d7b37e753b" containerName="extract-utilities" Nov 28 14:54:55 crc kubenswrapper[4857]: I1128 14:54:55.723929 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b880881-a332-41d7-95ae-83d7b37e753b" containerName="registry-server" Nov 28 14:54:55 crc kubenswrapper[4857]: I1128 14:54:55.725051 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-sldrc" Nov 28 14:54:55 crc kubenswrapper[4857]: I1128 14:54:55.733789 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sldrc"] Nov 28 14:54:55 crc kubenswrapper[4857]: I1128 14:54:55.886803 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mfrqm\" (UniqueName: \"kubernetes.io/projected/ab5b1ecd-b2c7-4b50-a759-ded22e270e92-kube-api-access-mfrqm\") pod \"certified-operators-sldrc\" (UID: \"ab5b1ecd-b2c7-4b50-a759-ded22e270e92\") " pod="openshift-marketplace/certified-operators-sldrc" Nov 28 14:54:55 crc kubenswrapper[4857]: I1128 14:54:55.886882 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab5b1ecd-b2c7-4b50-a759-ded22e270e92-utilities\") pod \"certified-operators-sldrc\" (UID: \"ab5b1ecd-b2c7-4b50-a759-ded22e270e92\") " pod="openshift-marketplace/certified-operators-sldrc" Nov 28 14:54:55 crc kubenswrapper[4857]: I1128 14:54:55.886929 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab5b1ecd-b2c7-4b50-a759-ded22e270e92-catalog-content\") pod \"certified-operators-sldrc\" (UID: \"ab5b1ecd-b2c7-4b50-a759-ded22e270e92\") " pod="openshift-marketplace/certified-operators-sldrc" Nov 28 14:54:55 crc kubenswrapper[4857]: I1128 14:54:55.988377 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mfrqm\" (UniqueName: \"kubernetes.io/projected/ab5b1ecd-b2c7-4b50-a759-ded22e270e92-kube-api-access-mfrqm\") pod \"certified-operators-sldrc\" (UID: \"ab5b1ecd-b2c7-4b50-a759-ded22e270e92\") " pod="openshift-marketplace/certified-operators-sldrc" Nov 28 14:54:55 crc kubenswrapper[4857]: I1128 14:54:55.988455 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab5b1ecd-b2c7-4b50-a759-ded22e270e92-utilities\") pod \"certified-operators-sldrc\" (UID: \"ab5b1ecd-b2c7-4b50-a759-ded22e270e92\") " pod="openshift-marketplace/certified-operators-sldrc" Nov 28 14:54:55 crc kubenswrapper[4857]: I1128 14:54:55.988495 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab5b1ecd-b2c7-4b50-a759-ded22e270e92-catalog-content\") pod \"certified-operators-sldrc\" (UID: \"ab5b1ecd-b2c7-4b50-a759-ded22e270e92\") " pod="openshift-marketplace/certified-operators-sldrc" Nov 28 14:54:55 crc kubenswrapper[4857]: I1128 14:54:55.989139 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab5b1ecd-b2c7-4b50-a759-ded22e270e92-utilities\") pod \"certified-operators-sldrc\" (UID: \"ab5b1ecd-b2c7-4b50-a759-ded22e270e92\") " pod="openshift-marketplace/certified-operators-sldrc" Nov 28 14:54:55 crc kubenswrapper[4857]: I1128 14:54:55.989203 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab5b1ecd-b2c7-4b50-a759-ded22e270e92-catalog-content\") pod \"certified-operators-sldrc\" (UID: \"ab5b1ecd-b2c7-4b50-a759-ded22e270e92\") " pod="openshift-marketplace/certified-operators-sldrc" Nov 28 14:54:56 crc kubenswrapper[4857]: I1128 14:54:56.018478 4857 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-mfrqm\" (UniqueName: \"kubernetes.io/projected/ab5b1ecd-b2c7-4b50-a759-ded22e270e92-kube-api-access-mfrqm\") pod \"certified-operators-sldrc\" (UID: \"ab5b1ecd-b2c7-4b50-a759-ded22e270e92\") " pod="openshift-marketplace/certified-operators-sldrc" Nov 28 14:54:56 crc kubenswrapper[4857]: I1128 14:54:56.046329 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sldrc" Nov 28 14:54:56 crc kubenswrapper[4857]: I1128 14:54:56.550555 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sldrc"] Nov 28 14:54:57 crc kubenswrapper[4857]: I1128 14:54:57.226525 4857 generic.go:334] "Generic (PLEG): container finished" podID="ab5b1ecd-b2c7-4b50-a759-ded22e270e92" containerID="b291789913d387e3d8a4c06957cc574b99112f5fc44d213a63e328b8422ea472" exitCode=0 Nov 28 14:54:57 crc kubenswrapper[4857]: I1128 14:54:57.226588 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sldrc" event={"ID":"ab5b1ecd-b2c7-4b50-a759-ded22e270e92","Type":"ContainerDied","Data":"b291789913d387e3d8a4c06957cc574b99112f5fc44d213a63e328b8422ea472"} Nov 28 14:54:57 crc kubenswrapper[4857]: I1128 14:54:57.226970 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sldrc" event={"ID":"ab5b1ecd-b2c7-4b50-a759-ded22e270e92","Type":"ContainerStarted","Data":"5b42458a440704c8ff6e97488b61fd03ac025bf372c04b71d244fd5209b06fed"} Nov 28 14:54:58 crc kubenswrapper[4857]: I1128 14:54:58.228888 4857 scope.go:117] "RemoveContainer" containerID="3e9d36f7893d9f1cc6753c2afab65986137c59e8ac6eb66a5f0a1735633d260e" Nov 28 14:54:58 crc kubenswrapper[4857]: E1128 14:54:58.229123 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:55:01 crc kubenswrapper[4857]: I1128 14:55:01.267750 4857 generic.go:334] "Generic (PLEG): container finished" podID="ab5b1ecd-b2c7-4b50-a759-ded22e270e92" containerID="c7d96785e542ded0cd45ae5be7e9c9d190ac66429da393d8fbcafe24e1dff1b2" exitCode=0 Nov 28 14:55:01 crc kubenswrapper[4857]: I1128 14:55:01.267874 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sldrc" event={"ID":"ab5b1ecd-b2c7-4b50-a759-ded22e270e92","Type":"ContainerDied","Data":"c7d96785e542ded0cd45ae5be7e9c9d190ac66429da393d8fbcafe24e1dff1b2"} Nov 28 14:55:02 crc kubenswrapper[4857]: I1128 14:55:02.281139 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sldrc" event={"ID":"ab5b1ecd-b2c7-4b50-a759-ded22e270e92","Type":"ContainerStarted","Data":"b4408c43bd663c21404aabc6f404e73966b9620762646f851b97eddcb70a73c6"} Nov 28 14:55:02 crc kubenswrapper[4857]: I1128 14:55:02.308469 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-sldrc" podStartSLOduration=2.536521645 podStartE2EDuration="7.308443337s" podCreationTimestamp="2025-11-28 14:54:55 +0000 UTC" firstStartedPulling="2025-11-28 14:54:57.230415611 +0000 UTC m=+5147.354357038" 
lastFinishedPulling="2025-11-28 14:55:02.002337293 +0000 UTC m=+5152.126278730" observedRunningTime="2025-11-28 14:55:02.301176464 +0000 UTC m=+5152.425117901" watchObservedRunningTime="2025-11-28 14:55:02.308443337 +0000 UTC m=+5152.432384774" Nov 28 14:55:06 crc kubenswrapper[4857]: I1128 14:55:06.047006 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-sldrc" Nov 28 14:55:06 crc kubenswrapper[4857]: I1128 14:55:06.047575 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-sldrc" Nov 28 14:55:06 crc kubenswrapper[4857]: I1128 14:55:06.105681 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-sldrc" Nov 28 14:55:10 crc kubenswrapper[4857]: I1128 14:55:10.233005 4857 scope.go:117] "RemoveContainer" containerID="3e9d36f7893d9f1cc6753c2afab65986137c59e8ac6eb66a5f0a1735633d260e" Nov 28 14:55:10 crc kubenswrapper[4857]: E1128 14:55:10.234022 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:55:16 crc kubenswrapper[4857]: I1128 14:55:16.099749 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-sldrc" Nov 28 14:55:16 crc kubenswrapper[4857]: I1128 14:55:16.155447 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-sldrc"] Nov 28 14:55:16 crc kubenswrapper[4857]: I1128 14:55:16.417441 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-sldrc" podUID="ab5b1ecd-b2c7-4b50-a759-ded22e270e92" containerName="registry-server" containerID="cri-o://b4408c43bd663c21404aabc6f404e73966b9620762646f851b97eddcb70a73c6" gracePeriod=2 Nov 28 14:55:17 crc kubenswrapper[4857]: I1128 14:55:17.428502 4857 generic.go:334] "Generic (PLEG): container finished" podID="ab5b1ecd-b2c7-4b50-a759-ded22e270e92" containerID="b4408c43bd663c21404aabc6f404e73966b9620762646f851b97eddcb70a73c6" exitCode=0 Nov 28 14:55:17 crc kubenswrapper[4857]: I1128 14:55:17.428558 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sldrc" event={"ID":"ab5b1ecd-b2c7-4b50-a759-ded22e270e92","Type":"ContainerDied","Data":"b4408c43bd663c21404aabc6f404e73966b9620762646f851b97eddcb70a73c6"} Nov 28 14:55:17 crc kubenswrapper[4857]: I1128 14:55:17.991297 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-sldrc" Nov 28 14:55:18 crc kubenswrapper[4857]: I1128 14:55:18.103096 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab5b1ecd-b2c7-4b50-a759-ded22e270e92-catalog-content\") pod \"ab5b1ecd-b2c7-4b50-a759-ded22e270e92\" (UID: \"ab5b1ecd-b2c7-4b50-a759-ded22e270e92\") " Nov 28 14:55:18 crc kubenswrapper[4857]: I1128 14:55:18.103160 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mfrqm\" (UniqueName: \"kubernetes.io/projected/ab5b1ecd-b2c7-4b50-a759-ded22e270e92-kube-api-access-mfrqm\") pod \"ab5b1ecd-b2c7-4b50-a759-ded22e270e92\" (UID: \"ab5b1ecd-b2c7-4b50-a759-ded22e270e92\") " Nov 28 14:55:18 crc kubenswrapper[4857]: I1128 14:55:18.103276 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab5b1ecd-b2c7-4b50-a759-ded22e270e92-utilities\") pod \"ab5b1ecd-b2c7-4b50-a759-ded22e270e92\" (UID: \"ab5b1ecd-b2c7-4b50-a759-ded22e270e92\") " Nov 28 14:55:18 crc kubenswrapper[4857]: I1128 14:55:18.104563 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ab5b1ecd-b2c7-4b50-a759-ded22e270e92-utilities" (OuterVolumeSpecName: "utilities") pod "ab5b1ecd-b2c7-4b50-a759-ded22e270e92" (UID: "ab5b1ecd-b2c7-4b50-a759-ded22e270e92"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:55:18 crc kubenswrapper[4857]: I1128 14:55:18.108035 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab5b1ecd-b2c7-4b50-a759-ded22e270e92-kube-api-access-mfrqm" (OuterVolumeSpecName: "kube-api-access-mfrqm") pod "ab5b1ecd-b2c7-4b50-a759-ded22e270e92" (UID: "ab5b1ecd-b2c7-4b50-a759-ded22e270e92"). InnerVolumeSpecName "kube-api-access-mfrqm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:55:18 crc kubenswrapper[4857]: I1128 14:55:18.155428 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ab5b1ecd-b2c7-4b50-a759-ded22e270e92-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ab5b1ecd-b2c7-4b50-a759-ded22e270e92" (UID: "ab5b1ecd-b2c7-4b50-a759-ded22e270e92"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:55:18 crc kubenswrapper[4857]: I1128 14:55:18.205487 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ab5b1ecd-b2c7-4b50-a759-ded22e270e92-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 14:55:18 crc kubenswrapper[4857]: I1128 14:55:18.205744 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ab5b1ecd-b2c7-4b50-a759-ded22e270e92-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 14:55:18 crc kubenswrapper[4857]: I1128 14:55:18.205801 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mfrqm\" (UniqueName: \"kubernetes.io/projected/ab5b1ecd-b2c7-4b50-a759-ded22e270e92-kube-api-access-mfrqm\") on node \"crc\" DevicePath \"\"" Nov 28 14:55:18 crc kubenswrapper[4857]: I1128 14:55:18.448968 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sldrc" event={"ID":"ab5b1ecd-b2c7-4b50-a759-ded22e270e92","Type":"ContainerDied","Data":"5b42458a440704c8ff6e97488b61fd03ac025bf372c04b71d244fd5209b06fed"} Nov 28 14:55:18 crc kubenswrapper[4857]: I1128 14:55:18.449049 4857 scope.go:117] "RemoveContainer" containerID="b4408c43bd663c21404aabc6f404e73966b9620762646f851b97eddcb70a73c6" Nov 28 14:55:18 crc kubenswrapper[4857]: I1128 14:55:18.449129 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sldrc" Nov 28 14:55:18 crc kubenswrapper[4857]: I1128 14:55:18.473335 4857 scope.go:117] "RemoveContainer" containerID="c7d96785e542ded0cd45ae5be7e9c9d190ac66429da393d8fbcafe24e1dff1b2" Nov 28 14:55:18 crc kubenswrapper[4857]: I1128 14:55:18.487833 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-sldrc"] Nov 28 14:55:18 crc kubenswrapper[4857]: I1128 14:55:18.495082 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-sldrc"] Nov 28 14:55:18 crc kubenswrapper[4857]: I1128 14:55:18.506223 4857 scope.go:117] "RemoveContainer" containerID="b291789913d387e3d8a4c06957cc574b99112f5fc44d213a63e328b8422ea472" Nov 28 14:55:20 crc kubenswrapper[4857]: I1128 14:55:20.239539 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ab5b1ecd-b2c7-4b50-a759-ded22e270e92" path="/var/lib/kubelet/pods/ab5b1ecd-b2c7-4b50-a759-ded22e270e92/volumes" Nov 28 14:55:23 crc kubenswrapper[4857]: I1128 14:55:23.229413 4857 scope.go:117] "RemoveContainer" containerID="3e9d36f7893d9f1cc6753c2afab65986137c59e8ac6eb66a5f0a1735633d260e" Nov 28 14:55:23 crc kubenswrapper[4857]: E1128 14:55:23.230353 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:55:36 crc kubenswrapper[4857]: I1128 14:55:36.229049 4857 scope.go:117] "RemoveContainer" containerID="3e9d36f7893d9f1cc6753c2afab65986137c59e8ac6eb66a5f0a1735633d260e" Nov 28 14:55:36 crc kubenswrapper[4857]: E1128 14:55:36.229731 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" 
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:55:49 crc kubenswrapper[4857]: I1128 14:55:49.229245 4857 scope.go:117] "RemoveContainer" containerID="3e9d36f7893d9f1cc6753c2afab65986137c59e8ac6eb66a5f0a1735633d260e" Nov 28 14:55:49 crc kubenswrapper[4857]: E1128 14:55:49.230388 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:56:01 crc kubenswrapper[4857]: I1128 14:56:01.228660 4857 scope.go:117] "RemoveContainer" containerID="3e9d36f7893d9f1cc6753c2afab65986137c59e8ac6eb66a5f0a1735633d260e" Nov 28 14:56:01 crc kubenswrapper[4857]: E1128 14:56:01.229590 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:56:15 crc kubenswrapper[4857]: I1128 14:56:15.228904 4857 scope.go:117] "RemoveContainer" containerID="3e9d36f7893d9f1cc6753c2afab65986137c59e8ac6eb66a5f0a1735633d260e" Nov 28 14:56:15 crc kubenswrapper[4857]: E1128 14:56:15.230144 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:56:30 crc kubenswrapper[4857]: I1128 14:56:30.234602 4857 scope.go:117] "RemoveContainer" containerID="3e9d36f7893d9f1cc6753c2afab65986137c59e8ac6eb66a5f0a1735633d260e" Nov 28 14:56:30 crc kubenswrapper[4857]: E1128 14:56:30.235648 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:56:42 crc kubenswrapper[4857]: I1128 14:56:42.229816 4857 scope.go:117] "RemoveContainer" containerID="3e9d36f7893d9f1cc6753c2afab65986137c59e8ac6eb66a5f0a1735633d260e" Nov 28 14:56:42 crc kubenswrapper[4857]: E1128 14:56:42.231570 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:56:54 crc kubenswrapper[4857]: I1128 14:56:54.229126 4857 scope.go:117] "RemoveContainer" containerID="3e9d36f7893d9f1cc6753c2afab65986137c59e8ac6eb66a5f0a1735633d260e" Nov 28 14:56:54 crc kubenswrapper[4857]: E1128 14:56:54.230140 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:57:07 crc kubenswrapper[4857]: I1128 14:57:07.228293 4857 scope.go:117] "RemoveContainer" containerID="3e9d36f7893d9f1cc6753c2afab65986137c59e8ac6eb66a5f0a1735633d260e" Nov 28 14:57:07 crc kubenswrapper[4857]: E1128 14:57:07.229124 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:57:19 crc kubenswrapper[4857]: I1128 14:57:19.229648 4857 scope.go:117] "RemoveContainer" containerID="3e9d36f7893d9f1cc6753c2afab65986137c59e8ac6eb66a5f0a1735633d260e" Nov 28 14:57:19 crc kubenswrapper[4857]: E1128 14:57:19.230974 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:57:30 crc kubenswrapper[4857]: I1128 14:57:30.234068 4857 scope.go:117] "RemoveContainer" containerID="3e9d36f7893d9f1cc6753c2afab65986137c59e8ac6eb66a5f0a1735633d260e" Nov 28 14:57:30 crc kubenswrapper[4857]: E1128 14:57:30.235018 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:57:39 crc kubenswrapper[4857]: I1128 14:57:39.932025 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-copy-data"] Nov 28 14:57:39 crc kubenswrapper[4857]: E1128 14:57:39.932836 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab5b1ecd-b2c7-4b50-a759-ded22e270e92" containerName="extract-content" Nov 28 14:57:39 crc kubenswrapper[4857]: I1128 14:57:39.932850 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab5b1ecd-b2c7-4b50-a759-ded22e270e92" containerName="extract-content" Nov 28 14:57:39 crc kubenswrapper[4857]: E1128 14:57:39.932862 4857 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="ab5b1ecd-b2c7-4b50-a759-ded22e270e92" containerName="extract-utilities" Nov 28 14:57:39 crc kubenswrapper[4857]: I1128 14:57:39.932868 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab5b1ecd-b2c7-4b50-a759-ded22e270e92" containerName="extract-utilities" Nov 28 14:57:39 crc kubenswrapper[4857]: E1128 14:57:39.932880 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab5b1ecd-b2c7-4b50-a759-ded22e270e92" containerName="registry-server" Nov 28 14:57:39 crc kubenswrapper[4857]: I1128 14:57:39.932887 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab5b1ecd-b2c7-4b50-a759-ded22e270e92" containerName="registry-server" Nov 28 14:57:39 crc kubenswrapper[4857]: I1128 14:57:39.933086 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab5b1ecd-b2c7-4b50-a759-ded22e270e92" containerName="registry-server" Nov 28 14:57:39 crc kubenswrapper[4857]: I1128 14:57:39.933639 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-copy-data" Nov 28 14:57:39 crc kubenswrapper[4857]: I1128 14:57:39.936375 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-trntl" Nov 28 14:57:39 crc kubenswrapper[4857]: I1128 14:57:39.948027 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-copy-data"] Nov 28 14:57:40 crc kubenswrapper[4857]: I1128 14:57:40.065435 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q7n6g\" (UniqueName: \"kubernetes.io/projected/ccc6a333-9b75-487e-8d3a-740ec87a9136-kube-api-access-q7n6g\") pod \"mariadb-copy-data\" (UID: \"ccc6a333-9b75-487e-8d3a-740ec87a9136\") " pod="openstack/mariadb-copy-data" Nov 28 14:57:40 crc kubenswrapper[4857]: I1128 14:57:40.065879 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-b30d6f06-e654-4101-80a0-1cc8f143281a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b30d6f06-e654-4101-80a0-1cc8f143281a\") pod \"mariadb-copy-data\" (UID: \"ccc6a333-9b75-487e-8d3a-740ec87a9136\") " pod="openstack/mariadb-copy-data" Nov 28 14:57:40 crc kubenswrapper[4857]: I1128 14:57:40.167485 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q7n6g\" (UniqueName: \"kubernetes.io/projected/ccc6a333-9b75-487e-8d3a-740ec87a9136-kube-api-access-q7n6g\") pod \"mariadb-copy-data\" (UID: \"ccc6a333-9b75-487e-8d3a-740ec87a9136\") " pod="openstack/mariadb-copy-data" Nov 28 14:57:40 crc kubenswrapper[4857]: I1128 14:57:40.167600 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-b30d6f06-e654-4101-80a0-1cc8f143281a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b30d6f06-e654-4101-80a0-1cc8f143281a\") pod \"mariadb-copy-data\" (UID: \"ccc6a333-9b75-487e-8d3a-740ec87a9136\") " pod="openstack/mariadb-copy-data" Nov 28 14:57:40 crc kubenswrapper[4857]: I1128 14:57:40.171164 4857 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 28 14:57:40 crc kubenswrapper[4857]: I1128 14:57:40.171237 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-b30d6f06-e654-4101-80a0-1cc8f143281a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b30d6f06-e654-4101-80a0-1cc8f143281a\") pod \"mariadb-copy-data\" (UID: \"ccc6a333-9b75-487e-8d3a-740ec87a9136\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/b13df03335c86ed72e0f642a57ea67d694a617fb3650bcecbfadb6d3f6e56376/globalmount\"" pod="openstack/mariadb-copy-data" Nov 28 14:57:40 crc kubenswrapper[4857]: I1128 14:57:40.190076 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q7n6g\" (UniqueName: \"kubernetes.io/projected/ccc6a333-9b75-487e-8d3a-740ec87a9136-kube-api-access-q7n6g\") pod \"mariadb-copy-data\" (UID: \"ccc6a333-9b75-487e-8d3a-740ec87a9136\") " pod="openstack/mariadb-copy-data" Nov 28 14:57:40 crc kubenswrapper[4857]: I1128 14:57:40.215227 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-b30d6f06-e654-4101-80a0-1cc8f143281a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b30d6f06-e654-4101-80a0-1cc8f143281a\") pod \"mariadb-copy-data\" (UID: \"ccc6a333-9b75-487e-8d3a-740ec87a9136\") " pod="openstack/mariadb-copy-data" Nov 28 14:57:40 crc kubenswrapper[4857]: I1128 14:57:40.263169 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-copy-data" Nov 28 14:57:40 crc kubenswrapper[4857]: I1128 14:57:40.797691 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-copy-data"] Nov 28 14:57:40 crc kubenswrapper[4857]: I1128 14:57:40.857621 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"ccc6a333-9b75-487e-8d3a-740ec87a9136","Type":"ContainerStarted","Data":"7d2d8776a59be36366f96fc3dfed0523a27d8592f54fa04b4cfc839553db52fd"} Nov 28 14:57:41 crc kubenswrapper[4857]: I1128 14:57:41.869638 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"ccc6a333-9b75-487e-8d3a-740ec87a9136","Type":"ContainerStarted","Data":"d91334afb40a193be62366514e6b8f1c461c9d16204c8da88bfce65c30e349ca"} Nov 28 14:57:41 crc kubenswrapper[4857]: I1128 14:57:41.886366 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mariadb-copy-data" podStartSLOduration=3.886348008 podStartE2EDuration="3.886348008s" podCreationTimestamp="2025-11-28 14:57:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 14:57:41.884583961 +0000 UTC m=+5312.008525408" watchObservedRunningTime="2025-11-28 14:57:41.886348008 +0000 UTC m=+5312.010289455" Nov 28 14:57:44 crc kubenswrapper[4857]: I1128 14:57:44.908182 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client"] Nov 28 14:57:44 crc kubenswrapper[4857]: I1128 14:57:44.909924 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Nov 28 14:57:44 crc kubenswrapper[4857]: I1128 14:57:44.930890 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Nov 28 14:57:45 crc kubenswrapper[4857]: I1128 14:57:45.083017 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tqdz9\" (UniqueName: \"kubernetes.io/projected/eb22b512-9dcd-454a-b8a5-17d14260f720-kube-api-access-tqdz9\") pod \"mariadb-client\" (UID: \"eb22b512-9dcd-454a-b8a5-17d14260f720\") " pod="openstack/mariadb-client" Nov 28 14:57:45 crc kubenswrapper[4857]: I1128 14:57:45.184507 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tqdz9\" (UniqueName: \"kubernetes.io/projected/eb22b512-9dcd-454a-b8a5-17d14260f720-kube-api-access-tqdz9\") pod \"mariadb-client\" (UID: \"eb22b512-9dcd-454a-b8a5-17d14260f720\") " pod="openstack/mariadb-client" Nov 28 14:57:45 crc kubenswrapper[4857]: I1128 14:57:45.205177 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tqdz9\" (UniqueName: \"kubernetes.io/projected/eb22b512-9dcd-454a-b8a5-17d14260f720-kube-api-access-tqdz9\") pod \"mariadb-client\" (UID: \"eb22b512-9dcd-454a-b8a5-17d14260f720\") " pod="openstack/mariadb-client" Nov 28 14:57:45 crc kubenswrapper[4857]: I1128 14:57:45.228828 4857 scope.go:117] "RemoveContainer" containerID="3e9d36f7893d9f1cc6753c2afab65986137c59e8ac6eb66a5f0a1735633d260e" Nov 28 14:57:45 crc kubenswrapper[4857]: E1128 14:57:45.229127 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:57:45 crc kubenswrapper[4857]: I1128 14:57:45.234030 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Nov 28 14:57:45 crc kubenswrapper[4857]: I1128 14:57:45.684220 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Nov 28 14:57:45 crc kubenswrapper[4857]: I1128 14:57:45.913805 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"eb22b512-9dcd-454a-b8a5-17d14260f720","Type":"ContainerStarted","Data":"01ec278ba09a1a7507966e71b2167fa520f0daeafeab978240129ee1cb067673"} Nov 28 14:57:46 crc kubenswrapper[4857]: I1128 14:57:46.924203 4857 generic.go:334] "Generic (PLEG): container finished" podID="eb22b512-9dcd-454a-b8a5-17d14260f720" containerID="d13faed0f994bd7af18e66e41526509932f300faef1a1b534b2af7a0d1075eb0" exitCode=0 Nov 28 14:57:46 crc kubenswrapper[4857]: I1128 14:57:46.924326 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"eb22b512-9dcd-454a-b8a5-17d14260f720","Type":"ContainerDied","Data":"d13faed0f994bd7af18e66e41526509932f300faef1a1b534b2af7a0d1075eb0"} Nov 28 14:57:48 crc kubenswrapper[4857]: I1128 14:57:48.221979 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Nov 28 14:57:48 crc kubenswrapper[4857]: I1128 14:57:48.248383 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client_eb22b512-9dcd-454a-b8a5-17d14260f720/mariadb-client/0.log" Nov 28 14:57:48 crc kubenswrapper[4857]: I1128 14:57:48.285043 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client"] Nov 28 14:57:48 crc kubenswrapper[4857]: I1128 14:57:48.292135 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client"] Nov 28 14:57:48 crc kubenswrapper[4857]: I1128 14:57:48.336367 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tqdz9\" (UniqueName: \"kubernetes.io/projected/eb22b512-9dcd-454a-b8a5-17d14260f720-kube-api-access-tqdz9\") pod \"eb22b512-9dcd-454a-b8a5-17d14260f720\" (UID: \"eb22b512-9dcd-454a-b8a5-17d14260f720\") " Nov 28 14:57:48 crc kubenswrapper[4857]: I1128 14:57:48.341939 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb22b512-9dcd-454a-b8a5-17d14260f720-kube-api-access-tqdz9" (OuterVolumeSpecName: "kube-api-access-tqdz9") pod "eb22b512-9dcd-454a-b8a5-17d14260f720" (UID: "eb22b512-9dcd-454a-b8a5-17d14260f720"). InnerVolumeSpecName "kube-api-access-tqdz9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:57:48 crc kubenswrapper[4857]: I1128 14:57:48.438258 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tqdz9\" (UniqueName: \"kubernetes.io/projected/eb22b512-9dcd-454a-b8a5-17d14260f720-kube-api-access-tqdz9\") on node \"crc\" DevicePath \"\"" Nov 28 14:57:48 crc kubenswrapper[4857]: I1128 14:57:48.439153 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client"] Nov 28 14:57:48 crc kubenswrapper[4857]: E1128 14:57:48.439608 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb22b512-9dcd-454a-b8a5-17d14260f720" containerName="mariadb-client" Nov 28 14:57:48 crc kubenswrapper[4857]: I1128 14:57:48.439625 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb22b512-9dcd-454a-b8a5-17d14260f720" containerName="mariadb-client" Nov 28 14:57:48 crc kubenswrapper[4857]: I1128 14:57:48.439787 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb22b512-9dcd-454a-b8a5-17d14260f720" containerName="mariadb-client" Nov 28 14:57:48 crc kubenswrapper[4857]: I1128 14:57:48.440646 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Nov 28 14:57:48 crc kubenswrapper[4857]: I1128 14:57:48.448452 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Nov 28 14:57:48 crc kubenswrapper[4857]: I1128 14:57:48.540141 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x8svg\" (UniqueName: \"kubernetes.io/projected/f715b698-ec19-4ac4-91a6-1995df53605b-kube-api-access-x8svg\") pod \"mariadb-client\" (UID: \"f715b698-ec19-4ac4-91a6-1995df53605b\") " pod="openstack/mariadb-client" Nov 28 14:57:48 crc kubenswrapper[4857]: I1128 14:57:48.641776 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x8svg\" (UniqueName: \"kubernetes.io/projected/f715b698-ec19-4ac4-91a6-1995df53605b-kube-api-access-x8svg\") pod \"mariadb-client\" (UID: \"f715b698-ec19-4ac4-91a6-1995df53605b\") " pod="openstack/mariadb-client" Nov 28 14:57:48 crc kubenswrapper[4857]: I1128 14:57:48.673380 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x8svg\" (UniqueName: \"kubernetes.io/projected/f715b698-ec19-4ac4-91a6-1995df53605b-kube-api-access-x8svg\") pod \"mariadb-client\" (UID: \"f715b698-ec19-4ac4-91a6-1995df53605b\") " pod="openstack/mariadb-client" Nov 28 14:57:48 crc kubenswrapper[4857]: I1128 14:57:48.758802 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Nov 28 14:57:48 crc kubenswrapper[4857]: I1128 14:57:48.961630 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="01ec278ba09a1a7507966e71b2167fa520f0daeafeab978240129ee1cb067673" Nov 28 14:57:48 crc kubenswrapper[4857]: I1128 14:57:48.961785 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Nov 28 14:57:48 crc kubenswrapper[4857]: I1128 14:57:48.986831 4857 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/mariadb-client" oldPodUID="eb22b512-9dcd-454a-b8a5-17d14260f720" podUID="f715b698-ec19-4ac4-91a6-1995df53605b" Nov 28 14:57:49 crc kubenswrapper[4857]: I1128 14:57:49.229686 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Nov 28 14:57:49 crc kubenswrapper[4857]: W1128 14:57:49.235308 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf715b698_ec19_4ac4_91a6_1995df53605b.slice/crio-ed2ca939e51e1691c08d4c86ae634083fba4d5e06fc3ceca35fc1739558b9594 WatchSource:0}: Error finding container ed2ca939e51e1691c08d4c86ae634083fba4d5e06fc3ceca35fc1739558b9594: Status 404 returned error can't find the container with id ed2ca939e51e1691c08d4c86ae634083fba4d5e06fc3ceca35fc1739558b9594 Nov 28 14:57:49 crc kubenswrapper[4857]: I1128 14:57:49.973714 4857 generic.go:334] "Generic (PLEG): container finished" podID="f715b698-ec19-4ac4-91a6-1995df53605b" containerID="60a31fc3ffc6f287af1d2ccde25770c5621b71289a47cb6f2cd7b2545b0f7b48" exitCode=0 Nov 28 14:57:49 crc kubenswrapper[4857]: I1128 14:57:49.974175 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"f715b698-ec19-4ac4-91a6-1995df53605b","Type":"ContainerDied","Data":"60a31fc3ffc6f287af1d2ccde25770c5621b71289a47cb6f2cd7b2545b0f7b48"} Nov 28 14:57:49 crc kubenswrapper[4857]: I1128 14:57:49.974222 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"f715b698-ec19-4ac4-91a6-1995df53605b","Type":"ContainerStarted","Data":"ed2ca939e51e1691c08d4c86ae634083fba4d5e06fc3ceca35fc1739558b9594"} Nov 28 14:57:50 crc kubenswrapper[4857]: I1128 14:57:50.249111 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eb22b512-9dcd-454a-b8a5-17d14260f720" path="/var/lib/kubelet/pods/eb22b512-9dcd-454a-b8a5-17d14260f720/volumes" Nov 28 14:57:51 crc kubenswrapper[4857]: I1128 14:57:51.388126 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Nov 28 14:57:51 crc kubenswrapper[4857]: I1128 14:57:51.413520 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client_f715b698-ec19-4ac4-91a6-1995df53605b/mariadb-client/0.log" Nov 28 14:57:51 crc kubenswrapper[4857]: I1128 14:57:51.444396 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client"] Nov 28 14:57:51 crc kubenswrapper[4857]: I1128 14:57:51.452424 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client"] Nov 28 14:57:51 crc kubenswrapper[4857]: I1128 14:57:51.492777 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x8svg\" (UniqueName: \"kubernetes.io/projected/f715b698-ec19-4ac4-91a6-1995df53605b-kube-api-access-x8svg\") pod \"f715b698-ec19-4ac4-91a6-1995df53605b\" (UID: \"f715b698-ec19-4ac4-91a6-1995df53605b\") " Nov 28 14:57:51 crc kubenswrapper[4857]: I1128 14:57:51.501174 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f715b698-ec19-4ac4-91a6-1995df53605b-kube-api-access-x8svg" (OuterVolumeSpecName: "kube-api-access-x8svg") pod "f715b698-ec19-4ac4-91a6-1995df53605b" (UID: "f715b698-ec19-4ac4-91a6-1995df53605b"). 
InnerVolumeSpecName "kube-api-access-x8svg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:57:51 crc kubenswrapper[4857]: I1128 14:57:51.605588 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x8svg\" (UniqueName: \"kubernetes.io/projected/f715b698-ec19-4ac4-91a6-1995df53605b-kube-api-access-x8svg\") on node \"crc\" DevicePath \"\"" Nov 28 14:57:51 crc kubenswrapper[4857]: I1128 14:57:51.997068 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ed2ca939e51e1691c08d4c86ae634083fba4d5e06fc3ceca35fc1739558b9594" Nov 28 14:57:51 crc kubenswrapper[4857]: I1128 14:57:51.997194 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Nov 28 14:57:52 crc kubenswrapper[4857]: I1128 14:57:52.237873 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f715b698-ec19-4ac4-91a6-1995df53605b" path="/var/lib/kubelet/pods/f715b698-ec19-4ac4-91a6-1995df53605b/volumes" Nov 28 14:57:58 crc kubenswrapper[4857]: I1128 14:57:58.228825 4857 scope.go:117] "RemoveContainer" containerID="3e9d36f7893d9f1cc6753c2afab65986137c59e8ac6eb66a5f0a1735633d260e" Nov 28 14:57:58 crc kubenswrapper[4857]: E1128 14:57:58.229845 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:58:11 crc kubenswrapper[4857]: I1128 14:58:11.228660 4857 scope.go:117] "RemoveContainer" containerID="3e9d36f7893d9f1cc6753c2afab65986137c59e8ac6eb66a5f0a1735633d260e" Nov 28 14:58:11 crc kubenswrapper[4857]: E1128 14:58:11.229722 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:58:26 crc kubenswrapper[4857]: I1128 14:58:26.230782 4857 scope.go:117] "RemoveContainer" containerID="3e9d36f7893d9f1cc6753c2afab65986137c59e8ac6eb66a5f0a1735633d260e" Nov 28 14:58:26 crc kubenswrapper[4857]: E1128 14:58:26.233088 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.580420 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 14:58:29 crc kubenswrapper[4857]: E1128 14:58:29.581128 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f715b698-ec19-4ac4-91a6-1995df53605b" containerName="mariadb-client" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.581145 4857 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="f715b698-ec19-4ac4-91a6-1995df53605b" containerName="mariadb-client" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.581345 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f715b698-ec19-4ac4-91a6-1995df53605b" containerName="mariadb-client" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.582323 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.585254 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-8gsm2" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.586310 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.586313 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.609643 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.626757 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-1"] Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.632104 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-1" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.641741 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-1"] Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.654091 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-2"] Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.655550 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-2" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.676646 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-2"] Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.723432 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-4b674c0a-da32-49e8-87b3-d76bb3576390\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b674c0a-da32-49e8-87b3-d76bb3576390\") pod \"ovsdbserver-nb-0\" (UID: \"87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c\") " pod="openstack/ovsdbserver-nb-0" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.723500 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c\") " pod="openstack/ovsdbserver-nb-0" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.723549 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6w9kp\" (UniqueName: \"kubernetes.io/projected/87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c-kube-api-access-6w9kp\") pod \"ovsdbserver-nb-0\" (UID: \"87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c\") " pod="openstack/ovsdbserver-nb-0" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.723568 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c\") " pod="openstack/ovsdbserver-nb-0" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.723583 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c\") " pod="openstack/ovsdbserver-nb-0" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.723612 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c-config\") pod \"ovsdbserver-nb-0\" (UID: \"87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c\") " pod="openstack/ovsdbserver-nb-0" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.786403 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.787667 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.791513 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.791801 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-kg7tx" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.795557 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.808636 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.819356 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-2"] Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.821012 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-2" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.825600 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c-config\") pod \"ovsdbserver-nb-0\" (UID: \"87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c\") " pod="openstack/ovsdbserver-nb-0" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.825661 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5526a303-a350-4271-8e45-36881affbd04-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"5526a303-a350-4271-8e45-36881affbd04\") " pod="openstack/ovsdbserver-nb-1" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.825731 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-9e2df167-a2c8-420a-b409-b97ac67e5642\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9e2df167-a2c8-420a-b409-b97ac67e5642\") pod \"ovsdbserver-nb-1\" (UID: \"5526a303-a350-4271-8e45-36881affbd04\") " pod="openstack/ovsdbserver-nb-1" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.825892 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5369c4e6-8532-4d9c-88a7-7fbb5c22d44e-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"5369c4e6-8532-4d9c-88a7-7fbb5c22d44e\") " pod="openstack/ovsdbserver-nb-2" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.825963 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5526a303-a350-4271-8e45-36881affbd04-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: \"5526a303-a350-4271-8e45-36881affbd04\") " pod="openstack/ovsdbserver-nb-1" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.826011 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d4j72\" (UniqueName: \"kubernetes.io/projected/5369c4e6-8532-4d9c-88a7-7fbb5c22d44e-kube-api-access-d4j72\") pod \"ovsdbserver-nb-2\" (UID: \"5369c4e6-8532-4d9c-88a7-7fbb5c22d44e\") " pod="openstack/ovsdbserver-nb-2" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.826088 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-4b674c0a-da32-49e8-87b3-d76bb3576390\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b674c0a-da32-49e8-87b3-d76bb3576390\") pod \"ovsdbserver-nb-0\" (UID: \"87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c\") " pod="openstack/ovsdbserver-nb-0" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.826144 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5526a303-a350-4271-8e45-36881affbd04-config\") pod \"ovsdbserver-nb-1\" (UID: \"5526a303-a350-4271-8e45-36881affbd04\") " pod="openstack/ovsdbserver-nb-1" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.826179 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghhbp\" (UniqueName: \"kubernetes.io/projected/5526a303-a350-4271-8e45-36881affbd04-kube-api-access-ghhbp\") pod \"ovsdbserver-nb-1\" (UID: \"5526a303-a350-4271-8e45-36881affbd04\") " pod="openstack/ovsdbserver-nb-1" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.826209 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5369c4e6-8532-4d9c-88a7-7fbb5c22d44e-ovsdb-rundir\") pod \"ovsdbserver-nb-2\" (UID: \"5369c4e6-8532-4d9c-88a7-7fbb5c22d44e\") " pod="openstack/ovsdbserver-nb-2" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.826250 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-a7ff10c1-2426-4bdc-84a8-639c9abd9cb3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a7ff10c1-2426-4bdc-84a8-639c9abd9cb3\") pod \"ovsdbserver-nb-2\" (UID: \"5369c4e6-8532-4d9c-88a7-7fbb5c22d44e\") " pod="openstack/ovsdbserver-nb-2" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.826280 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c\") " pod="openstack/ovsdbserver-nb-0" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.826336 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5526a303-a350-4271-8e45-36881affbd04-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"5526a303-a350-4271-8e45-36881affbd04\") " pod="openstack/ovsdbserver-nb-1" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.826373 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5369c4e6-8532-4d9c-88a7-7fbb5c22d44e-config\") pod \"ovsdbserver-nb-2\" (UID: \"5369c4e6-8532-4d9c-88a7-7fbb5c22d44e\") " pod="openstack/ovsdbserver-nb-2" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.826553 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6w9kp\" (UniqueName: \"kubernetes.io/projected/87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c-kube-api-access-6w9kp\") pod \"ovsdbserver-nb-0\" (UID: \"87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c\") " pod="openstack/ovsdbserver-nb-0" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.826618 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5369c4e6-8532-4d9c-88a7-7fbb5c22d44e-combined-ca-bundle\") pod 
\"ovsdbserver-nb-2\" (UID: \"5369c4e6-8532-4d9c-88a7-7fbb5c22d44e\") " pod="openstack/ovsdbserver-nb-2" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.826649 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c\") " pod="openstack/ovsdbserver-nb-0" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.826682 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c\") " pod="openstack/ovsdbserver-nb-0" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.827193 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c-config\") pod \"ovsdbserver-nb-0\" (UID: \"87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c\") " pod="openstack/ovsdbserver-nb-0" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.827320 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c\") " pod="openstack/ovsdbserver-nb-0" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.827923 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c\") " pod="openstack/ovsdbserver-nb-0" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.833786 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-1"] Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.837240 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c\") " pod="openstack/ovsdbserver-nb-0" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.837319 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-1" Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.849296 4857 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.849360 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-4b674c0a-da32-49e8-87b3-d76bb3576390\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b674c0a-da32-49e8-87b3-d76bb3576390\") pod \"ovsdbserver-nb-0\" (UID: \"87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/355f3c35de3d35e06aa04186b10e4225612c4bea043182ae4778397121427de3/globalmount\"" pod="openstack/ovsdbserver-nb-0"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.849883 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-1"]
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.856513 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-2"]
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.860545 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6w9kp\" (UniqueName: \"kubernetes.io/projected/87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c-kube-api-access-6w9kp\") pod \"ovsdbserver-nb-0\" (UID: \"87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c\") " pod="openstack/ovsdbserver-nb-0"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.902281 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-4b674c0a-da32-49e8-87b3-d76bb3576390\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-4b674c0a-da32-49e8-87b3-d76bb3576390\") pod \"ovsdbserver-nb-0\" (UID: \"87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c\") " pod="openstack/ovsdbserver-nb-0"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.909422 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.937056 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-dc5661eb-3cdd-4440-8a9d-71ddba0c48e9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-dc5661eb-3cdd-4440-8a9d-71ddba0c48e9\") pod \"ovsdbserver-sb-2\" (UID: \"d4f36444-104b-4cb9-befb-02e72742474b\") " pod="openstack/ovsdbserver-sb-2"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.937147 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5l7zz\" (UniqueName: \"kubernetes.io/projected/215aa7d0-2437-4c1a-ab37-80be33b6fca0-kube-api-access-5l7zz\") pod \"ovsdbserver-sb-1\" (UID: \"215aa7d0-2437-4c1a-ab37-80be33b6fca0\") " pod="openstack/ovsdbserver-sb-1"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.937192 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b81238d-98a7-4c18-930f-6bcce93525e9-config\") pod \"ovsdbserver-sb-0\" (UID: \"4b81238d-98a7-4c18-930f-6bcce93525e9\") " pod="openstack/ovsdbserver-sb-0"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.937233 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-866f1dcf-0182-4a38-a100-d0a895f5086c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-866f1dcf-0182-4a38-a100-d0a895f5086c\") pod \"ovsdbserver-sb-0\" (UID: \"4b81238d-98a7-4c18-930f-6bcce93525e9\") " pod="openstack/ovsdbserver-sb-0"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.937259 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d4f36444-104b-4cb9-befb-02e72742474b-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"d4f36444-104b-4cb9-befb-02e72742474b\") " pod="openstack/ovsdbserver-sb-2"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.937295 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5526a303-a350-4271-8e45-36881affbd04-config\") pod \"ovsdbserver-nb-1\" (UID: \"5526a303-a350-4271-8e45-36881affbd04\") " pod="openstack/ovsdbserver-nb-1"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.937323 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/215aa7d0-2437-4c1a-ab37-80be33b6fca0-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"215aa7d0-2437-4c1a-ab37-80be33b6fca0\") " pod="openstack/ovsdbserver-sb-1"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.937634 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ghhbp\" (UniqueName: \"kubernetes.io/projected/5526a303-a350-4271-8e45-36881affbd04-kube-api-access-ghhbp\") pod \"ovsdbserver-nb-1\" (UID: \"5526a303-a350-4271-8e45-36881affbd04\") " pod="openstack/ovsdbserver-nb-1"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.937804 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5369c4e6-8532-4d9c-88a7-7fbb5c22d44e-ovsdb-rundir\") pod \"ovsdbserver-nb-2\" (UID: \"5369c4e6-8532-4d9c-88a7-7fbb5c22d44e\") " pod="openstack/ovsdbserver-nb-2"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.937863 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-a7ff10c1-2426-4bdc-84a8-639c9abd9cb3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a7ff10c1-2426-4bdc-84a8-639c9abd9cb3\") pod \"ovsdbserver-nb-2\" (UID: \"5369c4e6-8532-4d9c-88a7-7fbb5c22d44e\") " pod="openstack/ovsdbserver-nb-2"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.937921 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5526a303-a350-4271-8e45-36881affbd04-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"5526a303-a350-4271-8e45-36881affbd04\") " pod="openstack/ovsdbserver-nb-1"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.938015 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5369c4e6-8532-4d9c-88a7-7fbb5c22d44e-config\") pod \"ovsdbserver-nb-2\" (UID: \"5369c4e6-8532-4d9c-88a7-7fbb5c22d44e\") " pod="openstack/ovsdbserver-nb-2"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.938055 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4f36444-104b-4cb9-befb-02e72742474b-config\") pod \"ovsdbserver-sb-2\" (UID: \"d4f36444-104b-4cb9-befb-02e72742474b\") " pod="openstack/ovsdbserver-sb-2"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.938162 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d4f36444-104b-4cb9-befb-02e72742474b-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"d4f36444-104b-4cb9-befb-02e72742474b\") " pod="openstack/ovsdbserver-sb-2"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.938203 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5369c4e6-8532-4d9c-88a7-7fbb5c22d44e-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"5369c4e6-8532-4d9c-88a7-7fbb5c22d44e\") " pod="openstack/ovsdbserver-nb-2"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.938252 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b81238d-98a7-4c18-930f-6bcce93525e9-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"4b81238d-98a7-4c18-930f-6bcce93525e9\") " pod="openstack/ovsdbserver-sb-0"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.938282 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4f36444-104b-4cb9-befb-02e72742474b-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"d4f36444-104b-4cb9-befb-02e72742474b\") " pod="openstack/ovsdbserver-sb-2"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.938335 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4b81238d-98a7-4c18-930f-6bcce93525e9-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"4b81238d-98a7-4c18-930f-6bcce93525e9\") " pod="openstack/ovsdbserver-sb-0"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.938368 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4t5q6\" (UniqueName: \"kubernetes.io/projected/4b81238d-98a7-4c18-930f-6bcce93525e9-kube-api-access-4t5q6\") pod \"ovsdbserver-sb-0\" (UID: \"4b81238d-98a7-4c18-930f-6bcce93525e9\") " pod="openstack/ovsdbserver-sb-0"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.938416 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/215aa7d0-2437-4c1a-ab37-80be33b6fca0-scripts\") pod \"ovsdbserver-sb-1\" (UID: \"215aa7d0-2437-4c1a-ab37-80be33b6fca0\") " pod="openstack/ovsdbserver-sb-1"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.938453 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5526a303-a350-4271-8e45-36881affbd04-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"5526a303-a350-4271-8e45-36881affbd04\") " pod="openstack/ovsdbserver-nb-1"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.938486 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-9e2df167-a2c8-420a-b409-b97ac67e5642\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9e2df167-a2c8-420a-b409-b97ac67e5642\") pod \"ovsdbserver-nb-1\" (UID: \"5526a303-a350-4271-8e45-36881affbd04\") " pod="openstack/ovsdbserver-nb-1"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.938537 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4b81238d-98a7-4c18-930f-6bcce93525e9-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"4b81238d-98a7-4c18-930f-6bcce93525e9\") " pod="openstack/ovsdbserver-sb-0"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.938564 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nvgbg\" (UniqueName: \"kubernetes.io/projected/d4f36444-104b-4cb9-befb-02e72742474b-kube-api-access-nvgbg\") pod \"ovsdbserver-sb-2\" (UID: \"d4f36444-104b-4cb9-befb-02e72742474b\") " pod="openstack/ovsdbserver-sb-2"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.938614 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5369c4e6-8532-4d9c-88a7-7fbb5c22d44e-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"5369c4e6-8532-4d9c-88a7-7fbb5c22d44e\") " pod="openstack/ovsdbserver-nb-2"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.938577 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5526a303-a350-4271-8e45-36881affbd04-config\") pod \"ovsdbserver-nb-1\" (UID: \"5526a303-a350-4271-8e45-36881affbd04\") " pod="openstack/ovsdbserver-nb-1"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.938644 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/215aa7d0-2437-4c1a-ab37-80be33b6fca0-config\") pod \"ovsdbserver-sb-1\" (UID: \"215aa7d0-2437-4c1a-ab37-80be33b6fca0\") " pod="openstack/ovsdbserver-sb-1"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.938810 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5369c4e6-8532-4d9c-88a7-7fbb5c22d44e-ovsdb-rundir\") pod \"ovsdbserver-nb-2\" (UID: \"5369c4e6-8532-4d9c-88a7-7fbb5c22d44e\") " pod="openstack/ovsdbserver-nb-2"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.940584 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5526a303-a350-4271-8e45-36881affbd04-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: \"5526a303-a350-4271-8e45-36881affbd04\") " pod="openstack/ovsdbserver-nb-1"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.940648 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-b88202d9-0c35-4768-857f-3fd0b9d3df55\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b88202d9-0c35-4768-857f-3fd0b9d3df55\") pod \"ovsdbserver-sb-1\" (UID: \"215aa7d0-2437-4c1a-ab37-80be33b6fca0\") " pod="openstack/ovsdbserver-sb-1"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.940711 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5526a303-a350-4271-8e45-36881affbd04-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"5526a303-a350-4271-8e45-36881affbd04\") " pod="openstack/ovsdbserver-nb-1"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.940740 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/215aa7d0-2437-4c1a-ab37-80be33b6fca0-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"215aa7d0-2437-4c1a-ab37-80be33b6fca0\") " pod="openstack/ovsdbserver-sb-1"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.941236 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5369c4e6-8532-4d9c-88a7-7fbb5c22d44e-config\") pod \"ovsdbserver-nb-2\" (UID: \"5369c4e6-8532-4d9c-88a7-7fbb5c22d44e\") " pod="openstack/ovsdbserver-nb-2"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.941774 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d4j72\" (UniqueName: \"kubernetes.io/projected/5369c4e6-8532-4d9c-88a7-7fbb5c22d44e-kube-api-access-d4j72\") pod \"ovsdbserver-nb-2\" (UID: \"5369c4e6-8532-4d9c-88a7-7fbb5c22d44e\") " pod="openstack/ovsdbserver-nb-2"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.942939 4857 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.943000 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-a7ff10c1-2426-4bdc-84a8-639c9abd9cb3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a7ff10c1-2426-4bdc-84a8-639c9abd9cb3\") pod \"ovsdbserver-nb-2\" (UID: \"5369c4e6-8532-4d9c-88a7-7fbb5c22d44e\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/2805be154e8f831c2b61cd23a05beebcf1fd86b2facaaca418f0454d39116cc1/globalmount\"" pod="openstack/ovsdbserver-nb-2"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.944027 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5369c4e6-8532-4d9c-88a7-7fbb5c22d44e-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"5369c4e6-8532-4d9c-88a7-7fbb5c22d44e\") " pod="openstack/ovsdbserver-nb-2"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.944487 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5369c4e6-8532-4d9c-88a7-7fbb5c22d44e-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"5369c4e6-8532-4d9c-88a7-7fbb5c22d44e\") " pod="openstack/ovsdbserver-nb-2"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.944622 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5526a303-a350-4271-8e45-36881affbd04-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"5526a303-a350-4271-8e45-36881affbd04\") " pod="openstack/ovsdbserver-nb-1"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.946870 4857 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.946897 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-9e2df167-a2c8-420a-b409-b97ac67e5642\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9e2df167-a2c8-420a-b409-b97ac67e5642\") pod \"ovsdbserver-nb-1\" (UID: \"5526a303-a350-4271-8e45-36881affbd04\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/34b292bf5b5ea2d7783861f7dd0d387c08b1da8bad5675428829ef1cf83aedb0/globalmount\"" pod="openstack/ovsdbserver-nb-1"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.951876 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5526a303-a350-4271-8e45-36881affbd04-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: \"5526a303-a350-4271-8e45-36881affbd04\") " pod="openstack/ovsdbserver-nb-1"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.960153 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ghhbp\" (UniqueName: \"kubernetes.io/projected/5526a303-a350-4271-8e45-36881affbd04-kube-api-access-ghhbp\") pod \"ovsdbserver-nb-1\" (UID: \"5526a303-a350-4271-8e45-36881affbd04\") " pod="openstack/ovsdbserver-nb-1"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.961901 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d4j72\" (UniqueName: \"kubernetes.io/projected/5369c4e6-8532-4d9c-88a7-7fbb5c22d44e-kube-api-access-d4j72\") pod \"ovsdbserver-nb-2\" (UID: \"5369c4e6-8532-4d9c-88a7-7fbb5c22d44e\") " pod="openstack/ovsdbserver-nb-2"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.981036 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-a7ff10c1-2426-4bdc-84a8-639c9abd9cb3\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a7ff10c1-2426-4bdc-84a8-639c9abd9cb3\") pod \"ovsdbserver-nb-2\" (UID: \"5369c4e6-8532-4d9c-88a7-7fbb5c22d44e\") " pod="openstack/ovsdbserver-nb-2"
Nov 28 14:58:29 crc kubenswrapper[4857]: I1128 14:58:29.997083 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-9e2df167-a2c8-420a-b409-b97ac67e5642\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-9e2df167-a2c8-420a-b409-b97ac67e5642\") pod \"ovsdbserver-nb-1\" (UID: \"5526a303-a350-4271-8e45-36881affbd04\") " pod="openstack/ovsdbserver-nb-1"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.051485 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5l7zz\" (UniqueName: \"kubernetes.io/projected/215aa7d0-2437-4c1a-ab37-80be33b6fca0-kube-api-access-5l7zz\") pod \"ovsdbserver-sb-1\" (UID: \"215aa7d0-2437-4c1a-ab37-80be33b6fca0\") " pod="openstack/ovsdbserver-sb-1"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.051853 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b81238d-98a7-4c18-930f-6bcce93525e9-config\") pod \"ovsdbserver-sb-0\" (UID: \"4b81238d-98a7-4c18-930f-6bcce93525e9\") " pod="openstack/ovsdbserver-sb-0"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.051874 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-866f1dcf-0182-4a38-a100-d0a895f5086c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-866f1dcf-0182-4a38-a100-d0a895f5086c\") pod \"ovsdbserver-sb-0\" (UID: \"4b81238d-98a7-4c18-930f-6bcce93525e9\") " pod="openstack/ovsdbserver-sb-0"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.051891 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d4f36444-104b-4cb9-befb-02e72742474b-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"d4f36444-104b-4cb9-befb-02e72742474b\") " pod="openstack/ovsdbserver-sb-2"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.051908 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/215aa7d0-2437-4c1a-ab37-80be33b6fca0-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"215aa7d0-2437-4c1a-ab37-80be33b6fca0\") " pod="openstack/ovsdbserver-sb-1"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.051942 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4f36444-104b-4cb9-befb-02e72742474b-config\") pod \"ovsdbserver-sb-2\" (UID: \"d4f36444-104b-4cb9-befb-02e72742474b\") " pod="openstack/ovsdbserver-sb-2"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.051998 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d4f36444-104b-4cb9-befb-02e72742474b-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"d4f36444-104b-4cb9-befb-02e72742474b\") " pod="openstack/ovsdbserver-sb-2"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.052020 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b81238d-98a7-4c18-930f-6bcce93525e9-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"4b81238d-98a7-4c18-930f-6bcce93525e9\") " pod="openstack/ovsdbserver-sb-0"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.052034 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4f36444-104b-4cb9-befb-02e72742474b-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"d4f36444-104b-4cb9-befb-02e72742474b\") " pod="openstack/ovsdbserver-sb-2"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.052052 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4b81238d-98a7-4c18-930f-6bcce93525e9-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"4b81238d-98a7-4c18-930f-6bcce93525e9\") " pod="openstack/ovsdbserver-sb-0"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.052072 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4t5q6\" (UniqueName: \"kubernetes.io/projected/4b81238d-98a7-4c18-930f-6bcce93525e9-kube-api-access-4t5q6\") pod \"ovsdbserver-sb-0\" (UID: \"4b81238d-98a7-4c18-930f-6bcce93525e9\") " pod="openstack/ovsdbserver-sb-0"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.052094 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/215aa7d0-2437-4c1a-ab37-80be33b6fca0-scripts\") pod \"ovsdbserver-sb-1\" (UID: \"215aa7d0-2437-4c1a-ab37-80be33b6fca0\") " pod="openstack/ovsdbserver-sb-1"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.052118 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4b81238d-98a7-4c18-930f-6bcce93525e9-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"4b81238d-98a7-4c18-930f-6bcce93525e9\") " pod="openstack/ovsdbserver-sb-0"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.052132 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nvgbg\" (UniqueName: \"kubernetes.io/projected/d4f36444-104b-4cb9-befb-02e72742474b-kube-api-access-nvgbg\") pod \"ovsdbserver-sb-2\" (UID: \"d4f36444-104b-4cb9-befb-02e72742474b\") " pod="openstack/ovsdbserver-sb-2"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.052156 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/215aa7d0-2437-4c1a-ab37-80be33b6fca0-config\") pod \"ovsdbserver-sb-1\" (UID: \"215aa7d0-2437-4c1a-ab37-80be33b6fca0\") " pod="openstack/ovsdbserver-sb-1"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.052176 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-b88202d9-0c35-4768-857f-3fd0b9d3df55\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b88202d9-0c35-4768-857f-3fd0b9d3df55\") pod \"ovsdbserver-sb-1\" (UID: \"215aa7d0-2437-4c1a-ab37-80be33b6fca0\") " pod="openstack/ovsdbserver-sb-1"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.052191 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/215aa7d0-2437-4c1a-ab37-80be33b6fca0-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"215aa7d0-2437-4c1a-ab37-80be33b6fca0\") " pod="openstack/ovsdbserver-sb-1"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.052216 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-dc5661eb-3cdd-4440-8a9d-71ddba0c48e9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-dc5661eb-3cdd-4440-8a9d-71ddba0c48e9\") pod \"ovsdbserver-sb-2\" (UID: \"d4f36444-104b-4cb9-befb-02e72742474b\") " pod="openstack/ovsdbserver-sb-2"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.053442 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4b81238d-98a7-4c18-930f-6bcce93525e9-config\") pod \"ovsdbserver-sb-0\" (UID: \"4b81238d-98a7-4c18-930f-6bcce93525e9\") " pod="openstack/ovsdbserver-sb-0"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.053838 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d4f36444-104b-4cb9-befb-02e72742474b-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"d4f36444-104b-4cb9-befb-02e72742474b\") " pod="openstack/ovsdbserver-sb-2"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.054628 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4b81238d-98a7-4c18-930f-6bcce93525e9-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"4b81238d-98a7-4c18-930f-6bcce93525e9\") " pod="openstack/ovsdbserver-sb-0"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.055475 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4f36444-104b-4cb9-befb-02e72742474b-config\") pod \"ovsdbserver-sb-2\" (UID: \"d4f36444-104b-4cb9-befb-02e72742474b\") " pod="openstack/ovsdbserver-sb-2"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.056714 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d4f36444-104b-4cb9-befb-02e72742474b-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"d4f36444-104b-4cb9-befb-02e72742474b\") " pod="openstack/ovsdbserver-sb-2"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.059094 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4b81238d-98a7-4c18-930f-6bcce93525e9-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"4b81238d-98a7-4c18-930f-6bcce93525e9\") " pod="openstack/ovsdbserver-sb-0"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.065075 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/215aa7d0-2437-4c1a-ab37-80be33b6fca0-scripts\") pod \"ovsdbserver-sb-1\" (UID: \"215aa7d0-2437-4c1a-ab37-80be33b6fca0\") " pod="openstack/ovsdbserver-sb-1"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.065780 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/215aa7d0-2437-4c1a-ab37-80be33b6fca0-config\") pod \"ovsdbserver-sb-1\" (UID: \"215aa7d0-2437-4c1a-ab37-80be33b6fca0\") " pod="openstack/ovsdbserver-sb-1"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.069290 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/215aa7d0-2437-4c1a-ab37-80be33b6fca0-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"215aa7d0-2437-4c1a-ab37-80be33b6fca0\") " pod="openstack/ovsdbserver-sb-1"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.074558 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4b81238d-98a7-4c18-930f-6bcce93525e9-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"4b81238d-98a7-4c18-930f-6bcce93525e9\") " pod="openstack/ovsdbserver-sb-0"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.074663 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/215aa7d0-2437-4c1a-ab37-80be33b6fca0-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"215aa7d0-2437-4c1a-ab37-80be33b6fca0\") " pod="openstack/ovsdbserver-sb-1"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.083572 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4f36444-104b-4cb9-befb-02e72742474b-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"d4f36444-104b-4cb9-befb-02e72742474b\") " pod="openstack/ovsdbserver-sb-2"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.086608 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4t5q6\" (UniqueName: \"kubernetes.io/projected/4b81238d-98a7-4c18-930f-6bcce93525e9-kube-api-access-4t5q6\") pod \"ovsdbserver-sb-0\" (UID: \"4b81238d-98a7-4c18-930f-6bcce93525e9\") " pod="openstack/ovsdbserver-sb-0"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.086608 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5l7zz\" (UniqueName: \"kubernetes.io/projected/215aa7d0-2437-4c1a-ab37-80be33b6fca0-kube-api-access-5l7zz\") pod \"ovsdbserver-sb-1\" (UID: \"215aa7d0-2437-4c1a-ab37-80be33b6fca0\") " pod="openstack/ovsdbserver-sb-1"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.114843 4857 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.114888 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-dc5661eb-3cdd-4440-8a9d-71ddba0c48e9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-dc5661eb-3cdd-4440-8a9d-71ddba0c48e9\") pod \"ovsdbserver-sb-2\" (UID: \"d4f36444-104b-4cb9-befb-02e72742474b\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1779180f52f7f1630ee96e96c6f5f937c301b092539d9e1c5804abbab869d5b7/globalmount\"" pod="openstack/ovsdbserver-sb-2"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.114932 4857 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.114992 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-866f1dcf-0182-4a38-a100-d0a895f5086c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-866f1dcf-0182-4a38-a100-d0a895f5086c\") pod \"ovsdbserver-sb-0\" (UID: \"4b81238d-98a7-4c18-930f-6bcce93525e9\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/49435f700b704a899b83175ba2ae1357559f7f42161eb0101f83c9dac6ad815c/globalmount\"" pod="openstack/ovsdbserver-sb-0"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.115283 4857 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.115323 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-b88202d9-0c35-4768-857f-3fd0b9d3df55\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b88202d9-0c35-4768-857f-3fd0b9d3df55\") pod \"ovsdbserver-sb-1\" (UID: \"215aa7d0-2437-4c1a-ab37-80be33b6fca0\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/ae668ccc739db0c6494e22c944f8d97b71ce1a98247a37639c63bed2768947a2/globalmount\"" pod="openstack/ovsdbserver-sb-1"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.121632 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nvgbg\" (UniqueName: \"kubernetes.io/projected/d4f36444-104b-4cb9-befb-02e72742474b-kube-api-access-nvgbg\") pod \"ovsdbserver-sb-2\" (UID: \"d4f36444-104b-4cb9-befb-02e72742474b\") " pod="openstack/ovsdbserver-sb-2"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.171776 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-dc5661eb-3cdd-4440-8a9d-71ddba0c48e9\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-dc5661eb-3cdd-4440-8a9d-71ddba0c48e9\") pod \"ovsdbserver-sb-2\" (UID: \"d4f36444-104b-4cb9-befb-02e72742474b\") " pod="openstack/ovsdbserver-sb-2"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.181398 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-866f1dcf-0182-4a38-a100-d0a895f5086c\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-866f1dcf-0182-4a38-a100-d0a895f5086c\") pod \"ovsdbserver-sb-0\" (UID: \"4b81238d-98a7-4c18-930f-6bcce93525e9\") " pod="openstack/ovsdbserver-sb-0"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.197766 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-b88202d9-0c35-4768-857f-3fd0b9d3df55\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b88202d9-0c35-4768-857f-3fd0b9d3df55\") pod \"ovsdbserver-sb-1\" (UID: \"215aa7d0-2437-4c1a-ab37-80be33b6fca0\") " pod="openstack/ovsdbserver-sb-1"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.233577 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-1"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.234215 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-2"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.254570 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-1"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.276749 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-2"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.375914 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"]
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.435467 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0"
Nov 28 14:58:30 crc kubenswrapper[4857]: I1128 14:58:30.955798 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-1"]
Nov 28 14:58:30 crc kubenswrapper[4857]: W1128 14:58:30.957271 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod215aa7d0_2437_4c1a_ab37_80be33b6fca0.slice/crio-88f6749c32ddbd69b021314c04e2b9fdde96a2c0e23791d77ba79b24d1cd334a WatchSource:0}: Error finding container 88f6749c32ddbd69b021314c04e2b9fdde96a2c0e23791d77ba79b24d1cd334a: Status 404 returned error can't find the container with id 88f6749c32ddbd69b021314c04e2b9fdde96a2c0e23791d77ba79b24d1cd334a
Nov 28 14:58:31 crc kubenswrapper[4857]: I1128 14:58:31.014762 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-1"]
Nov 28 14:58:31 crc kubenswrapper[4857]: W1128 14:58:31.030040 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5526a303_a350_4271_8e45_36881affbd04.slice/crio-42549110e3ac309834befe5f4e443901892d3eafe42a8632d153aa5eaa6769ca WatchSource:0}: Error finding container 42549110e3ac309834befe5f4e443901892d3eafe42a8632d153aa5eaa6769ca: Status 404 returned error can't find the container with id 42549110e3ac309834befe5f4e443901892d3eafe42a8632d153aa5eaa6769ca
Nov 28 14:58:31 crc kubenswrapper[4857]: I1128 14:58:31.101878 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-2"]
Nov 28 14:58:31 crc kubenswrapper[4857]: I1128 14:58:31.379256 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"5526a303-a350-4271-8e45-36881affbd04","Type":"ContainerStarted","Data":"77b2f2d1fbaa3ab3c8128c5c9f54afa77348dd458e177f9a2cd225ed93326233"}
Nov 28 14:58:31 crc kubenswrapper[4857]: I1128 14:58:31.379783 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"5526a303-a350-4271-8e45-36881affbd04","Type":"ContainerStarted","Data":"b74a0df641aa461258eab0f560167eaa2dabab26efc1c8f7760a93fad3d028a6"}
Nov 28 14:58:31 crc kubenswrapper[4857]: I1128 14:58:31.379799 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"5526a303-a350-4271-8e45-36881affbd04","Type":"ContainerStarted","Data":"42549110e3ac309834befe5f4e443901892d3eafe42a8632d153aa5eaa6769ca"}
Nov 28 14:58:31 crc kubenswrapper[4857]: I1128 14:58:31.382448 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1" event={"ID":"215aa7d0-2437-4c1a-ab37-80be33b6fca0","Type":"ContainerStarted","Data":"9d9f6c0da1f29834641ff793e18396c021adff76c5b201d1226becfd1126bd97"}
Nov 28 14:58:31 crc kubenswrapper[4857]: I1128 14:58:31.382497 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1" event={"ID":"215aa7d0-2437-4c1a-ab37-80be33b6fca0","Type":"ContainerStarted","Data":"e442ea9ce3b08fbabd4150387c158e488d590c69305002e74d9f70b0352e4e41"}
Nov 28 14:58:31 crc kubenswrapper[4857]: I1128 14:58:31.382511 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1" event={"ID":"215aa7d0-2437-4c1a-ab37-80be33b6fca0","Type":"ContainerStarted","Data":"88f6749c32ddbd69b021314c04e2b9fdde96a2c0e23791d77ba79b24d1cd334a"}
Nov 28 14:58:31 crc kubenswrapper[4857]: I1128 14:58:31.385803 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c","Type":"ContainerStarted","Data":"303073a9336f937f4d0b3cda7c534944aeed53d3505af265fc3fdd7427feb68f"}
Nov 28 14:58:31 crc kubenswrapper[4857]: I1128 14:58:31.385872 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c","Type":"ContainerStarted","Data":"6ad0e7b63d2243fd59e10ec56c9d5021bb110bd220d68d720e4721e9ee4b65e4"}
Nov 28 14:58:31 crc kubenswrapper[4857]: I1128 14:58:31.385892 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c","Type":"ContainerStarted","Data":"387bf987a9cc9bf9d725029d4e36366487ceb5d0ee77282562e0a0a9ffdba49a"}
Nov 28 14:58:31 crc kubenswrapper[4857]: I1128 14:58:31.387550 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"d4f36444-104b-4cb9-befb-02e72742474b","Type":"ContainerStarted","Data":"a4531995f946c1a53ae747b0e5be211030db36a8ef2f7ad1836d3e6bb9384993"}
Nov 28 14:58:31 crc kubenswrapper[4857]: I1128 14:58:31.387665 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"d4f36444-104b-4cb9-befb-02e72742474b","Type":"ContainerStarted","Data":"1dbaeb37902106043e704b85587ff46c5a7f339a4f838eeaba99338fddffb07b"}
Nov 28 14:58:31 crc kubenswrapper[4857]: I1128 14:58:31.402550 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-1" podStartSLOduration=3.402524808 podStartE2EDuration="3.402524808s" podCreationTimestamp="2025-11-28 14:58:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 14:58:31.400205736 +0000 UTC m=+5361.524147183" watchObservedRunningTime="2025-11-28 14:58:31.402524808 +0000 UTC m=+5361.526466245"
Nov 28 14:58:31 crc kubenswrapper[4857]: I1128 14:58:31.439580 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=3.439545757 podStartE2EDuration="3.439545757s" podCreationTimestamp="2025-11-28 14:58:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 14:58:31.438208541 +0000 UTC m=+5361.562149978" watchObservedRunningTime="2025-11-28 14:58:31.439545757 +0000 UTC m=+5361.563487194"
Nov 28 14:58:31 crc kubenswrapper[4857]: I1128 14:58:31.446870 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-1" podStartSLOduration=3.446854632 podStartE2EDuration="3.446854632s" podCreationTimestamp="2025-11-28 14:58:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 14:58:31.422378569 +0000 UTC m=+5361.546319996" watchObservedRunningTime="2025-11-28 14:58:31.446854632 +0000 UTC m=+5361.570796069"
Nov 28 14:58:31 crc kubenswrapper[4857]: I1128 14:58:31.812372 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-2"]
Nov 28 14:58:31 crc kubenswrapper[4857]: W1128 14:58:31.818121 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5369c4e6_8532_4d9c_88a7_7fbb5c22d44e.slice/crio-a240546cc28917ff3e2803dc6c67f4a0abe44c93e67d50f63b76c97444a48ca6 WatchSource:0}: Error finding container a240546cc28917ff3e2803dc6c67f4a0abe44c93e67d50f63b76c97444a48ca6: Status 404 returned error can't find the container with id a240546cc28917ff3e2803dc6c67f4a0abe44c93e67d50f63b76c97444a48ca6
Nov 28 14:58:31 crc kubenswrapper[4857]: I1128 14:58:31.944754 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"]
Nov 28 14:58:31 crc kubenswrapper[4857]: W1128 14:58:31.956769 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4b81238d_98a7_4c18_930f_6bcce93525e9.slice/crio-941135f83ebdedbdab8eb6047e068b3023694449c38db987e3fcfd134d2e9d9e WatchSource:0}: Error finding container 941135f83ebdedbdab8eb6047e068b3023694449c38db987e3fcfd134d2e9d9e: Status 404 returned error can't find the container with id 941135f83ebdedbdab8eb6047e068b3023694449c38db987e3fcfd134d2e9d9e
Nov 28 14:58:32 crc kubenswrapper[4857]: I1128 14:58:32.397708 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"4b81238d-98a7-4c18-930f-6bcce93525e9","Type":"ContainerStarted","Data":"ff1646e461a30623e5826d4f2306d61e3332dee4d49b1c928c33d464f0dd7145"}
Nov 28 14:58:32 crc kubenswrapper[4857]: I1128 14:58:32.398097 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"4b81238d-98a7-4c18-930f-6bcce93525e9","Type":"ContainerStarted","Data":"3f28cc655c06bd8c27d5ced1ca31d9a94927cb0eac73d4c3e2a42a523f3fa4a8"}
Nov 28 14:58:32 crc kubenswrapper[4857]: I1128 14:58:32.398110 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"4b81238d-98a7-4c18-930f-6bcce93525e9","Type":"ContainerStarted","Data":"941135f83ebdedbdab8eb6047e068b3023694449c38db987e3fcfd134d2e9d9e"}
Nov 28 14:58:32 crc kubenswrapper[4857]: I1128 14:58:32.399833 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"d4f36444-104b-4cb9-befb-02e72742474b","Type":"ContainerStarted","Data":"1290f20e81b36a70328189efe4c3c5750d316c8412dbf0747b04d7daf3dd4648"}
Nov 28 14:58:32 crc kubenswrapper[4857]: I1128 14:58:32.402268 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"5369c4e6-8532-4d9c-88a7-7fbb5c22d44e","Type":"ContainerStarted","Data":"4cfe74e76ced40a19e546351e12258184f8609d83e5f1ab33399f8bba6d03db8"}
Nov 28 14:58:32 crc kubenswrapper[4857]: I1128 14:58:32.402367 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"5369c4e6-8532-4d9c-88a7-7fbb5c22d44e","Type":"ContainerStarted","Data":"3ca43edea06fc9d75a1d162f350d3ee0ea8527fd77db2dc5e5c1c6dfb6e4d7b8"}
Nov 28 14:58:32 crc kubenswrapper[4857]: I1128 14:58:32.402434 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"5369c4e6-8532-4d9c-88a7-7fbb5c22d44e","Type":"ContainerStarted","Data":"a240546cc28917ff3e2803dc6c67f4a0abe44c93e67d50f63b76c97444a48ca6"}
Nov 28 14:58:32 crc kubenswrapper[4857]: I1128 14:58:32.424197 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=4.424177368 podStartE2EDuration="4.424177368s" podCreationTimestamp="2025-11-28 14:58:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 14:58:32.416918284 +0000 UTC m=+5362.540859731" watchObservedRunningTime="2025-11-28 14:58:32.424177368 +0000 UTC m=+5362.548118805"
Nov 28 14:58:32 crc kubenswrapper[4857]: I1128 14:58:32.440551 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-2" podStartSLOduration=4.440524794 podStartE2EDuration="4.440524794s" podCreationTimestamp="2025-11-28 14:58:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 14:58:32.434887454 +0000 UTC m=+5362.558828891" watchObservedRunningTime="2025-11-28 14:58:32.440524794 +0000 UTC m=+5362.564466241"
Nov 28 14:58:32 crc kubenswrapper[4857]: I1128 14:58:32.462912 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-2" podStartSLOduration=4.462884411 podStartE2EDuration="4.462884411s" podCreationTimestamp="2025-11-28 14:58:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 14:58:32.459316026 +0000 UTC m=+5362.583257463" watchObservedRunningTime="2025-11-28 14:58:32.462884411 +0000 UTC m=+5362.586825868"
Nov 28 14:58:32 crc kubenswrapper[4857]: I1128 14:58:32.910158 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0"
Nov 28 14:58:33 crc kubenswrapper[4857]: I1128 14:58:33.234447 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-1"
Nov 28 14:58:33 crc kubenswrapper[4857]: I1128 14:58:33.234563 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-2"
Nov 28 14:58:33 crc kubenswrapper[4857]: I1128 14:58:33.255520 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-1"
Nov 28 14:58:33 crc kubenswrapper[4857]: I1128 14:58:33.277411 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-2"
Nov 28 14:58:33 crc kubenswrapper[4857]: I1128 14:58:33.436115 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0"
Nov 28 14:58:34 crc kubenswrapper[4857]: I1128 14:58:34.910462 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0"
Nov 28 14:58:35 crc kubenswrapper[4857]: I1128 14:58:35.234726 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-2"
Nov 28 14:58:35 crc kubenswrapper[4857]: I1128 14:58:35.234788 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-1"
Nov 28 14:58:35 crc kubenswrapper[4857]: I1128 14:58:35.255617 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-1"
Nov 28 14:58:35 crc kubenswrapper[4857]: I1128 14:58:35.277307 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-2"
Nov 28 14:58:35 crc kubenswrapper[4857]: I1128 14:58:35.436249 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0"
Nov 28 14:58:35 crc kubenswrapper[4857]: I1128 14:58:35.973968 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0"
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.056997 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0"
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.297595 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-1"
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.307724 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-2"
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.326981 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-1"
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.348358 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-2"
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.373968 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-fd9684f9f-t67h8"]
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.375256 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fd9684f9f-t67h8"
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.379695 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb"
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.392243 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-fd9684f9f-t67h8"]
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.396929 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-1"
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.409319 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-1"
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.478038 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dtjcj\" (UniqueName: \"kubernetes.io/projected/4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70-kube-api-access-dtjcj\") pod \"dnsmasq-dns-fd9684f9f-t67h8\" (UID: \"4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70\") " pod="openstack/dnsmasq-dns-fd9684f9f-t67h8"
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.478187 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70-config\") pod \"dnsmasq-dns-fd9684f9f-t67h8\" (UID: \"4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70\") " pod="openstack/dnsmasq-dns-fd9684f9f-t67h8"
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.478213 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70-ovsdbserver-nb\") pod \"dnsmasq-dns-fd9684f9f-t67h8\" (UID: \"4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70\") " pod="openstack/dnsmasq-dns-fd9684f9f-t67h8"
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.478246 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70-dns-svc\") pod \"dnsmasq-dns-fd9684f9f-t67h8\" (UID: \"4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70\") " pod="openstack/dnsmasq-dns-fd9684f9f-t67h8"
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.488711 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0"
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.513302 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-2"
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.579738 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70-config\") pod \"dnsmasq-dns-fd9684f9f-t67h8\" (UID: \"4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70\") " pod="openstack/dnsmasq-dns-fd9684f9f-t67h8"
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.579793 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70-ovsdbserver-nb\") pod \"dnsmasq-dns-fd9684f9f-t67h8\" (UID: \"4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70\") " pod="openstack/dnsmasq-dns-fd9684f9f-t67h8"
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.579840 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70-dns-svc\") pod \"dnsmasq-dns-fd9684f9f-t67h8\" (UID: \"4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70\") " pod="openstack/dnsmasq-dns-fd9684f9f-t67h8"
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.579878 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dtjcj\" (UniqueName: \"kubernetes.io/projected/4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70-kube-api-access-dtjcj\") pod \"dnsmasq-dns-fd9684f9f-t67h8\" (UID: \"4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70\") " pod="openstack/dnsmasq-dns-fd9684f9f-t67h8"
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.581412 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70-config\") pod \"dnsmasq-dns-fd9684f9f-t67h8\" (UID: \"4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70\") " pod="openstack/dnsmasq-dns-fd9684f9f-t67h8"
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.581933 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70-ovsdbserver-nb\") pod \"dnsmasq-dns-fd9684f9f-t67h8\" (UID: \"4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70\") " pod="openstack/dnsmasq-dns-fd9684f9f-t67h8"
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.582775 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70-dns-svc\") pod \"dnsmasq-dns-fd9684f9f-t67h8\" (UID: \"4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70\") " pod="openstack/dnsmasq-dns-fd9684f9f-t67h8"
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.622274 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dtjcj\" (UniqueName: \"kubernetes.io/projected/4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70-kube-api-access-dtjcj\") pod \"dnsmasq-dns-fd9684f9f-t67h8\" (UID: \"4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70\") " pod="openstack/dnsmasq-dns-fd9684f9f-t67h8"
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.686295 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-fd9684f9f-t67h8"]
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.686918 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fd9684f9f-t67h8"
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.746076 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-764bd786bf-qn86q"]
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.747544 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-764bd786bf-qn86q"
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.760631 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb"
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.764033 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-764bd786bf-qn86q"]
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.885070 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0db56d3b-4f53-4cb7-9487-e354c68f6346-ovsdbserver-sb\") pod \"dnsmasq-dns-764bd786bf-qn86q\" (UID: \"0db56d3b-4f53-4cb7-9487-e354c68f6346\") " pod="openstack/dnsmasq-dns-764bd786bf-qn86q"
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.885125 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0db56d3b-4f53-4cb7-9487-e354c68f6346-ovsdbserver-nb\") pod \"dnsmasq-dns-764bd786bf-qn86q\" (UID: \"0db56d3b-4f53-4cb7-9487-e354c68f6346\") " pod="openstack/dnsmasq-dns-764bd786bf-qn86q"
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.885230 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0db56d3b-4f53-4cb7-9487-e354c68f6346-dns-svc\") pod \"dnsmasq-dns-764bd786bf-qn86q\" (UID: \"0db56d3b-4f53-4cb7-9487-e354c68f6346\") " pod="openstack/dnsmasq-dns-764bd786bf-qn86q"
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.885250 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0db56d3b-4f53-4cb7-9487-e354c68f6346-config\") pod \"dnsmasq-dns-764bd786bf-qn86q\" (UID: \"0db56d3b-4f53-4cb7-9487-e354c68f6346\") " pod="openstack/dnsmasq-dns-764bd786bf-qn86q"
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.885270 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jksjf\" (UniqueName: \"kubernetes.io/projected/0db56d3b-4f53-4cb7-9487-e354c68f6346-kube-api-access-jksjf\") pod \"dnsmasq-dns-764bd786bf-qn86q\" (UID: \"0db56d3b-4f53-4cb7-9487-e354c68f6346\") " pod="openstack/dnsmasq-dns-764bd786bf-qn86q"
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.986724 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0db56d3b-4f53-4cb7-9487-e354c68f6346-dns-svc\") pod \"dnsmasq-dns-764bd786bf-qn86q\" (UID: \"0db56d3b-4f53-4cb7-9487-e354c68f6346\") " pod="openstack/dnsmasq-dns-764bd786bf-qn86q"
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.986771 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0db56d3b-4f53-4cb7-9487-e354c68f6346-config\") pod \"dnsmasq-dns-764bd786bf-qn86q\" (UID: \"0db56d3b-4f53-4cb7-9487-e354c68f6346\") " pod="openstack/dnsmasq-dns-764bd786bf-qn86q"
Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.987784 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0db56d3b-4f53-4cb7-9487-e354c68f6346-config\") pod \"dnsmasq-dns-764bd786bf-qn86q\" (UID: \"0db56d3b-4f53-4cb7-9487-e354c68f6346\") " pod="openstack/dnsmasq-dns-764bd786bf-qn86q"
Nov 28 14:58:36 crc 
kubenswrapper[4857]: I1128 14:58:36.986795 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jksjf\" (UniqueName: \"kubernetes.io/projected/0db56d3b-4f53-4cb7-9487-e354c68f6346-kube-api-access-jksjf\") pod \"dnsmasq-dns-764bd786bf-qn86q\" (UID: \"0db56d3b-4f53-4cb7-9487-e354c68f6346\") " pod="openstack/dnsmasq-dns-764bd786bf-qn86q" Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.987820 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0db56d3b-4f53-4cb7-9487-e354c68f6346-dns-svc\") pod \"dnsmasq-dns-764bd786bf-qn86q\" (UID: \"0db56d3b-4f53-4cb7-9487-e354c68f6346\") " pod="openstack/dnsmasq-dns-764bd786bf-qn86q" Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.987859 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0db56d3b-4f53-4cb7-9487-e354c68f6346-ovsdbserver-sb\") pod \"dnsmasq-dns-764bd786bf-qn86q\" (UID: \"0db56d3b-4f53-4cb7-9487-e354c68f6346\") " pod="openstack/dnsmasq-dns-764bd786bf-qn86q" Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.988514 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0db56d3b-4f53-4cb7-9487-e354c68f6346-ovsdbserver-sb\") pod \"dnsmasq-dns-764bd786bf-qn86q\" (UID: \"0db56d3b-4f53-4cb7-9487-e354c68f6346\") " pod="openstack/dnsmasq-dns-764bd786bf-qn86q" Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.988556 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0db56d3b-4f53-4cb7-9487-e354c68f6346-ovsdbserver-nb\") pod \"dnsmasq-dns-764bd786bf-qn86q\" (UID: \"0db56d3b-4f53-4cb7-9487-e354c68f6346\") " pod="openstack/dnsmasq-dns-764bd786bf-qn86q" Nov 28 14:58:36 crc kubenswrapper[4857]: I1128 14:58:36.988573 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0db56d3b-4f53-4cb7-9487-e354c68f6346-ovsdbserver-nb\") pod \"dnsmasq-dns-764bd786bf-qn86q\" (UID: \"0db56d3b-4f53-4cb7-9487-e354c68f6346\") " pod="openstack/dnsmasq-dns-764bd786bf-qn86q" Nov 28 14:58:37 crc kubenswrapper[4857]: I1128 14:58:37.008605 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jksjf\" (UniqueName: \"kubernetes.io/projected/0db56d3b-4f53-4cb7-9487-e354c68f6346-kube-api-access-jksjf\") pod \"dnsmasq-dns-764bd786bf-qn86q\" (UID: \"0db56d3b-4f53-4cb7-9487-e354c68f6346\") " pod="openstack/dnsmasq-dns-764bd786bf-qn86q" Nov 28 14:58:37 crc kubenswrapper[4857]: I1128 14:58:37.099563 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-764bd786bf-qn86q" Nov 28 14:58:37 crc kubenswrapper[4857]: I1128 14:58:37.181885 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-fd9684f9f-t67h8"] Nov 28 14:58:37 crc kubenswrapper[4857]: I1128 14:58:37.458409 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-764bd786bf-qn86q"] Nov 28 14:58:37 crc kubenswrapper[4857]: I1128 14:58:37.472785 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fd9684f9f-t67h8" event={"ID":"4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70","Type":"ContainerStarted","Data":"72ac833f06a83d4d056ba1b75e228ba97fb5e7556be1bb377d654fa2a33f4e8e"} Nov 28 14:58:37 crc kubenswrapper[4857]: W1128 14:58:37.475561 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0db56d3b_4f53_4cb7_9487_e354c68f6346.slice/crio-dfcda5e20a798bbae5199c119792eabfe57fd68c417a3beeeca5ed0e006f7c4f WatchSource:0}: Error finding container dfcda5e20a798bbae5199c119792eabfe57fd68c417a3beeeca5ed0e006f7c4f: Status 404 returned error can't find the container with id dfcda5e20a798bbae5199c119792eabfe57fd68c417a3beeeca5ed0e006f7c4f Nov 28 14:58:38 crc kubenswrapper[4857]: I1128 14:58:38.481753 4857 generic.go:334] "Generic (PLEG): container finished" podID="4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70" containerID="7ef6d8e586b270233df314ae6ce777e6fce015180ec1f86fda5abc5ff74bcd0c" exitCode=0 Nov 28 14:58:38 crc kubenswrapper[4857]: I1128 14:58:38.481867 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fd9684f9f-t67h8" event={"ID":"4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70","Type":"ContainerDied","Data":"7ef6d8e586b270233df314ae6ce777e6fce015180ec1f86fda5abc5ff74bcd0c"} Nov 28 14:58:38 crc kubenswrapper[4857]: I1128 14:58:38.491039 4857 generic.go:334] "Generic (PLEG): container finished" podID="0db56d3b-4f53-4cb7-9487-e354c68f6346" containerID="979a6c7f6b95eed556017d8765bbabb1f12e983ef8f68673d7c4ccdde0f578df" exitCode=0 Nov 28 14:58:38 crc kubenswrapper[4857]: I1128 14:58:38.491335 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764bd786bf-qn86q" event={"ID":"0db56d3b-4f53-4cb7-9487-e354c68f6346","Type":"ContainerDied","Data":"979a6c7f6b95eed556017d8765bbabb1f12e983ef8f68673d7c4ccdde0f578df"} Nov 28 14:58:38 crc kubenswrapper[4857]: I1128 14:58:38.491445 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764bd786bf-qn86q" event={"ID":"0db56d3b-4f53-4cb7-9487-e354c68f6346","Type":"ContainerStarted","Data":"dfcda5e20a798bbae5199c119792eabfe57fd68c417a3beeeca5ed0e006f7c4f"} Nov 28 14:58:38 crc kubenswrapper[4857]: I1128 14:58:38.885100 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-fd9684f9f-t67h8" Nov 28 14:58:39 crc kubenswrapper[4857]: I1128 14:58:39.028502 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70-config\") pod \"4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70\" (UID: \"4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70\") " Nov 28 14:58:39 crc kubenswrapper[4857]: I1128 14:58:39.028557 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dtjcj\" (UniqueName: \"kubernetes.io/projected/4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70-kube-api-access-dtjcj\") pod \"4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70\" (UID: \"4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70\") " Nov 28 14:58:39 crc kubenswrapper[4857]: I1128 14:58:39.028607 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70-ovsdbserver-nb\") pod \"4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70\" (UID: \"4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70\") " Nov 28 14:58:39 crc kubenswrapper[4857]: I1128 14:58:39.028812 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70-dns-svc\") pod \"4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70\" (UID: \"4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70\") " Nov 28 14:58:39 crc kubenswrapper[4857]: I1128 14:58:39.045513 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70-kube-api-access-dtjcj" (OuterVolumeSpecName: "kube-api-access-dtjcj") pod "4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70" (UID: "4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70"). InnerVolumeSpecName "kube-api-access-dtjcj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:58:39 crc kubenswrapper[4857]: I1128 14:58:39.055163 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70" (UID: "4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 14:58:39 crc kubenswrapper[4857]: I1128 14:58:39.066424 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70-config" (OuterVolumeSpecName: "config") pod "4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70" (UID: "4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 14:58:39 crc kubenswrapper[4857]: I1128 14:58:39.068220 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70" (UID: "4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 14:58:39 crc kubenswrapper[4857]: I1128 14:58:39.131018 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 14:58:39 crc kubenswrapper[4857]: I1128 14:58:39.131073 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70-config\") on node \"crc\" DevicePath \"\"" Nov 28 14:58:39 crc kubenswrapper[4857]: I1128 14:58:39.131096 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dtjcj\" (UniqueName: \"kubernetes.io/projected/4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70-kube-api-access-dtjcj\") on node \"crc\" DevicePath \"\"" Nov 28 14:58:39 crc kubenswrapper[4857]: I1128 14:58:39.131119 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 14:58:39 crc kubenswrapper[4857]: I1128 14:58:39.500805 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fd9684f9f-t67h8" event={"ID":"4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70","Type":"ContainerDied","Data":"72ac833f06a83d4d056ba1b75e228ba97fb5e7556be1bb377d654fa2a33f4e8e"} Nov 28 14:58:39 crc kubenswrapper[4857]: I1128 14:58:39.500841 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fd9684f9f-t67h8" Nov 28 14:58:39 crc kubenswrapper[4857]: I1128 14:58:39.501267 4857 scope.go:117] "RemoveContainer" containerID="7ef6d8e586b270233df314ae6ce777e6fce015180ec1f86fda5abc5ff74bcd0c" Nov 28 14:58:39 crc kubenswrapper[4857]: I1128 14:58:39.507356 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764bd786bf-qn86q" event={"ID":"0db56d3b-4f53-4cb7-9487-e354c68f6346","Type":"ContainerStarted","Data":"6968c94745783c56961b5594978f2546c7fe813be98b7aa148992629cb221b09"} Nov 28 14:58:39 crc kubenswrapper[4857]: I1128 14:58:39.507586 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-764bd786bf-qn86q" Nov 28 14:58:39 crc kubenswrapper[4857]: I1128 14:58:39.527000 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-764bd786bf-qn86q" podStartSLOduration=3.526937577 podStartE2EDuration="3.526937577s" podCreationTimestamp="2025-11-28 14:58:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 14:58:39.523488105 +0000 UTC m=+5369.647429582" watchObservedRunningTime="2025-11-28 14:58:39.526937577 +0000 UTC m=+5369.650879024" Nov 28 14:58:39 crc kubenswrapper[4857]: I1128 14:58:39.588750 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-fd9684f9f-t67h8"] Nov 28 14:58:39 crc kubenswrapper[4857]: I1128 14:58:39.595364 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-fd9684f9f-t67h8"] Nov 28 14:58:40 crc kubenswrapper[4857]: I1128 14:58:40.239024 4857 scope.go:117] "RemoveContainer" containerID="3e9d36f7893d9f1cc6753c2afab65986137c59e8ac6eb66a5f0a1735633d260e" Nov 28 14:58:40 crc kubenswrapper[4857]: E1128 14:58:40.239591 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:58:40 crc kubenswrapper[4857]: I1128 14:58:40.248535 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70" path="/var/lib/kubelet/pods/4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70/volumes" Nov 28 14:58:40 crc kubenswrapper[4857]: I1128 14:58:40.328710 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-2" Nov 28 14:58:40 crc kubenswrapper[4857]: I1128 14:58:40.485263 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 28 14:58:43 crc kubenswrapper[4857]: I1128 14:58:43.365018 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-copy-data"] Nov 28 14:58:43 crc kubenswrapper[4857]: E1128 14:58:43.365747 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70" containerName="init" Nov 28 14:58:43 crc kubenswrapper[4857]: I1128 14:58:43.365764 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70" containerName="init" Nov 28 14:58:43 crc kubenswrapper[4857]: I1128 14:58:43.366008 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f8306d1-ec5d-46c0-b5be-b17cb6e2fb70" containerName="init" Nov 28 14:58:43 crc kubenswrapper[4857]: I1128 14:58:43.366718 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-copy-data" Nov 28 14:58:43 crc kubenswrapper[4857]: I1128 14:58:43.371777 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovn-data-cert" Nov 28 14:58:43 crc kubenswrapper[4857]: I1128 14:58:43.381444 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-copy-data"] Nov 28 14:58:43 crc kubenswrapper[4857]: I1128 14:58:43.513392 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/f83a1a23-ed01-4d84-b3bf-e55e4268e093-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"f83a1a23-ed01-4d84-b3bf-e55e4268e093\") " pod="openstack/ovn-copy-data" Nov 28 14:58:43 crc kubenswrapper[4857]: I1128 14:58:43.513634 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gk44g\" (UniqueName: \"kubernetes.io/projected/f83a1a23-ed01-4d84-b3bf-e55e4268e093-kube-api-access-gk44g\") pod \"ovn-copy-data\" (UID: \"f83a1a23-ed01-4d84-b3bf-e55e4268e093\") " pod="openstack/ovn-copy-data" Nov 28 14:58:43 crc kubenswrapper[4857]: I1128 14:58:43.513918 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-1e50f717-2312-4a7f-b55c-3f7fbaa30c3f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1e50f717-2312-4a7f-b55c-3f7fbaa30c3f\") pod \"ovn-copy-data\" (UID: \"f83a1a23-ed01-4d84-b3bf-e55e4268e093\") " pod="openstack/ovn-copy-data" Nov 28 14:58:43 crc kubenswrapper[4857]: I1128 14:58:43.615693 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gk44g\" (UniqueName: \"kubernetes.io/projected/f83a1a23-ed01-4d84-b3bf-e55e4268e093-kube-api-access-gk44g\") pod 
\"ovn-copy-data\" (UID: \"f83a1a23-ed01-4d84-b3bf-e55e4268e093\") " pod="openstack/ovn-copy-data" Nov 28 14:58:43 crc kubenswrapper[4857]: I1128 14:58:43.615793 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-1e50f717-2312-4a7f-b55c-3f7fbaa30c3f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1e50f717-2312-4a7f-b55c-3f7fbaa30c3f\") pod \"ovn-copy-data\" (UID: \"f83a1a23-ed01-4d84-b3bf-e55e4268e093\") " pod="openstack/ovn-copy-data" Nov 28 14:58:43 crc kubenswrapper[4857]: I1128 14:58:43.615858 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/f83a1a23-ed01-4d84-b3bf-e55e4268e093-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"f83a1a23-ed01-4d84-b3bf-e55e4268e093\") " pod="openstack/ovn-copy-data" Nov 28 14:58:43 crc kubenswrapper[4857]: I1128 14:58:43.620910 4857 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 28 14:58:43 crc kubenswrapper[4857]: I1128 14:58:43.621359 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-1e50f717-2312-4a7f-b55c-3f7fbaa30c3f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1e50f717-2312-4a7f-b55c-3f7fbaa30c3f\") pod \"ovn-copy-data\" (UID: \"f83a1a23-ed01-4d84-b3bf-e55e4268e093\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/51b161c12a50dcfbe57957e6266d8cc4584f23e363dca86bcd83f9165ec6cbd7/globalmount\"" pod="openstack/ovn-copy-data" Nov 28 14:58:43 crc kubenswrapper[4857]: I1128 14:58:43.625079 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/f83a1a23-ed01-4d84-b3bf-e55e4268e093-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"f83a1a23-ed01-4d84-b3bf-e55e4268e093\") " pod="openstack/ovn-copy-data" Nov 28 14:58:43 crc kubenswrapper[4857]: I1128 14:58:43.637340 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gk44g\" (UniqueName: \"kubernetes.io/projected/f83a1a23-ed01-4d84-b3bf-e55e4268e093-kube-api-access-gk44g\") pod \"ovn-copy-data\" (UID: \"f83a1a23-ed01-4d84-b3bf-e55e4268e093\") " pod="openstack/ovn-copy-data" Nov 28 14:58:43 crc kubenswrapper[4857]: I1128 14:58:43.652207 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-1e50f717-2312-4a7f-b55c-3f7fbaa30c3f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1e50f717-2312-4a7f-b55c-3f7fbaa30c3f\") pod \"ovn-copy-data\" (UID: \"f83a1a23-ed01-4d84-b3bf-e55e4268e093\") " pod="openstack/ovn-copy-data" Nov 28 14:58:43 crc kubenswrapper[4857]: I1128 14:58:43.688234 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-copy-data" Nov 28 14:58:44 crc kubenswrapper[4857]: I1128 14:58:44.085439 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-copy-data"] Nov 28 14:58:44 crc kubenswrapper[4857]: I1128 14:58:44.564140 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"f83a1a23-ed01-4d84-b3bf-e55e4268e093","Type":"ContainerStarted","Data":"608698b4649c31cd4fbda1000524a37011504be56433b2e00e645a65704dcdef"} Nov 28 14:58:44 crc kubenswrapper[4857]: I1128 14:58:44.564197 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"f83a1a23-ed01-4d84-b3bf-e55e4268e093","Type":"ContainerStarted","Data":"006d1339d7b1aec88d4c011b40ad08dabc59f6a5df0a206301b9bb7d530ae4af"} Nov 28 14:58:44 crc kubenswrapper[4857]: I1128 14:58:44.590138 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-copy-data" podStartSLOduration=2.590103847 podStartE2EDuration="2.590103847s" podCreationTimestamp="2025-11-28 14:58:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 14:58:44.581019574 +0000 UTC m=+5374.704961021" watchObservedRunningTime="2025-11-28 14:58:44.590103847 +0000 UTC m=+5374.714045294" Nov 28 14:58:47 crc kubenswrapper[4857]: I1128 14:58:47.101283 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-764bd786bf-qn86q" Nov 28 14:58:47 crc kubenswrapper[4857]: I1128 14:58:47.197796 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b7946d7b9-h9n7d"] Nov 28 14:58:47 crc kubenswrapper[4857]: I1128 14:58:47.198162 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5b7946d7b9-h9n7d" podUID="60abd7c6-2682-47cb-a624-108b876e1988" containerName="dnsmasq-dns" containerID="cri-o://3fa40ccadd6faab302ff4e7393bac4e81cf391fa652aa15d59c8c82ef1df6d71" gracePeriod=10 Nov 28 14:58:47 crc kubenswrapper[4857]: I1128 14:58:47.592874 4857 generic.go:334] "Generic (PLEG): container finished" podID="60abd7c6-2682-47cb-a624-108b876e1988" containerID="3fa40ccadd6faab302ff4e7393bac4e81cf391fa652aa15d59c8c82ef1df6d71" exitCode=0 Nov 28 14:58:47 crc kubenswrapper[4857]: I1128 14:58:47.593024 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b7946d7b9-h9n7d" event={"ID":"60abd7c6-2682-47cb-a624-108b876e1988","Type":"ContainerDied","Data":"3fa40ccadd6faab302ff4e7393bac4e81cf391fa652aa15d59c8c82ef1df6d71"} Nov 28 14:58:47 crc kubenswrapper[4857]: I1128 14:58:47.667296 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b7946d7b9-h9n7d" Nov 28 14:58:47 crc kubenswrapper[4857]: I1128 14:58:47.805562 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/60abd7c6-2682-47cb-a624-108b876e1988-dns-svc\") pod \"60abd7c6-2682-47cb-a624-108b876e1988\" (UID: \"60abd7c6-2682-47cb-a624-108b876e1988\") " Nov 28 14:58:47 crc kubenswrapper[4857]: I1128 14:58:47.805744 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-llqfg\" (UniqueName: \"kubernetes.io/projected/60abd7c6-2682-47cb-a624-108b876e1988-kube-api-access-llqfg\") pod \"60abd7c6-2682-47cb-a624-108b876e1988\" (UID: \"60abd7c6-2682-47cb-a624-108b876e1988\") " Nov 28 14:58:47 crc kubenswrapper[4857]: I1128 14:58:47.805767 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/60abd7c6-2682-47cb-a624-108b876e1988-config\") pod \"60abd7c6-2682-47cb-a624-108b876e1988\" (UID: \"60abd7c6-2682-47cb-a624-108b876e1988\") " Nov 28 14:58:47 crc kubenswrapper[4857]: I1128 14:58:47.814494 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/60abd7c6-2682-47cb-a624-108b876e1988-kube-api-access-llqfg" (OuterVolumeSpecName: "kube-api-access-llqfg") pod "60abd7c6-2682-47cb-a624-108b876e1988" (UID: "60abd7c6-2682-47cb-a624-108b876e1988"). InnerVolumeSpecName "kube-api-access-llqfg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:58:47 crc kubenswrapper[4857]: I1128 14:58:47.859229 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/60abd7c6-2682-47cb-a624-108b876e1988-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "60abd7c6-2682-47cb-a624-108b876e1988" (UID: "60abd7c6-2682-47cb-a624-108b876e1988"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 14:58:47 crc kubenswrapper[4857]: I1128 14:58:47.872091 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/60abd7c6-2682-47cb-a624-108b876e1988-config" (OuterVolumeSpecName: "config") pod "60abd7c6-2682-47cb-a624-108b876e1988" (UID: "60abd7c6-2682-47cb-a624-108b876e1988"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 14:58:47 crc kubenswrapper[4857]: I1128 14:58:47.907623 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/60abd7c6-2682-47cb-a624-108b876e1988-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 14:58:47 crc kubenswrapper[4857]: I1128 14:58:47.907660 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-llqfg\" (UniqueName: \"kubernetes.io/projected/60abd7c6-2682-47cb-a624-108b876e1988-kube-api-access-llqfg\") on node \"crc\" DevicePath \"\"" Nov 28 14:58:47 crc kubenswrapper[4857]: I1128 14:58:47.907672 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/60abd7c6-2682-47cb-a624-108b876e1988-config\") on node \"crc\" DevicePath \"\"" Nov 28 14:58:48 crc kubenswrapper[4857]: I1128 14:58:48.604965 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b7946d7b9-h9n7d" event={"ID":"60abd7c6-2682-47cb-a624-108b876e1988","Type":"ContainerDied","Data":"581af98d37924bd20f175afe979b9a1cecd3ccac0bd665a4644c9df039d88263"} Nov 28 14:58:48 crc kubenswrapper[4857]: I1128 14:58:48.605483 4857 scope.go:117] "RemoveContainer" containerID="3fa40ccadd6faab302ff4e7393bac4e81cf391fa652aa15d59c8c82ef1df6d71" Nov 28 14:58:48 crc kubenswrapper[4857]: I1128 14:58:48.605039 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b7946d7b9-h9n7d" Nov 28 14:58:48 crc kubenswrapper[4857]: I1128 14:58:48.635313 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b7946d7b9-h9n7d"] Nov 28 14:58:48 crc kubenswrapper[4857]: I1128 14:58:48.643276 4857 scope.go:117] "RemoveContainer" containerID="3456c92fa9ac98bb0eda08d107ff5ab1deabca3d28be4b613174c15a28c7f295" Nov 28 14:58:48 crc kubenswrapper[4857]: I1128 14:58:48.643596 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5b7946d7b9-h9n7d"] Nov 28 14:58:50 crc kubenswrapper[4857]: I1128 14:58:50.238434 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="60abd7c6-2682-47cb-a624-108b876e1988" path="/var/lib/kubelet/pods/60abd7c6-2682-47cb-a624-108b876e1988/volumes" Nov 28 14:58:50 crc kubenswrapper[4857]: I1128 14:58:50.345050 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Nov 28 14:58:50 crc kubenswrapper[4857]: E1128 14:58:50.345393 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60abd7c6-2682-47cb-a624-108b876e1988" containerName="dnsmasq-dns" Nov 28 14:58:50 crc kubenswrapper[4857]: I1128 14:58:50.345413 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="60abd7c6-2682-47cb-a624-108b876e1988" containerName="dnsmasq-dns" Nov 28 14:58:50 crc kubenswrapper[4857]: E1128 14:58:50.345431 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60abd7c6-2682-47cb-a624-108b876e1988" containerName="init" Nov 28 14:58:50 crc kubenswrapper[4857]: I1128 14:58:50.345439 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="60abd7c6-2682-47cb-a624-108b876e1988" containerName="init" Nov 28 14:58:50 crc kubenswrapper[4857]: I1128 14:58:50.345589 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="60abd7c6-2682-47cb-a624-108b876e1988" containerName="dnsmasq-dns" Nov 28 14:58:50 crc kubenswrapper[4857]: I1128 14:58:50.346429 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 28 14:58:50 crc kubenswrapper[4857]: I1128 14:58:50.350663 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 28 14:58:50 crc kubenswrapper[4857]: I1128 14:58:50.351158 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-p479b" Nov 28 14:58:50 crc kubenswrapper[4857]: I1128 14:58:50.362102 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 28 14:58:50 crc kubenswrapper[4857]: I1128 14:58:50.441932 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 28 14:58:50 crc kubenswrapper[4857]: I1128 14:58:50.462299 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/09121b50-2dda-44c5-8ba4-3f9a1f55e8e6-scripts\") pod \"ovn-northd-0\" (UID: \"09121b50-2dda-44c5-8ba4-3f9a1f55e8e6\") " pod="openstack/ovn-northd-0" Nov 28 14:58:50 crc kubenswrapper[4857]: I1128 14:58:50.462348 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09121b50-2dda-44c5-8ba4-3f9a1f55e8e6-config\") pod \"ovn-northd-0\" (UID: \"09121b50-2dda-44c5-8ba4-3f9a1f55e8e6\") " pod="openstack/ovn-northd-0" Nov 28 14:58:50 crc kubenswrapper[4857]: I1128 14:58:50.462515 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/09121b50-2dda-44c5-8ba4-3f9a1f55e8e6-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"09121b50-2dda-44c5-8ba4-3f9a1f55e8e6\") " pod="openstack/ovn-northd-0" Nov 28 14:58:50 crc kubenswrapper[4857]: I1128 14:58:50.462608 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09121b50-2dda-44c5-8ba4-3f9a1f55e8e6-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"09121b50-2dda-44c5-8ba4-3f9a1f55e8e6\") " pod="openstack/ovn-northd-0" Nov 28 14:58:50 crc kubenswrapper[4857]: I1128 14:58:50.462767 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5b4z7\" (UniqueName: \"kubernetes.io/projected/09121b50-2dda-44c5-8ba4-3f9a1f55e8e6-kube-api-access-5b4z7\") pod \"ovn-northd-0\" (UID: \"09121b50-2dda-44c5-8ba4-3f9a1f55e8e6\") " pod="openstack/ovn-northd-0" Nov 28 14:58:50 crc kubenswrapper[4857]: I1128 14:58:50.564818 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/09121b50-2dda-44c5-8ba4-3f9a1f55e8e6-scripts\") pod \"ovn-northd-0\" (UID: \"09121b50-2dda-44c5-8ba4-3f9a1f55e8e6\") " pod="openstack/ovn-northd-0" Nov 28 14:58:50 crc kubenswrapper[4857]: I1128 14:58:50.564864 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09121b50-2dda-44c5-8ba4-3f9a1f55e8e6-config\") pod \"ovn-northd-0\" (UID: \"09121b50-2dda-44c5-8ba4-3f9a1f55e8e6\") " pod="openstack/ovn-northd-0" Nov 28 14:58:50 crc kubenswrapper[4857]: I1128 14:58:50.564915 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/09121b50-2dda-44c5-8ba4-3f9a1f55e8e6-ovn-rundir\") pod \"ovn-northd-0\" (UID: 
\"09121b50-2dda-44c5-8ba4-3f9a1f55e8e6\") " pod="openstack/ovn-northd-0" Nov 28 14:58:50 crc kubenswrapper[4857]: I1128 14:58:50.564960 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09121b50-2dda-44c5-8ba4-3f9a1f55e8e6-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"09121b50-2dda-44c5-8ba4-3f9a1f55e8e6\") " pod="openstack/ovn-northd-0" Nov 28 14:58:50 crc kubenswrapper[4857]: I1128 14:58:50.564991 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5b4z7\" (UniqueName: \"kubernetes.io/projected/09121b50-2dda-44c5-8ba4-3f9a1f55e8e6-kube-api-access-5b4z7\") pod \"ovn-northd-0\" (UID: \"09121b50-2dda-44c5-8ba4-3f9a1f55e8e6\") " pod="openstack/ovn-northd-0" Nov 28 14:58:50 crc kubenswrapper[4857]: I1128 14:58:50.566058 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/09121b50-2dda-44c5-8ba4-3f9a1f55e8e6-scripts\") pod \"ovn-northd-0\" (UID: \"09121b50-2dda-44c5-8ba4-3f9a1f55e8e6\") " pod="openstack/ovn-northd-0" Nov 28 14:58:50 crc kubenswrapper[4857]: I1128 14:58:50.566547 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09121b50-2dda-44c5-8ba4-3f9a1f55e8e6-config\") pod \"ovn-northd-0\" (UID: \"09121b50-2dda-44c5-8ba4-3f9a1f55e8e6\") " pod="openstack/ovn-northd-0" Nov 28 14:58:50 crc kubenswrapper[4857]: I1128 14:58:50.566792 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/09121b50-2dda-44c5-8ba4-3f9a1f55e8e6-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"09121b50-2dda-44c5-8ba4-3f9a1f55e8e6\") " pod="openstack/ovn-northd-0" Nov 28 14:58:50 crc kubenswrapper[4857]: I1128 14:58:50.574537 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09121b50-2dda-44c5-8ba4-3f9a1f55e8e6-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"09121b50-2dda-44c5-8ba4-3f9a1f55e8e6\") " pod="openstack/ovn-northd-0" Nov 28 14:58:50 crc kubenswrapper[4857]: I1128 14:58:50.583503 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5b4z7\" (UniqueName: \"kubernetes.io/projected/09121b50-2dda-44c5-8ba4-3f9a1f55e8e6-kube-api-access-5b4z7\") pod \"ovn-northd-0\" (UID: \"09121b50-2dda-44c5-8ba4-3f9a1f55e8e6\") " pod="openstack/ovn-northd-0" Nov 28 14:58:50 crc kubenswrapper[4857]: I1128 14:58:50.719815 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 28 14:58:51 crc kubenswrapper[4857]: I1128 14:58:51.218060 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 28 14:58:51 crc kubenswrapper[4857]: W1128 14:58:51.229089 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod09121b50_2dda_44c5_8ba4_3f9a1f55e8e6.slice/crio-57a9fe9663e8f2d3a167b716b5e3ba0f2c10ba208067088389c0d8685cb84d4c WatchSource:0}: Error finding container 57a9fe9663e8f2d3a167b716b5e3ba0f2c10ba208067088389c0d8685cb84d4c: Status 404 returned error can't find the container with id 57a9fe9663e8f2d3a167b716b5e3ba0f2c10ba208067088389c0d8685cb84d4c Nov 28 14:58:51 crc kubenswrapper[4857]: I1128 14:58:51.638425 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"09121b50-2dda-44c5-8ba4-3f9a1f55e8e6","Type":"ContainerStarted","Data":"40bd5823556b1684b9f1a4adf7b0bd2864d3534018fd5ef874b3497d4cb08113"} Nov 28 14:58:51 crc kubenswrapper[4857]: I1128 14:58:51.639124 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"09121b50-2dda-44c5-8ba4-3f9a1f55e8e6","Type":"ContainerStarted","Data":"577a8a979a6c24347333c1d98c2653b8fcaca72a7f14ab1a1d01ef7dbc0cedbf"} Nov 28 14:58:51 crc kubenswrapper[4857]: I1128 14:58:51.639153 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"09121b50-2dda-44c5-8ba4-3f9a1f55e8e6","Type":"ContainerStarted","Data":"57a9fe9663e8f2d3a167b716b5e3ba0f2c10ba208067088389c0d8685cb84d4c"} Nov 28 14:58:51 crc kubenswrapper[4857]: I1128 14:58:51.639184 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Nov 28 14:58:51 crc kubenswrapper[4857]: I1128 14:58:51.672698 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=1.672670017 podStartE2EDuration="1.672670017s" podCreationTimestamp="2025-11-28 14:58:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 14:58:51.661090908 +0000 UTC m=+5381.785032345" watchObservedRunningTime="2025-11-28 14:58:51.672670017 +0000 UTC m=+5381.796611484" Nov 28 14:58:52 crc kubenswrapper[4857]: I1128 14:58:52.317545 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-mpcw7"] Nov 28 14:58:52 crc kubenswrapper[4857]: I1128 14:58:52.324442 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mpcw7" Nov 28 14:58:52 crc kubenswrapper[4857]: I1128 14:58:52.333273 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mpcw7"] Nov 28 14:58:52 crc kubenswrapper[4857]: I1128 14:58:52.397703 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xgfqb\" (UniqueName: \"kubernetes.io/projected/9efeffff-1d6d-4174-8fd8-deea5db88bb1-kube-api-access-xgfqb\") pod \"community-operators-mpcw7\" (UID: \"9efeffff-1d6d-4174-8fd8-deea5db88bb1\") " pod="openshift-marketplace/community-operators-mpcw7" Nov 28 14:58:52 crc kubenswrapper[4857]: I1128 14:58:52.397784 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9efeffff-1d6d-4174-8fd8-deea5db88bb1-catalog-content\") pod \"community-operators-mpcw7\" (UID: \"9efeffff-1d6d-4174-8fd8-deea5db88bb1\") " pod="openshift-marketplace/community-operators-mpcw7" Nov 28 14:58:52 crc kubenswrapper[4857]: I1128 14:58:52.397819 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9efeffff-1d6d-4174-8fd8-deea5db88bb1-utilities\") pod \"community-operators-mpcw7\" (UID: \"9efeffff-1d6d-4174-8fd8-deea5db88bb1\") " pod="openshift-marketplace/community-operators-mpcw7" Nov 28 14:58:52 crc kubenswrapper[4857]: I1128 14:58:52.499376 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xgfqb\" (UniqueName: \"kubernetes.io/projected/9efeffff-1d6d-4174-8fd8-deea5db88bb1-kube-api-access-xgfqb\") pod \"community-operators-mpcw7\" (UID: \"9efeffff-1d6d-4174-8fd8-deea5db88bb1\") " pod="openshift-marketplace/community-operators-mpcw7" Nov 28 14:58:52 crc kubenswrapper[4857]: I1128 14:58:52.499853 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9efeffff-1d6d-4174-8fd8-deea5db88bb1-catalog-content\") pod \"community-operators-mpcw7\" (UID: \"9efeffff-1d6d-4174-8fd8-deea5db88bb1\") " pod="openshift-marketplace/community-operators-mpcw7" Nov 28 14:58:52 crc kubenswrapper[4857]: I1128 14:58:52.499903 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9efeffff-1d6d-4174-8fd8-deea5db88bb1-utilities\") pod \"community-operators-mpcw7\" (UID: \"9efeffff-1d6d-4174-8fd8-deea5db88bb1\") " pod="openshift-marketplace/community-operators-mpcw7" Nov 28 14:58:52 crc kubenswrapper[4857]: I1128 14:58:52.500482 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9efeffff-1d6d-4174-8fd8-deea5db88bb1-utilities\") pod \"community-operators-mpcw7\" (UID: \"9efeffff-1d6d-4174-8fd8-deea5db88bb1\") " pod="openshift-marketplace/community-operators-mpcw7" Nov 28 14:58:52 crc kubenswrapper[4857]: I1128 14:58:52.500681 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9efeffff-1d6d-4174-8fd8-deea5db88bb1-catalog-content\") pod \"community-operators-mpcw7\" (UID: \"9efeffff-1d6d-4174-8fd8-deea5db88bb1\") " pod="openshift-marketplace/community-operators-mpcw7" Nov 28 14:58:52 crc kubenswrapper[4857]: I1128 14:58:52.521981 4857 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-xgfqb\" (UniqueName: \"kubernetes.io/projected/9efeffff-1d6d-4174-8fd8-deea5db88bb1-kube-api-access-xgfqb\") pod \"community-operators-mpcw7\" (UID: \"9efeffff-1d6d-4174-8fd8-deea5db88bb1\") " pod="openshift-marketplace/community-operators-mpcw7" Nov 28 14:58:52 crc kubenswrapper[4857]: I1128 14:58:52.651871 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mpcw7" Nov 28 14:58:53 crc kubenswrapper[4857]: I1128 14:58:53.195437 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mpcw7"] Nov 28 14:58:53 crc kubenswrapper[4857]: W1128 14:58:53.208516 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9efeffff_1d6d_4174_8fd8_deea5db88bb1.slice/crio-63fba123c863021678c67761ae6c17b1970323542f8a9951873ae4a65c5c31b7 WatchSource:0}: Error finding container 63fba123c863021678c67761ae6c17b1970323542f8a9951873ae4a65c5c31b7: Status 404 returned error can't find the container with id 63fba123c863021678c67761ae6c17b1970323542f8a9951873ae4a65c5c31b7 Nov 28 14:58:53 crc kubenswrapper[4857]: I1128 14:58:53.657662 4857 generic.go:334] "Generic (PLEG): container finished" podID="9efeffff-1d6d-4174-8fd8-deea5db88bb1" containerID="3baff6510f8a2e626f59d7bb0fd4871cf8ce8a5653410ee1d0f443864bec4fc5" exitCode=0 Nov 28 14:58:53 crc kubenswrapper[4857]: I1128 14:58:53.657758 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mpcw7" event={"ID":"9efeffff-1d6d-4174-8fd8-deea5db88bb1","Type":"ContainerDied","Data":"3baff6510f8a2e626f59d7bb0fd4871cf8ce8a5653410ee1d0f443864bec4fc5"} Nov 28 14:58:53 crc kubenswrapper[4857]: I1128 14:58:53.658151 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mpcw7" event={"ID":"9efeffff-1d6d-4174-8fd8-deea5db88bb1","Type":"ContainerStarted","Data":"63fba123c863021678c67761ae6c17b1970323542f8a9951873ae4a65c5c31b7"} Nov 28 14:58:54 crc kubenswrapper[4857]: I1128 14:58:54.671916 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mpcw7" event={"ID":"9efeffff-1d6d-4174-8fd8-deea5db88bb1","Type":"ContainerStarted","Data":"6a5aad84ec212bcba85d5b36f942483ddda595a2a0edacbd0f90f0d6499f7106"} Nov 28 14:58:55 crc kubenswrapper[4857]: I1128 14:58:55.229778 4857 scope.go:117] "RemoveContainer" containerID="3e9d36f7893d9f1cc6753c2afab65986137c59e8ac6eb66a5f0a1735633d260e" Nov 28 14:58:55 crc kubenswrapper[4857]: E1128 14:58:55.230258 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:58:55 crc kubenswrapper[4857]: I1128 14:58:55.672088 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-qfdtd"] Nov 28 14:58:55 crc kubenswrapper[4857]: I1128 14:58:55.673253 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-qfdtd" Nov 28 14:58:55 crc kubenswrapper[4857]: I1128 14:58:55.682427 4857 generic.go:334] "Generic (PLEG): container finished" podID="9efeffff-1d6d-4174-8fd8-deea5db88bb1" containerID="6a5aad84ec212bcba85d5b36f942483ddda595a2a0edacbd0f90f0d6499f7106" exitCode=0 Nov 28 14:58:55 crc kubenswrapper[4857]: I1128 14:58:55.682506 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mpcw7" event={"ID":"9efeffff-1d6d-4174-8fd8-deea5db88bb1","Type":"ContainerDied","Data":"6a5aad84ec212bcba85d5b36f942483ddda595a2a0edacbd0f90f0d6499f7106"} Nov 28 14:58:55 crc kubenswrapper[4857]: I1128 14:58:55.688895 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-qfdtd"] Nov 28 14:58:55 crc kubenswrapper[4857]: I1128 14:58:55.768245 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e369f54d-db0e-4b44-943c-b08090c27050-operator-scripts\") pod \"keystone-db-create-qfdtd\" (UID: \"e369f54d-db0e-4b44-943c-b08090c27050\") " pod="openstack/keystone-db-create-qfdtd" Nov 28 14:58:55 crc kubenswrapper[4857]: I1128 14:58:55.768365 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zmgmx\" (UniqueName: \"kubernetes.io/projected/e369f54d-db0e-4b44-943c-b08090c27050-kube-api-access-zmgmx\") pod \"keystone-db-create-qfdtd\" (UID: \"e369f54d-db0e-4b44-943c-b08090c27050\") " pod="openstack/keystone-db-create-qfdtd" Nov 28 14:58:55 crc kubenswrapper[4857]: I1128 14:58:55.772222 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-1579-account-create-update-pt6kg"] Nov 28 14:58:55 crc kubenswrapper[4857]: I1128 14:58:55.773733 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-1579-account-create-update-pt6kg" Nov 28 14:58:55 crc kubenswrapper[4857]: I1128 14:58:55.775676 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 28 14:58:55 crc kubenswrapper[4857]: I1128 14:58:55.783750 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-1579-account-create-update-pt6kg"] Nov 28 14:58:55 crc kubenswrapper[4857]: I1128 14:58:55.870692 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6xxxx\" (UniqueName: \"kubernetes.io/projected/abde92d0-5a59-425b-8093-fa103d0a10e0-kube-api-access-6xxxx\") pod \"keystone-1579-account-create-update-pt6kg\" (UID: \"abde92d0-5a59-425b-8093-fa103d0a10e0\") " pod="openstack/keystone-1579-account-create-update-pt6kg" Nov 28 14:58:55 crc kubenswrapper[4857]: I1128 14:58:55.870801 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zmgmx\" (UniqueName: \"kubernetes.io/projected/e369f54d-db0e-4b44-943c-b08090c27050-kube-api-access-zmgmx\") pod \"keystone-db-create-qfdtd\" (UID: \"e369f54d-db0e-4b44-943c-b08090c27050\") " pod="openstack/keystone-db-create-qfdtd" Nov 28 14:58:55 crc kubenswrapper[4857]: I1128 14:58:55.870910 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/abde92d0-5a59-425b-8093-fa103d0a10e0-operator-scripts\") pod \"keystone-1579-account-create-update-pt6kg\" (UID: \"abde92d0-5a59-425b-8093-fa103d0a10e0\") " pod="openstack/keystone-1579-account-create-update-pt6kg" Nov 28 14:58:55 crc kubenswrapper[4857]: I1128 14:58:55.870979 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e369f54d-db0e-4b44-943c-b08090c27050-operator-scripts\") pod \"keystone-db-create-qfdtd\" (UID: \"e369f54d-db0e-4b44-943c-b08090c27050\") " pod="openstack/keystone-db-create-qfdtd" Nov 28 14:58:55 crc kubenswrapper[4857]: I1128 14:58:55.871963 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e369f54d-db0e-4b44-943c-b08090c27050-operator-scripts\") pod \"keystone-db-create-qfdtd\" (UID: \"e369f54d-db0e-4b44-943c-b08090c27050\") " pod="openstack/keystone-db-create-qfdtd" Nov 28 14:58:55 crc kubenswrapper[4857]: I1128 14:58:55.892465 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zmgmx\" (UniqueName: \"kubernetes.io/projected/e369f54d-db0e-4b44-943c-b08090c27050-kube-api-access-zmgmx\") pod \"keystone-db-create-qfdtd\" (UID: \"e369f54d-db0e-4b44-943c-b08090c27050\") " pod="openstack/keystone-db-create-qfdtd" Nov 28 14:58:55 crc kubenswrapper[4857]: I1128 14:58:55.973530 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6xxxx\" (UniqueName: \"kubernetes.io/projected/abde92d0-5a59-425b-8093-fa103d0a10e0-kube-api-access-6xxxx\") pod \"keystone-1579-account-create-update-pt6kg\" (UID: \"abde92d0-5a59-425b-8093-fa103d0a10e0\") " pod="openstack/keystone-1579-account-create-update-pt6kg" Nov 28 14:58:55 crc kubenswrapper[4857]: I1128 14:58:55.973691 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/abde92d0-5a59-425b-8093-fa103d0a10e0-operator-scripts\") pod 
\"keystone-1579-account-create-update-pt6kg\" (UID: \"abde92d0-5a59-425b-8093-fa103d0a10e0\") " pod="openstack/keystone-1579-account-create-update-pt6kg" Nov 28 14:58:55 crc kubenswrapper[4857]: I1128 14:58:55.975114 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/abde92d0-5a59-425b-8093-fa103d0a10e0-operator-scripts\") pod \"keystone-1579-account-create-update-pt6kg\" (UID: \"abde92d0-5a59-425b-8093-fa103d0a10e0\") " pod="openstack/keystone-1579-account-create-update-pt6kg" Nov 28 14:58:55 crc kubenswrapper[4857]: I1128 14:58:55.991861 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6xxxx\" (UniqueName: \"kubernetes.io/projected/abde92d0-5a59-425b-8093-fa103d0a10e0-kube-api-access-6xxxx\") pod \"keystone-1579-account-create-update-pt6kg\" (UID: \"abde92d0-5a59-425b-8093-fa103d0a10e0\") " pod="openstack/keystone-1579-account-create-update-pt6kg" Nov 28 14:58:56 crc kubenswrapper[4857]: I1128 14:58:56.001719 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-qfdtd" Nov 28 14:58:56 crc kubenswrapper[4857]: I1128 14:58:56.098726 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-1579-account-create-update-pt6kg" Nov 28 14:58:56 crc kubenswrapper[4857]: W1128 14:58:56.486341 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode369f54d_db0e_4b44_943c_b08090c27050.slice/crio-ff2f1805ee8cd5e4d37565c5e029fa13ac893bec55ff49afcceac83eecd8ba17 WatchSource:0}: Error finding container ff2f1805ee8cd5e4d37565c5e029fa13ac893bec55ff49afcceac83eecd8ba17: Status 404 returned error can't find the container with id ff2f1805ee8cd5e4d37565c5e029fa13ac893bec55ff49afcceac83eecd8ba17 Nov 28 14:58:56 crc kubenswrapper[4857]: I1128 14:58:56.487186 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-qfdtd"] Nov 28 14:58:56 crc kubenswrapper[4857]: I1128 14:58:56.607528 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-1579-account-create-update-pt6kg"] Nov 28 14:58:56 crc kubenswrapper[4857]: I1128 14:58:56.701581 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mpcw7" event={"ID":"9efeffff-1d6d-4174-8fd8-deea5db88bb1","Type":"ContainerStarted","Data":"62ac0b82ba99f6cf5bcf3a4a9eb216069875e582c58671d65efe3f41ec3cf77b"} Nov 28 14:58:56 crc kubenswrapper[4857]: I1128 14:58:56.703473 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-qfdtd" event={"ID":"e369f54d-db0e-4b44-943c-b08090c27050","Type":"ContainerStarted","Data":"0f0a4a102d3e912ff3e948850ebca5e4a419f9611cd8dbb5e75ff81a38906ae8"} Nov 28 14:58:56 crc kubenswrapper[4857]: I1128 14:58:56.703544 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-qfdtd" event={"ID":"e369f54d-db0e-4b44-943c-b08090c27050","Type":"ContainerStarted","Data":"ff2f1805ee8cd5e4d37565c5e029fa13ac893bec55ff49afcceac83eecd8ba17"} Nov 28 14:58:56 crc kubenswrapper[4857]: I1128 14:58:56.712290 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-1579-account-create-update-pt6kg" event={"ID":"abde92d0-5a59-425b-8093-fa103d0a10e0","Type":"ContainerStarted","Data":"470d262ba9a6210a945e4a63103bb0179cab1e79f3a13da6fc72d6ccd1c7aa0f"} Nov 28 14:58:56 crc 
kubenswrapper[4857]: I1128 14:58:56.726874 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-mpcw7" podStartSLOduration=2.194118597 podStartE2EDuration="4.726855548s" podCreationTimestamp="2025-11-28 14:58:52 +0000 UTC" firstStartedPulling="2025-11-28 14:58:53.659538898 +0000 UTC m=+5383.783480345" lastFinishedPulling="2025-11-28 14:58:56.192275859 +0000 UTC m=+5386.316217296" observedRunningTime="2025-11-28 14:58:56.723477218 +0000 UTC m=+5386.847418645" watchObservedRunningTime="2025-11-28 14:58:56.726855548 +0000 UTC m=+5386.850796985" Nov 28 14:58:56 crc kubenswrapper[4857]: I1128 14:58:56.746442 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-create-qfdtd" podStartSLOduration=1.74641449 podStartE2EDuration="1.74641449s" podCreationTimestamp="2025-11-28 14:58:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 14:58:56.737266766 +0000 UTC m=+5386.861208203" watchObservedRunningTime="2025-11-28 14:58:56.74641449 +0000 UTC m=+5386.870355927" Nov 28 14:58:57 crc kubenswrapper[4857]: I1128 14:58:57.731926 4857 generic.go:334] "Generic (PLEG): container finished" podID="abde92d0-5a59-425b-8093-fa103d0a10e0" containerID="84be4a0ed1b3a0a2ef55c4da5ce29451349d74551ad2cda4461c9f1d26d8526d" exitCode=0 Nov 28 14:58:57 crc kubenswrapper[4857]: I1128 14:58:57.732137 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-1579-account-create-update-pt6kg" event={"ID":"abde92d0-5a59-425b-8093-fa103d0a10e0","Type":"ContainerDied","Data":"84be4a0ed1b3a0a2ef55c4da5ce29451349d74551ad2cda4461c9f1d26d8526d"} Nov 28 14:58:57 crc kubenswrapper[4857]: I1128 14:58:57.740194 4857 generic.go:334] "Generic (PLEG): container finished" podID="e369f54d-db0e-4b44-943c-b08090c27050" containerID="0f0a4a102d3e912ff3e948850ebca5e4a419f9611cd8dbb5e75ff81a38906ae8" exitCode=0 Nov 28 14:58:57 crc kubenswrapper[4857]: I1128 14:58:57.741661 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-qfdtd" event={"ID":"e369f54d-db0e-4b44-943c-b08090c27050","Type":"ContainerDied","Data":"0f0a4a102d3e912ff3e948850ebca5e4a419f9611cd8dbb5e75ff81a38906ae8"} Nov 28 14:58:59 crc kubenswrapper[4857]: I1128 14:58:59.175217 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-qfdtd" Nov 28 14:58:59 crc kubenswrapper[4857]: I1128 14:58:59.182064 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-1579-account-create-update-pt6kg" Nov 28 14:58:59 crc kubenswrapper[4857]: I1128 14:58:59.348055 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e369f54d-db0e-4b44-943c-b08090c27050-operator-scripts\") pod \"e369f54d-db0e-4b44-943c-b08090c27050\" (UID: \"e369f54d-db0e-4b44-943c-b08090c27050\") " Nov 28 14:58:59 crc kubenswrapper[4857]: I1128 14:58:59.349078 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6xxxx\" (UniqueName: \"kubernetes.io/projected/abde92d0-5a59-425b-8093-fa103d0a10e0-kube-api-access-6xxxx\") pod \"abde92d0-5a59-425b-8093-fa103d0a10e0\" (UID: \"abde92d0-5a59-425b-8093-fa103d0a10e0\") " Nov 28 14:58:59 crc kubenswrapper[4857]: I1128 14:58:59.349333 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e369f54d-db0e-4b44-943c-b08090c27050-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e369f54d-db0e-4b44-943c-b08090c27050" (UID: "e369f54d-db0e-4b44-943c-b08090c27050"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 14:58:59 crc kubenswrapper[4857]: I1128 14:58:59.349353 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/abde92d0-5a59-425b-8093-fa103d0a10e0-operator-scripts\") pod \"abde92d0-5a59-425b-8093-fa103d0a10e0\" (UID: \"abde92d0-5a59-425b-8093-fa103d0a10e0\") " Nov 28 14:58:59 crc kubenswrapper[4857]: I1128 14:58:59.349516 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zmgmx\" (UniqueName: \"kubernetes.io/projected/e369f54d-db0e-4b44-943c-b08090c27050-kube-api-access-zmgmx\") pod \"e369f54d-db0e-4b44-943c-b08090c27050\" (UID: \"e369f54d-db0e-4b44-943c-b08090c27050\") " Nov 28 14:58:59 crc kubenswrapper[4857]: I1128 14:58:59.350183 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e369f54d-db0e-4b44-943c-b08090c27050-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 14:58:59 crc kubenswrapper[4857]: I1128 14:58:59.350585 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/abde92d0-5a59-425b-8093-fa103d0a10e0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "abde92d0-5a59-425b-8093-fa103d0a10e0" (UID: "abde92d0-5a59-425b-8093-fa103d0a10e0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 14:58:59 crc kubenswrapper[4857]: I1128 14:58:59.358150 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/abde92d0-5a59-425b-8093-fa103d0a10e0-kube-api-access-6xxxx" (OuterVolumeSpecName: "kube-api-access-6xxxx") pod "abde92d0-5a59-425b-8093-fa103d0a10e0" (UID: "abde92d0-5a59-425b-8093-fa103d0a10e0"). InnerVolumeSpecName "kube-api-access-6xxxx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:58:59 crc kubenswrapper[4857]: I1128 14:58:59.359892 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e369f54d-db0e-4b44-943c-b08090c27050-kube-api-access-zmgmx" (OuterVolumeSpecName: "kube-api-access-zmgmx") pod "e369f54d-db0e-4b44-943c-b08090c27050" (UID: "e369f54d-db0e-4b44-943c-b08090c27050"). 
InnerVolumeSpecName "kube-api-access-zmgmx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:58:59 crc kubenswrapper[4857]: I1128 14:58:59.451312 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/abde92d0-5a59-425b-8093-fa103d0a10e0-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 14:58:59 crc kubenswrapper[4857]: I1128 14:58:59.451457 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zmgmx\" (UniqueName: \"kubernetes.io/projected/e369f54d-db0e-4b44-943c-b08090c27050-kube-api-access-zmgmx\") on node \"crc\" DevicePath \"\"" Nov 28 14:58:59 crc kubenswrapper[4857]: I1128 14:58:59.451473 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6xxxx\" (UniqueName: \"kubernetes.io/projected/abde92d0-5a59-425b-8093-fa103d0a10e0-kube-api-access-6xxxx\") on node \"crc\" DevicePath \"\"" Nov 28 14:58:59 crc kubenswrapper[4857]: I1128 14:58:59.763778 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-qfdtd" Nov 28 14:58:59 crc kubenswrapper[4857]: I1128 14:58:59.763813 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-qfdtd" event={"ID":"e369f54d-db0e-4b44-943c-b08090c27050","Type":"ContainerDied","Data":"ff2f1805ee8cd5e4d37565c5e029fa13ac893bec55ff49afcceac83eecd8ba17"} Nov 28 14:58:59 crc kubenswrapper[4857]: I1128 14:58:59.763901 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ff2f1805ee8cd5e4d37565c5e029fa13ac893bec55ff49afcceac83eecd8ba17" Nov 28 14:58:59 crc kubenswrapper[4857]: I1128 14:58:59.768037 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-1579-account-create-update-pt6kg" event={"ID":"abde92d0-5a59-425b-8093-fa103d0a10e0","Type":"ContainerDied","Data":"470d262ba9a6210a945e4a63103bb0179cab1e79f3a13da6fc72d6ccd1c7aa0f"} Nov 28 14:58:59 crc kubenswrapper[4857]: I1128 14:58:59.768099 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="470d262ba9a6210a945e4a63103bb0179cab1e79f3a13da6fc72d6ccd1c7aa0f" Nov 28 14:58:59 crc kubenswrapper[4857]: I1128 14:58:59.768178 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-1579-account-create-update-pt6kg" Nov 28 14:59:01 crc kubenswrapper[4857]: I1128 14:59:01.451256 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-jgj4x"] Nov 28 14:59:01 crc kubenswrapper[4857]: E1128 14:59:01.452418 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="abde92d0-5a59-425b-8093-fa103d0a10e0" containerName="mariadb-account-create-update" Nov 28 14:59:01 crc kubenswrapper[4857]: I1128 14:59:01.452439 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="abde92d0-5a59-425b-8093-fa103d0a10e0" containerName="mariadb-account-create-update" Nov 28 14:59:01 crc kubenswrapper[4857]: E1128 14:59:01.452492 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e369f54d-db0e-4b44-943c-b08090c27050" containerName="mariadb-database-create" Nov 28 14:59:01 crc kubenswrapper[4857]: I1128 14:59:01.452501 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="e369f54d-db0e-4b44-943c-b08090c27050" containerName="mariadb-database-create" Nov 28 14:59:01 crc kubenswrapper[4857]: I1128 14:59:01.452753 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="e369f54d-db0e-4b44-943c-b08090c27050" containerName="mariadb-database-create" Nov 28 14:59:01 crc kubenswrapper[4857]: I1128 14:59:01.452773 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="abde92d0-5a59-425b-8093-fa103d0a10e0" containerName="mariadb-account-create-update" Nov 28 14:59:01 crc kubenswrapper[4857]: I1128 14:59:01.453616 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-jgj4x" Nov 28 14:59:01 crc kubenswrapper[4857]: I1128 14:59:01.456702 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 14:59:01 crc kubenswrapper[4857]: I1128 14:59:01.456942 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-x5726" Nov 28 14:59:01 crc kubenswrapper[4857]: I1128 14:59:01.457228 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 14:59:01 crc kubenswrapper[4857]: I1128 14:59:01.457402 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 14:59:01 crc kubenswrapper[4857]: I1128 14:59:01.471232 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-jgj4x"] Nov 28 14:59:01 crc kubenswrapper[4857]: I1128 14:59:01.604900 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d673f490-5579-4fe5-b65c-0d1f9da6d976-config-data\") pod \"keystone-db-sync-jgj4x\" (UID: \"d673f490-5579-4fe5-b65c-0d1f9da6d976\") " pod="openstack/keystone-db-sync-jgj4x" Nov 28 14:59:01 crc kubenswrapper[4857]: I1128 14:59:01.604972 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2svgp\" (UniqueName: \"kubernetes.io/projected/d673f490-5579-4fe5-b65c-0d1f9da6d976-kube-api-access-2svgp\") pod \"keystone-db-sync-jgj4x\" (UID: \"d673f490-5579-4fe5-b65c-0d1f9da6d976\") " pod="openstack/keystone-db-sync-jgj4x" Nov 28 14:59:01 crc kubenswrapper[4857]: I1128 14:59:01.605701 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/d673f490-5579-4fe5-b65c-0d1f9da6d976-combined-ca-bundle\") pod \"keystone-db-sync-jgj4x\" (UID: \"d673f490-5579-4fe5-b65c-0d1f9da6d976\") " pod="openstack/keystone-db-sync-jgj4x" Nov 28 14:59:01 crc kubenswrapper[4857]: I1128 14:59:01.708179 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d673f490-5579-4fe5-b65c-0d1f9da6d976-combined-ca-bundle\") pod \"keystone-db-sync-jgj4x\" (UID: \"d673f490-5579-4fe5-b65c-0d1f9da6d976\") " pod="openstack/keystone-db-sync-jgj4x" Nov 28 14:59:01 crc kubenswrapper[4857]: I1128 14:59:01.708306 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d673f490-5579-4fe5-b65c-0d1f9da6d976-config-data\") pod \"keystone-db-sync-jgj4x\" (UID: \"d673f490-5579-4fe5-b65c-0d1f9da6d976\") " pod="openstack/keystone-db-sync-jgj4x" Nov 28 14:59:01 crc kubenswrapper[4857]: I1128 14:59:01.708337 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2svgp\" (UniqueName: \"kubernetes.io/projected/d673f490-5579-4fe5-b65c-0d1f9da6d976-kube-api-access-2svgp\") pod \"keystone-db-sync-jgj4x\" (UID: \"d673f490-5579-4fe5-b65c-0d1f9da6d976\") " pod="openstack/keystone-db-sync-jgj4x" Nov 28 14:59:01 crc kubenswrapper[4857]: I1128 14:59:01.715178 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d673f490-5579-4fe5-b65c-0d1f9da6d976-combined-ca-bundle\") pod \"keystone-db-sync-jgj4x\" (UID: \"d673f490-5579-4fe5-b65c-0d1f9da6d976\") " pod="openstack/keystone-db-sync-jgj4x" Nov 28 14:59:01 crc kubenswrapper[4857]: I1128 14:59:01.719142 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d673f490-5579-4fe5-b65c-0d1f9da6d976-config-data\") pod \"keystone-db-sync-jgj4x\" (UID: \"d673f490-5579-4fe5-b65c-0d1f9da6d976\") " pod="openstack/keystone-db-sync-jgj4x" Nov 28 14:59:01 crc kubenswrapper[4857]: I1128 14:59:01.732013 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2svgp\" (UniqueName: \"kubernetes.io/projected/d673f490-5579-4fe5-b65c-0d1f9da6d976-kube-api-access-2svgp\") pod \"keystone-db-sync-jgj4x\" (UID: \"d673f490-5579-4fe5-b65c-0d1f9da6d976\") " pod="openstack/keystone-db-sync-jgj4x" Nov 28 14:59:01 crc kubenswrapper[4857]: I1128 14:59:01.813344 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-jgj4x" Nov 28 14:59:02 crc kubenswrapper[4857]: I1128 14:59:02.361505 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-jgj4x"] Nov 28 14:59:02 crc kubenswrapper[4857]: I1128 14:59:02.653312 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-mpcw7" Nov 28 14:59:02 crc kubenswrapper[4857]: I1128 14:59:02.653780 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-mpcw7" Nov 28 14:59:02 crc kubenswrapper[4857]: I1128 14:59:02.745874 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-mpcw7" Nov 28 14:59:02 crc kubenswrapper[4857]: I1128 14:59:02.802206 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-jgj4x" event={"ID":"d673f490-5579-4fe5-b65c-0d1f9da6d976","Type":"ContainerStarted","Data":"c3d0ae1c48bf077857839b23917aecbd4b3e06ba246b78998b499831e407d4d4"} Nov 28 14:59:02 crc kubenswrapper[4857]: I1128 14:59:02.890453 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-mpcw7" Nov 28 14:59:03 crc kubenswrapper[4857]: I1128 14:59:03.005660 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mpcw7"] Nov 28 14:59:03 crc kubenswrapper[4857]: I1128 14:59:03.818435 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-jgj4x" event={"ID":"d673f490-5579-4fe5-b65c-0d1f9da6d976","Type":"ContainerStarted","Data":"24016fc6ac5c2b457402908c6c60f64b6624aceb7ba63ee8f808706129bb2d08"} Nov 28 14:59:03 crc kubenswrapper[4857]: I1128 14:59:03.846293 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-jgj4x" podStartSLOduration=2.846270652 podStartE2EDuration="2.846270652s" podCreationTimestamp="2025-11-28 14:59:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 14:59:03.839267735 +0000 UTC m=+5393.963209192" watchObservedRunningTime="2025-11-28 14:59:03.846270652 +0000 UTC m=+5393.970212079" Nov 28 14:59:04 crc kubenswrapper[4857]: I1128 14:59:04.828586 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-mpcw7" podUID="9efeffff-1d6d-4174-8fd8-deea5db88bb1" containerName="registry-server" containerID="cri-o://62ac0b82ba99f6cf5bcf3a4a9eb216069875e582c58671d65efe3f41ec3cf77b" gracePeriod=2 Nov 28 14:59:05 crc kubenswrapper[4857]: I1128 14:59:05.823199 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 28 14:59:05 crc kubenswrapper[4857]: I1128 14:59:05.840906 4857 generic.go:334] "Generic (PLEG): container finished" podID="9efeffff-1d6d-4174-8fd8-deea5db88bb1" containerID="62ac0b82ba99f6cf5bcf3a4a9eb216069875e582c58671d65efe3f41ec3cf77b" exitCode=0 Nov 28 14:59:05 crc kubenswrapper[4857]: I1128 14:59:05.841029 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mpcw7" event={"ID":"9efeffff-1d6d-4174-8fd8-deea5db88bb1","Type":"ContainerDied","Data":"62ac0b82ba99f6cf5bcf3a4a9eb216069875e582c58671d65efe3f41ec3cf77b"} Nov 28 14:59:05 crc kubenswrapper[4857]: I1128 14:59:05.841155 4857 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-marketplace/community-operators-mpcw7" event={"ID":"9efeffff-1d6d-4174-8fd8-deea5db88bb1","Type":"ContainerDied","Data":"63fba123c863021678c67761ae6c17b1970323542f8a9951873ae4a65c5c31b7"} Nov 28 14:59:05 crc kubenswrapper[4857]: I1128 14:59:05.841188 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="63fba123c863021678c67761ae6c17b1970323542f8a9951873ae4a65c5c31b7" Nov 28 14:59:05 crc kubenswrapper[4857]: I1128 14:59:05.860366 4857 generic.go:334] "Generic (PLEG): container finished" podID="d673f490-5579-4fe5-b65c-0d1f9da6d976" containerID="24016fc6ac5c2b457402908c6c60f64b6624aceb7ba63ee8f808706129bb2d08" exitCode=0 Nov 28 14:59:05 crc kubenswrapper[4857]: I1128 14:59:05.860428 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-jgj4x" event={"ID":"d673f490-5579-4fe5-b65c-0d1f9da6d976","Type":"ContainerDied","Data":"24016fc6ac5c2b457402908c6c60f64b6624aceb7ba63ee8f808706129bb2d08"} Nov 28 14:59:05 crc kubenswrapper[4857]: I1128 14:59:05.883858 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mpcw7" Nov 28 14:59:05 crc kubenswrapper[4857]: I1128 14:59:05.997495 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9efeffff-1d6d-4174-8fd8-deea5db88bb1-utilities\") pod \"9efeffff-1d6d-4174-8fd8-deea5db88bb1\" (UID: \"9efeffff-1d6d-4174-8fd8-deea5db88bb1\") " Nov 28 14:59:05 crc kubenswrapper[4857]: I1128 14:59:05.997616 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xgfqb\" (UniqueName: \"kubernetes.io/projected/9efeffff-1d6d-4174-8fd8-deea5db88bb1-kube-api-access-xgfqb\") pod \"9efeffff-1d6d-4174-8fd8-deea5db88bb1\" (UID: \"9efeffff-1d6d-4174-8fd8-deea5db88bb1\") " Nov 28 14:59:05 crc kubenswrapper[4857]: I1128 14:59:05.997636 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9efeffff-1d6d-4174-8fd8-deea5db88bb1-catalog-content\") pod \"9efeffff-1d6d-4174-8fd8-deea5db88bb1\" (UID: \"9efeffff-1d6d-4174-8fd8-deea5db88bb1\") " Nov 28 14:59:05 crc kubenswrapper[4857]: I1128 14:59:05.999088 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9efeffff-1d6d-4174-8fd8-deea5db88bb1-utilities" (OuterVolumeSpecName: "utilities") pod "9efeffff-1d6d-4174-8fd8-deea5db88bb1" (UID: "9efeffff-1d6d-4174-8fd8-deea5db88bb1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:59:06 crc kubenswrapper[4857]: I1128 14:59:06.005378 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9efeffff-1d6d-4174-8fd8-deea5db88bb1-kube-api-access-xgfqb" (OuterVolumeSpecName: "kube-api-access-xgfqb") pod "9efeffff-1d6d-4174-8fd8-deea5db88bb1" (UID: "9efeffff-1d6d-4174-8fd8-deea5db88bb1"). InnerVolumeSpecName "kube-api-access-xgfqb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:59:06 crc kubenswrapper[4857]: I1128 14:59:06.077194 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9efeffff-1d6d-4174-8fd8-deea5db88bb1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9efeffff-1d6d-4174-8fd8-deea5db88bb1" (UID: "9efeffff-1d6d-4174-8fd8-deea5db88bb1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 14:59:06 crc kubenswrapper[4857]: I1128 14:59:06.100325 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xgfqb\" (UniqueName: \"kubernetes.io/projected/9efeffff-1d6d-4174-8fd8-deea5db88bb1-kube-api-access-xgfqb\") on node \"crc\" DevicePath \"\"" Nov 28 14:59:06 crc kubenswrapper[4857]: I1128 14:59:06.100356 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9efeffff-1d6d-4174-8fd8-deea5db88bb1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 14:59:06 crc kubenswrapper[4857]: I1128 14:59:06.100366 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9efeffff-1d6d-4174-8fd8-deea5db88bb1-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 14:59:06 crc kubenswrapper[4857]: I1128 14:59:06.874193 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mpcw7" Nov 28 14:59:06 crc kubenswrapper[4857]: I1128 14:59:06.916489 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mpcw7"] Nov 28 14:59:06 crc kubenswrapper[4857]: I1128 14:59:06.927093 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-mpcw7"] Nov 28 14:59:07 crc kubenswrapper[4857]: I1128 14:59:07.264828 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-jgj4x" Nov 28 14:59:07 crc kubenswrapper[4857]: I1128 14:59:07.425515 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d673f490-5579-4fe5-b65c-0d1f9da6d976-config-data\") pod \"d673f490-5579-4fe5-b65c-0d1f9da6d976\" (UID: \"d673f490-5579-4fe5-b65c-0d1f9da6d976\") " Nov 28 14:59:07 crc kubenswrapper[4857]: I1128 14:59:07.426223 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2svgp\" (UniqueName: \"kubernetes.io/projected/d673f490-5579-4fe5-b65c-0d1f9da6d976-kube-api-access-2svgp\") pod \"d673f490-5579-4fe5-b65c-0d1f9da6d976\" (UID: \"d673f490-5579-4fe5-b65c-0d1f9da6d976\") " Nov 28 14:59:07 crc kubenswrapper[4857]: I1128 14:59:07.426431 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d673f490-5579-4fe5-b65c-0d1f9da6d976-combined-ca-bundle\") pod \"d673f490-5579-4fe5-b65c-0d1f9da6d976\" (UID: \"d673f490-5579-4fe5-b65c-0d1f9da6d976\") " Nov 28 14:59:07 crc kubenswrapper[4857]: I1128 14:59:07.436207 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d673f490-5579-4fe5-b65c-0d1f9da6d976-kube-api-access-2svgp" (OuterVolumeSpecName: "kube-api-access-2svgp") pod "d673f490-5579-4fe5-b65c-0d1f9da6d976" (UID: "d673f490-5579-4fe5-b65c-0d1f9da6d976"). InnerVolumeSpecName "kube-api-access-2svgp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:59:07 crc kubenswrapper[4857]: I1128 14:59:07.479087 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d673f490-5579-4fe5-b65c-0d1f9da6d976-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d673f490-5579-4fe5-b65c-0d1f9da6d976" (UID: "d673f490-5579-4fe5-b65c-0d1f9da6d976"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 14:59:07 crc kubenswrapper[4857]: I1128 14:59:07.499150 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d673f490-5579-4fe5-b65c-0d1f9da6d976-config-data" (OuterVolumeSpecName: "config-data") pod "d673f490-5579-4fe5-b65c-0d1f9da6d976" (UID: "d673f490-5579-4fe5-b65c-0d1f9da6d976"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 14:59:07 crc kubenswrapper[4857]: I1128 14:59:07.528385 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d673f490-5579-4fe5-b65c-0d1f9da6d976-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 14:59:07 crc kubenswrapper[4857]: I1128 14:59:07.528426 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d673f490-5579-4fe5-b65c-0d1f9da6d976-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 14:59:07 crc kubenswrapper[4857]: I1128 14:59:07.528439 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2svgp\" (UniqueName: \"kubernetes.io/projected/d673f490-5579-4fe5-b65c-0d1f9da6d976-kube-api-access-2svgp\") on node \"crc\" DevicePath \"\"" Nov 28 14:59:07 crc kubenswrapper[4857]: I1128 14:59:07.894541 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-jgj4x" event={"ID":"d673f490-5579-4fe5-b65c-0d1f9da6d976","Type":"ContainerDied","Data":"c3d0ae1c48bf077857839b23917aecbd4b3e06ba246b78998b499831e407d4d4"} Nov 28 14:59:07 crc kubenswrapper[4857]: I1128 14:59:07.894611 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c3d0ae1c48bf077857839b23917aecbd4b3e06ba246b78998b499831e407d4d4" Nov 28 14:59:07 crc kubenswrapper[4857]: I1128 14:59:07.894707 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-jgj4x" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.178121 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-84579fb745-sxp7x"] Nov 28 14:59:08 crc kubenswrapper[4857]: E1128 14:59:08.178475 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d673f490-5579-4fe5-b65c-0d1f9da6d976" containerName="keystone-db-sync" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.178490 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d673f490-5579-4fe5-b65c-0d1f9da6d976" containerName="keystone-db-sync" Nov 28 14:59:08 crc kubenswrapper[4857]: E1128 14:59:08.178520 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9efeffff-1d6d-4174-8fd8-deea5db88bb1" containerName="extract-content" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.178529 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="9efeffff-1d6d-4174-8fd8-deea5db88bb1" containerName="extract-content" Nov 28 14:59:08 crc kubenswrapper[4857]: E1128 14:59:08.178549 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9efeffff-1d6d-4174-8fd8-deea5db88bb1" containerName="extract-utilities" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.178558 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="9efeffff-1d6d-4174-8fd8-deea5db88bb1" containerName="extract-utilities" Nov 28 14:59:08 crc kubenswrapper[4857]: E1128 14:59:08.178573 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9efeffff-1d6d-4174-8fd8-deea5db88bb1" containerName="registry-server" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.178581 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="9efeffff-1d6d-4174-8fd8-deea5db88bb1" containerName="registry-server" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.178764 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="9efeffff-1d6d-4174-8fd8-deea5db88bb1" containerName="registry-server" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.178787 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d673f490-5579-4fe5-b65c-0d1f9da6d976" containerName="keystone-db-sync" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.187022 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84579fb745-sxp7x" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.202396 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-84579fb745-sxp7x"] Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.220872 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-rmwws"] Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.222509 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-rmwws" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.229724 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.230104 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-x5726" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.230241 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.230363 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.230778 4857 scope.go:117] "RemoveContainer" containerID="3e9d36f7893d9f1cc6753c2afab65986137c59e8ac6eb66a5f0a1735633d260e" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.231000 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 14:59:08 crc kubenswrapper[4857]: E1128 14:59:08.231022 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.244622 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9efeffff-1d6d-4174-8fd8-deea5db88bb1" path="/var/lib/kubelet/pods/9efeffff-1d6d-4174-8fd8-deea5db88bb1/volumes" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.259205 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-rmwws"] Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.348511 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa98099d-1408-4931-9e60-6b0cddb52a33-config-data\") pod \"keystone-bootstrap-rmwws\" (UID: \"fa98099d-1408-4931-9e60-6b0cddb52a33\") " pod="openstack/keystone-bootstrap-rmwws" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.349325 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa98099d-1408-4931-9e60-6b0cddb52a33-combined-ca-bundle\") pod \"keystone-bootstrap-rmwws\" (UID: \"fa98099d-1408-4931-9e60-6b0cddb52a33\") " pod="openstack/keystone-bootstrap-rmwws" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.350300 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d5qb9\" (UniqueName: \"kubernetes.io/projected/489c405d-7a9b-47ec-9a23-77c51bc73f0a-kube-api-access-d5qb9\") pod \"dnsmasq-dns-84579fb745-sxp7x\" (UID: \"489c405d-7a9b-47ec-9a23-77c51bc73f0a\") " pod="openstack/dnsmasq-dns-84579fb745-sxp7x" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.350374 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/489c405d-7a9b-47ec-9a23-77c51bc73f0a-config\") pod \"dnsmasq-dns-84579fb745-sxp7x\" (UID: \"489c405d-7a9b-47ec-9a23-77c51bc73f0a\") " 
pod="openstack/dnsmasq-dns-84579fb745-sxp7x" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.350415 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/489c405d-7a9b-47ec-9a23-77c51bc73f0a-dns-svc\") pod \"dnsmasq-dns-84579fb745-sxp7x\" (UID: \"489c405d-7a9b-47ec-9a23-77c51bc73f0a\") " pod="openstack/dnsmasq-dns-84579fb745-sxp7x" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.350468 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/489c405d-7a9b-47ec-9a23-77c51bc73f0a-ovsdbserver-sb\") pod \"dnsmasq-dns-84579fb745-sxp7x\" (UID: \"489c405d-7a9b-47ec-9a23-77c51bc73f0a\") " pod="openstack/dnsmasq-dns-84579fb745-sxp7x" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.350720 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fa98099d-1408-4931-9e60-6b0cddb52a33-fernet-keys\") pod \"keystone-bootstrap-rmwws\" (UID: \"fa98099d-1408-4931-9e60-6b0cddb52a33\") " pod="openstack/keystone-bootstrap-rmwws" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.350794 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/fa98099d-1408-4931-9e60-6b0cddb52a33-credential-keys\") pod \"keystone-bootstrap-rmwws\" (UID: \"fa98099d-1408-4931-9e60-6b0cddb52a33\") " pod="openstack/keystone-bootstrap-rmwws" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.351043 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/489c405d-7a9b-47ec-9a23-77c51bc73f0a-ovsdbserver-nb\") pod \"dnsmasq-dns-84579fb745-sxp7x\" (UID: \"489c405d-7a9b-47ec-9a23-77c51bc73f0a\") " pod="openstack/dnsmasq-dns-84579fb745-sxp7x" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.351082 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9vlp\" (UniqueName: \"kubernetes.io/projected/fa98099d-1408-4931-9e60-6b0cddb52a33-kube-api-access-g9vlp\") pod \"keystone-bootstrap-rmwws\" (UID: \"fa98099d-1408-4931-9e60-6b0cddb52a33\") " pod="openstack/keystone-bootstrap-rmwws" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.351103 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fa98099d-1408-4931-9e60-6b0cddb52a33-scripts\") pod \"keystone-bootstrap-rmwws\" (UID: \"fa98099d-1408-4931-9e60-6b0cddb52a33\") " pod="openstack/keystone-bootstrap-rmwws" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.452179 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/489c405d-7a9b-47ec-9a23-77c51bc73f0a-ovsdbserver-nb\") pod \"dnsmasq-dns-84579fb745-sxp7x\" (UID: \"489c405d-7a9b-47ec-9a23-77c51bc73f0a\") " pod="openstack/dnsmasq-dns-84579fb745-sxp7x" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.452224 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9vlp\" (UniqueName: \"kubernetes.io/projected/fa98099d-1408-4931-9e60-6b0cddb52a33-kube-api-access-g9vlp\") pod \"keystone-bootstrap-rmwws\" (UID: 
\"fa98099d-1408-4931-9e60-6b0cddb52a33\") " pod="openstack/keystone-bootstrap-rmwws" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.452248 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fa98099d-1408-4931-9e60-6b0cddb52a33-scripts\") pod \"keystone-bootstrap-rmwws\" (UID: \"fa98099d-1408-4931-9e60-6b0cddb52a33\") " pod="openstack/keystone-bootstrap-rmwws" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.452277 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa98099d-1408-4931-9e60-6b0cddb52a33-config-data\") pod \"keystone-bootstrap-rmwws\" (UID: \"fa98099d-1408-4931-9e60-6b0cddb52a33\") " pod="openstack/keystone-bootstrap-rmwws" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.452302 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa98099d-1408-4931-9e60-6b0cddb52a33-combined-ca-bundle\") pod \"keystone-bootstrap-rmwws\" (UID: \"fa98099d-1408-4931-9e60-6b0cddb52a33\") " pod="openstack/keystone-bootstrap-rmwws" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.452321 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d5qb9\" (UniqueName: \"kubernetes.io/projected/489c405d-7a9b-47ec-9a23-77c51bc73f0a-kube-api-access-d5qb9\") pod \"dnsmasq-dns-84579fb745-sxp7x\" (UID: \"489c405d-7a9b-47ec-9a23-77c51bc73f0a\") " pod="openstack/dnsmasq-dns-84579fb745-sxp7x" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.452342 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/489c405d-7a9b-47ec-9a23-77c51bc73f0a-config\") pod \"dnsmasq-dns-84579fb745-sxp7x\" (UID: \"489c405d-7a9b-47ec-9a23-77c51bc73f0a\") " pod="openstack/dnsmasq-dns-84579fb745-sxp7x" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.452368 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/489c405d-7a9b-47ec-9a23-77c51bc73f0a-dns-svc\") pod \"dnsmasq-dns-84579fb745-sxp7x\" (UID: \"489c405d-7a9b-47ec-9a23-77c51bc73f0a\") " pod="openstack/dnsmasq-dns-84579fb745-sxp7x" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.452383 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/489c405d-7a9b-47ec-9a23-77c51bc73f0a-ovsdbserver-sb\") pod \"dnsmasq-dns-84579fb745-sxp7x\" (UID: \"489c405d-7a9b-47ec-9a23-77c51bc73f0a\") " pod="openstack/dnsmasq-dns-84579fb745-sxp7x" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.452425 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fa98099d-1408-4931-9e60-6b0cddb52a33-fernet-keys\") pod \"keystone-bootstrap-rmwws\" (UID: \"fa98099d-1408-4931-9e60-6b0cddb52a33\") " pod="openstack/keystone-bootstrap-rmwws" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.452459 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/fa98099d-1408-4931-9e60-6b0cddb52a33-credential-keys\") pod \"keystone-bootstrap-rmwws\" (UID: \"fa98099d-1408-4931-9e60-6b0cddb52a33\") " pod="openstack/keystone-bootstrap-rmwws" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 
14:59:08.453968 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/489c405d-7a9b-47ec-9a23-77c51bc73f0a-ovsdbserver-nb\") pod \"dnsmasq-dns-84579fb745-sxp7x\" (UID: \"489c405d-7a9b-47ec-9a23-77c51bc73f0a\") " pod="openstack/dnsmasq-dns-84579fb745-sxp7x" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.454124 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/489c405d-7a9b-47ec-9a23-77c51bc73f0a-config\") pod \"dnsmasq-dns-84579fb745-sxp7x\" (UID: \"489c405d-7a9b-47ec-9a23-77c51bc73f0a\") " pod="openstack/dnsmasq-dns-84579fb745-sxp7x" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.454252 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/489c405d-7a9b-47ec-9a23-77c51bc73f0a-ovsdbserver-sb\") pod \"dnsmasq-dns-84579fb745-sxp7x\" (UID: \"489c405d-7a9b-47ec-9a23-77c51bc73f0a\") " pod="openstack/dnsmasq-dns-84579fb745-sxp7x" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.454719 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/489c405d-7a9b-47ec-9a23-77c51bc73f0a-dns-svc\") pod \"dnsmasq-dns-84579fb745-sxp7x\" (UID: \"489c405d-7a9b-47ec-9a23-77c51bc73f0a\") " pod="openstack/dnsmasq-dns-84579fb745-sxp7x" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.458503 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fa98099d-1408-4931-9e60-6b0cddb52a33-scripts\") pod \"keystone-bootstrap-rmwws\" (UID: \"fa98099d-1408-4931-9e60-6b0cddb52a33\") " pod="openstack/keystone-bootstrap-rmwws" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.458735 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/fa98099d-1408-4931-9e60-6b0cddb52a33-credential-keys\") pod \"keystone-bootstrap-rmwws\" (UID: \"fa98099d-1408-4931-9e60-6b0cddb52a33\") " pod="openstack/keystone-bootstrap-rmwws" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.458891 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fa98099d-1408-4931-9e60-6b0cddb52a33-fernet-keys\") pod \"keystone-bootstrap-rmwws\" (UID: \"fa98099d-1408-4931-9e60-6b0cddb52a33\") " pod="openstack/keystone-bootstrap-rmwws" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.459924 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa98099d-1408-4931-9e60-6b0cddb52a33-combined-ca-bundle\") pod \"keystone-bootstrap-rmwws\" (UID: \"fa98099d-1408-4931-9e60-6b0cddb52a33\") " pod="openstack/keystone-bootstrap-rmwws" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.463806 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa98099d-1408-4931-9e60-6b0cddb52a33-config-data\") pod \"keystone-bootstrap-rmwws\" (UID: \"fa98099d-1408-4931-9e60-6b0cddb52a33\") " pod="openstack/keystone-bootstrap-rmwws" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.483627 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9vlp\" (UniqueName: \"kubernetes.io/projected/fa98099d-1408-4931-9e60-6b0cddb52a33-kube-api-access-g9vlp\") pod 
\"keystone-bootstrap-rmwws\" (UID: \"fa98099d-1408-4931-9e60-6b0cddb52a33\") " pod="openstack/keystone-bootstrap-rmwws" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.490454 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d5qb9\" (UniqueName: \"kubernetes.io/projected/489c405d-7a9b-47ec-9a23-77c51bc73f0a-kube-api-access-d5qb9\") pod \"dnsmasq-dns-84579fb745-sxp7x\" (UID: \"489c405d-7a9b-47ec-9a23-77c51bc73f0a\") " pod="openstack/dnsmasq-dns-84579fb745-sxp7x" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.508881 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84579fb745-sxp7x" Nov 28 14:59:08 crc kubenswrapper[4857]: I1128 14:59:08.552525 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-rmwws" Nov 28 14:59:09 crc kubenswrapper[4857]: I1128 14:59:09.028711 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-84579fb745-sxp7x"] Nov 28 14:59:09 crc kubenswrapper[4857]: W1128 14:59:09.053988 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod489c405d_7a9b_47ec_9a23_77c51bc73f0a.slice/crio-9393f3163363f8e20ae9199b263e4d0998a5c972481c0ac0d0809534649e18f5 WatchSource:0}: Error finding container 9393f3163363f8e20ae9199b263e4d0998a5c972481c0ac0d0809534649e18f5: Status 404 returned error can't find the container with id 9393f3163363f8e20ae9199b263e4d0998a5c972481c0ac0d0809534649e18f5 Nov 28 14:59:09 crc kubenswrapper[4857]: I1128 14:59:09.145981 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-rmwws"] Nov 28 14:59:09 crc kubenswrapper[4857]: W1128 14:59:09.156818 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfa98099d_1408_4931_9e60_6b0cddb52a33.slice/crio-93f48a76939bbf7c72aba4d55ba9250c98e8f5a156d3b75ce313c04b1e3d3fbb WatchSource:0}: Error finding container 93f48a76939bbf7c72aba4d55ba9250c98e8f5a156d3b75ce313c04b1e3d3fbb: Status 404 returned error can't find the container with id 93f48a76939bbf7c72aba4d55ba9250c98e8f5a156d3b75ce313c04b1e3d3fbb Nov 28 14:59:09 crc kubenswrapper[4857]: I1128 14:59:09.935890 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-rmwws" event={"ID":"fa98099d-1408-4931-9e60-6b0cddb52a33","Type":"ContainerStarted","Data":"182ea2ef9e9ffe6017b9dc5af6c4d660035946c417d9373027669789c1a43ff7"} Nov 28 14:59:09 crc kubenswrapper[4857]: I1128 14:59:09.936208 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-rmwws" event={"ID":"fa98099d-1408-4931-9e60-6b0cddb52a33","Type":"ContainerStarted","Data":"93f48a76939bbf7c72aba4d55ba9250c98e8f5a156d3b75ce313c04b1e3d3fbb"} Nov 28 14:59:09 crc kubenswrapper[4857]: I1128 14:59:09.938101 4857 generic.go:334] "Generic (PLEG): container finished" podID="489c405d-7a9b-47ec-9a23-77c51bc73f0a" containerID="316525afd043a12c14f7fe7680d359118a05c982c5f8aa8d79d9b8a742bce663" exitCode=0 Nov 28 14:59:09 crc kubenswrapper[4857]: I1128 14:59:09.938820 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84579fb745-sxp7x" event={"ID":"489c405d-7a9b-47ec-9a23-77c51bc73f0a","Type":"ContainerDied","Data":"316525afd043a12c14f7fe7680d359118a05c982c5f8aa8d79d9b8a742bce663"} Nov 28 14:59:09 crc kubenswrapper[4857]: I1128 14:59:09.938921 4857 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84579fb745-sxp7x" event={"ID":"489c405d-7a9b-47ec-9a23-77c51bc73f0a","Type":"ContainerStarted","Data":"9393f3163363f8e20ae9199b263e4d0998a5c972481c0ac0d0809534649e18f5"} Nov 28 14:59:09 crc kubenswrapper[4857]: I1128 14:59:09.985719 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-rmwws" podStartSLOduration=1.985683609 podStartE2EDuration="1.985683609s" podCreationTimestamp="2025-11-28 14:59:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 14:59:09.973071642 +0000 UTC m=+5400.097013079" watchObservedRunningTime="2025-11-28 14:59:09.985683609 +0000 UTC m=+5400.109625086" Nov 28 14:59:10 crc kubenswrapper[4857]: I1128 14:59:10.958015 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84579fb745-sxp7x" event={"ID":"489c405d-7a9b-47ec-9a23-77c51bc73f0a","Type":"ContainerStarted","Data":"1bed143d28f937e7dd46ca81891dfbcf1ef54efebf63f89151307deb62d060a1"} Nov 28 14:59:10 crc kubenswrapper[4857]: I1128 14:59:10.999379 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-84579fb745-sxp7x" podStartSLOduration=2.999327725 podStartE2EDuration="2.999327725s" podCreationTimestamp="2025-11-28 14:59:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 14:59:10.98830947 +0000 UTC m=+5401.112250917" watchObservedRunningTime="2025-11-28 14:59:10.999327725 +0000 UTC m=+5401.123269232" Nov 28 14:59:11 crc kubenswrapper[4857]: I1128 14:59:11.968296 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-84579fb745-sxp7x" Nov 28 14:59:13 crc kubenswrapper[4857]: I1128 14:59:13.010331 4857 generic.go:334] "Generic (PLEG): container finished" podID="fa98099d-1408-4931-9e60-6b0cddb52a33" containerID="182ea2ef9e9ffe6017b9dc5af6c4d660035946c417d9373027669789c1a43ff7" exitCode=0 Nov 28 14:59:13 crc kubenswrapper[4857]: I1128 14:59:13.012920 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-rmwws" event={"ID":"fa98099d-1408-4931-9e60-6b0cddb52a33","Type":"ContainerDied","Data":"182ea2ef9e9ffe6017b9dc5af6c4d660035946c417d9373027669789c1a43ff7"} Nov 28 14:59:14 crc kubenswrapper[4857]: I1128 14:59:14.411220 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-rmwws" Nov 28 14:59:14 crc kubenswrapper[4857]: I1128 14:59:14.593536 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/fa98099d-1408-4931-9e60-6b0cddb52a33-credential-keys\") pod \"fa98099d-1408-4931-9e60-6b0cddb52a33\" (UID: \"fa98099d-1408-4931-9e60-6b0cddb52a33\") " Nov 28 14:59:14 crc kubenswrapper[4857]: I1128 14:59:14.593800 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa98099d-1408-4931-9e60-6b0cddb52a33-config-data\") pod \"fa98099d-1408-4931-9e60-6b0cddb52a33\" (UID: \"fa98099d-1408-4931-9e60-6b0cddb52a33\") " Nov 28 14:59:14 crc kubenswrapper[4857]: I1128 14:59:14.593861 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fa98099d-1408-4931-9e60-6b0cddb52a33-scripts\") pod \"fa98099d-1408-4931-9e60-6b0cddb52a33\" (UID: \"fa98099d-1408-4931-9e60-6b0cddb52a33\") " Nov 28 14:59:14 crc kubenswrapper[4857]: I1128 14:59:14.594005 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fa98099d-1408-4931-9e60-6b0cddb52a33-fernet-keys\") pod \"fa98099d-1408-4931-9e60-6b0cddb52a33\" (UID: \"fa98099d-1408-4931-9e60-6b0cddb52a33\") " Nov 28 14:59:14 crc kubenswrapper[4857]: I1128 14:59:14.594143 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa98099d-1408-4931-9e60-6b0cddb52a33-combined-ca-bundle\") pod \"fa98099d-1408-4931-9e60-6b0cddb52a33\" (UID: \"fa98099d-1408-4931-9e60-6b0cddb52a33\") " Nov 28 14:59:14 crc kubenswrapper[4857]: I1128 14:59:14.594301 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g9vlp\" (UniqueName: \"kubernetes.io/projected/fa98099d-1408-4931-9e60-6b0cddb52a33-kube-api-access-g9vlp\") pod \"fa98099d-1408-4931-9e60-6b0cddb52a33\" (UID: \"fa98099d-1408-4931-9e60-6b0cddb52a33\") " Nov 28 14:59:14 crc kubenswrapper[4857]: I1128 14:59:14.605341 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa98099d-1408-4931-9e60-6b0cddb52a33-kube-api-access-g9vlp" (OuterVolumeSpecName: "kube-api-access-g9vlp") pod "fa98099d-1408-4931-9e60-6b0cddb52a33" (UID: "fa98099d-1408-4931-9e60-6b0cddb52a33"). InnerVolumeSpecName "kube-api-access-g9vlp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:59:14 crc kubenswrapper[4857]: I1128 14:59:14.609342 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa98099d-1408-4931-9e60-6b0cddb52a33-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "fa98099d-1408-4931-9e60-6b0cddb52a33" (UID: "fa98099d-1408-4931-9e60-6b0cddb52a33"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 14:59:14 crc kubenswrapper[4857]: I1128 14:59:14.609372 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa98099d-1408-4931-9e60-6b0cddb52a33-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "fa98099d-1408-4931-9e60-6b0cddb52a33" (UID: "fa98099d-1408-4931-9e60-6b0cddb52a33"). InnerVolumeSpecName "credential-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 14:59:14 crc kubenswrapper[4857]: I1128 14:59:14.610042 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa98099d-1408-4931-9e60-6b0cddb52a33-scripts" (OuterVolumeSpecName: "scripts") pod "fa98099d-1408-4931-9e60-6b0cddb52a33" (UID: "fa98099d-1408-4931-9e60-6b0cddb52a33"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 14:59:14 crc kubenswrapper[4857]: I1128 14:59:14.631991 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa98099d-1408-4931-9e60-6b0cddb52a33-config-data" (OuterVolumeSpecName: "config-data") pod "fa98099d-1408-4931-9e60-6b0cddb52a33" (UID: "fa98099d-1408-4931-9e60-6b0cddb52a33"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 14:59:14 crc kubenswrapper[4857]: I1128 14:59:14.647577 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa98099d-1408-4931-9e60-6b0cddb52a33-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fa98099d-1408-4931-9e60-6b0cddb52a33" (UID: "fa98099d-1408-4931-9e60-6b0cddb52a33"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 14:59:14 crc kubenswrapper[4857]: I1128 14:59:14.697253 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa98099d-1408-4931-9e60-6b0cddb52a33-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 14:59:14 crc kubenswrapper[4857]: I1128 14:59:14.697308 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g9vlp\" (UniqueName: \"kubernetes.io/projected/fa98099d-1408-4931-9e60-6b0cddb52a33-kube-api-access-g9vlp\") on node \"crc\" DevicePath \"\"" Nov 28 14:59:14 crc kubenswrapper[4857]: I1128 14:59:14.697323 4857 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/fa98099d-1408-4931-9e60-6b0cddb52a33-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 28 14:59:14 crc kubenswrapper[4857]: I1128 14:59:14.697333 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa98099d-1408-4931-9e60-6b0cddb52a33-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 14:59:14 crc kubenswrapper[4857]: I1128 14:59:14.697345 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fa98099d-1408-4931-9e60-6b0cddb52a33-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 14:59:14 crc kubenswrapper[4857]: I1128 14:59:14.697355 4857 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/fa98099d-1408-4931-9e60-6b0cddb52a33-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 28 14:59:15 crc kubenswrapper[4857]: I1128 14:59:15.037827 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-rmwws" event={"ID":"fa98099d-1408-4931-9e60-6b0cddb52a33","Type":"ContainerDied","Data":"93f48a76939bbf7c72aba4d55ba9250c98e8f5a156d3b75ce313c04b1e3d3fbb"} Nov 28 14:59:15 crc kubenswrapper[4857]: I1128 14:59:15.038430 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="93f48a76939bbf7c72aba4d55ba9250c98e8f5a156d3b75ce313c04b1e3d3fbb" Nov 28 14:59:15 crc kubenswrapper[4857]: I1128 14:59:15.038212 4857 util.go:48] "No ready 
sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-rmwws" Nov 28 14:59:15 crc kubenswrapper[4857]: I1128 14:59:15.147024 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-rmwws"] Nov 28 14:59:15 crc kubenswrapper[4857]: I1128 14:59:15.159221 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-rmwws"] Nov 28 14:59:15 crc kubenswrapper[4857]: I1128 14:59:15.231787 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-qcn9g"] Nov 28 14:59:15 crc kubenswrapper[4857]: E1128 14:59:15.232390 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa98099d-1408-4931-9e60-6b0cddb52a33" containerName="keystone-bootstrap" Nov 28 14:59:15 crc kubenswrapper[4857]: I1128 14:59:15.232423 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa98099d-1408-4931-9e60-6b0cddb52a33" containerName="keystone-bootstrap" Nov 28 14:59:15 crc kubenswrapper[4857]: I1128 14:59:15.232725 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa98099d-1408-4931-9e60-6b0cddb52a33" containerName="keystone-bootstrap" Nov 28 14:59:15 crc kubenswrapper[4857]: I1128 14:59:15.233571 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-qcn9g" Nov 28 14:59:15 crc kubenswrapper[4857]: I1128 14:59:15.240404 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 28 14:59:15 crc kubenswrapper[4857]: I1128 14:59:15.240528 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-x5726" Nov 28 14:59:15 crc kubenswrapper[4857]: I1128 14:59:15.240409 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 28 14:59:15 crc kubenswrapper[4857]: I1128 14:59:15.241991 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 28 14:59:15 crc kubenswrapper[4857]: I1128 14:59:15.245090 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 28 14:59:15 crc kubenswrapper[4857]: I1128 14:59:15.271403 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-qcn9g"] Nov 28 14:59:15 crc kubenswrapper[4857]: I1128 14:59:15.408798 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nnm79\" (UniqueName: \"kubernetes.io/projected/778872e4-550d-471f-989e-866581a3ba7b-kube-api-access-nnm79\") pod \"keystone-bootstrap-qcn9g\" (UID: \"778872e4-550d-471f-989e-866581a3ba7b\") " pod="openstack/keystone-bootstrap-qcn9g" Nov 28 14:59:15 crc kubenswrapper[4857]: I1128 14:59:15.408904 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/778872e4-550d-471f-989e-866581a3ba7b-scripts\") pod \"keystone-bootstrap-qcn9g\" (UID: \"778872e4-550d-471f-989e-866581a3ba7b\") " pod="openstack/keystone-bootstrap-qcn9g" Nov 28 14:59:15 crc kubenswrapper[4857]: I1128 14:59:15.408995 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/778872e4-550d-471f-989e-866581a3ba7b-credential-keys\") pod \"keystone-bootstrap-qcn9g\" (UID: \"778872e4-550d-471f-989e-866581a3ba7b\") " pod="openstack/keystone-bootstrap-qcn9g" Nov 28 14:59:15 crc 
kubenswrapper[4857]: I1128 14:59:15.409056 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/778872e4-550d-471f-989e-866581a3ba7b-combined-ca-bundle\") pod \"keystone-bootstrap-qcn9g\" (UID: \"778872e4-550d-471f-989e-866581a3ba7b\") " pod="openstack/keystone-bootstrap-qcn9g" Nov 28 14:59:15 crc kubenswrapper[4857]: I1128 14:59:15.409093 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/778872e4-550d-471f-989e-866581a3ba7b-fernet-keys\") pod \"keystone-bootstrap-qcn9g\" (UID: \"778872e4-550d-471f-989e-866581a3ba7b\") " pod="openstack/keystone-bootstrap-qcn9g" Nov 28 14:59:15 crc kubenswrapper[4857]: I1128 14:59:15.409222 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/778872e4-550d-471f-989e-866581a3ba7b-config-data\") pod \"keystone-bootstrap-qcn9g\" (UID: \"778872e4-550d-471f-989e-866581a3ba7b\") " pod="openstack/keystone-bootstrap-qcn9g" Nov 28 14:59:15 crc kubenswrapper[4857]: I1128 14:59:15.510920 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/778872e4-550d-471f-989e-866581a3ba7b-scripts\") pod \"keystone-bootstrap-qcn9g\" (UID: \"778872e4-550d-471f-989e-866581a3ba7b\") " pod="openstack/keystone-bootstrap-qcn9g" Nov 28 14:59:15 crc kubenswrapper[4857]: I1128 14:59:15.510990 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/778872e4-550d-471f-989e-866581a3ba7b-credential-keys\") pod \"keystone-bootstrap-qcn9g\" (UID: \"778872e4-550d-471f-989e-866581a3ba7b\") " pod="openstack/keystone-bootstrap-qcn9g" Nov 28 14:59:15 crc kubenswrapper[4857]: I1128 14:59:15.511041 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/778872e4-550d-471f-989e-866581a3ba7b-combined-ca-bundle\") pod \"keystone-bootstrap-qcn9g\" (UID: \"778872e4-550d-471f-989e-866581a3ba7b\") " pod="openstack/keystone-bootstrap-qcn9g" Nov 28 14:59:15 crc kubenswrapper[4857]: I1128 14:59:15.511072 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/778872e4-550d-471f-989e-866581a3ba7b-fernet-keys\") pod \"keystone-bootstrap-qcn9g\" (UID: \"778872e4-550d-471f-989e-866581a3ba7b\") " pod="openstack/keystone-bootstrap-qcn9g" Nov 28 14:59:15 crc kubenswrapper[4857]: I1128 14:59:15.511103 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/778872e4-550d-471f-989e-866581a3ba7b-config-data\") pod \"keystone-bootstrap-qcn9g\" (UID: \"778872e4-550d-471f-989e-866581a3ba7b\") " pod="openstack/keystone-bootstrap-qcn9g" Nov 28 14:59:15 crc kubenswrapper[4857]: I1128 14:59:15.511143 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nnm79\" (UniqueName: \"kubernetes.io/projected/778872e4-550d-471f-989e-866581a3ba7b-kube-api-access-nnm79\") pod \"keystone-bootstrap-qcn9g\" (UID: \"778872e4-550d-471f-989e-866581a3ba7b\") " pod="openstack/keystone-bootstrap-qcn9g" Nov 28 14:59:15 crc kubenswrapper[4857]: I1128 14:59:15.519832 4857 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/778872e4-550d-471f-989e-866581a3ba7b-combined-ca-bundle\") pod \"keystone-bootstrap-qcn9g\" (UID: \"778872e4-550d-471f-989e-866581a3ba7b\") " pod="openstack/keystone-bootstrap-qcn9g" Nov 28 14:59:15 crc kubenswrapper[4857]: I1128 14:59:15.519857 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/778872e4-550d-471f-989e-866581a3ba7b-config-data\") pod \"keystone-bootstrap-qcn9g\" (UID: \"778872e4-550d-471f-989e-866581a3ba7b\") " pod="openstack/keystone-bootstrap-qcn9g" Nov 28 14:59:15 crc kubenswrapper[4857]: I1128 14:59:15.519899 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/778872e4-550d-471f-989e-866581a3ba7b-scripts\") pod \"keystone-bootstrap-qcn9g\" (UID: \"778872e4-550d-471f-989e-866581a3ba7b\") " pod="openstack/keystone-bootstrap-qcn9g" Nov 28 14:59:15 crc kubenswrapper[4857]: I1128 14:59:15.520183 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/778872e4-550d-471f-989e-866581a3ba7b-credential-keys\") pod \"keystone-bootstrap-qcn9g\" (UID: \"778872e4-550d-471f-989e-866581a3ba7b\") " pod="openstack/keystone-bootstrap-qcn9g" Nov 28 14:59:15 crc kubenswrapper[4857]: I1128 14:59:15.521239 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/778872e4-550d-471f-989e-866581a3ba7b-fernet-keys\") pod \"keystone-bootstrap-qcn9g\" (UID: \"778872e4-550d-471f-989e-866581a3ba7b\") " pod="openstack/keystone-bootstrap-qcn9g" Nov 28 14:59:15 crc kubenswrapper[4857]: I1128 14:59:15.539875 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nnm79\" (UniqueName: \"kubernetes.io/projected/778872e4-550d-471f-989e-866581a3ba7b-kube-api-access-nnm79\") pod \"keystone-bootstrap-qcn9g\" (UID: \"778872e4-550d-471f-989e-866581a3ba7b\") " pod="openstack/keystone-bootstrap-qcn9g" Nov 28 14:59:15 crc kubenswrapper[4857]: I1128 14:59:15.574596 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-qcn9g" Nov 28 14:59:16 crc kubenswrapper[4857]: W1128 14:59:16.099493 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod778872e4_550d_471f_989e_866581a3ba7b.slice/crio-bac7f8d2174a7b50553006f11c3597a55308010d866d9508e4881dafff4f3e3f WatchSource:0}: Error finding container bac7f8d2174a7b50553006f11c3597a55308010d866d9508e4881dafff4f3e3f: Status 404 returned error can't find the container with id bac7f8d2174a7b50553006f11c3597a55308010d866d9508e4881dafff4f3e3f Nov 28 14:59:16 crc kubenswrapper[4857]: I1128 14:59:16.103650 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-qcn9g"] Nov 28 14:59:16 crc kubenswrapper[4857]: I1128 14:59:16.242938 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fa98099d-1408-4931-9e60-6b0cddb52a33" path="/var/lib/kubelet/pods/fa98099d-1408-4931-9e60-6b0cddb52a33/volumes" Nov 28 14:59:17 crc kubenswrapper[4857]: I1128 14:59:17.067917 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-qcn9g" event={"ID":"778872e4-550d-471f-989e-866581a3ba7b","Type":"ContainerStarted","Data":"ef7832557a5b4faea11181838ee8f7cc774a8ed39c8007866fd7edfbd451b7cd"} Nov 28 14:59:17 crc kubenswrapper[4857]: I1128 14:59:17.067991 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-qcn9g" event={"ID":"778872e4-550d-471f-989e-866581a3ba7b","Type":"ContainerStarted","Data":"bac7f8d2174a7b50553006f11c3597a55308010d866d9508e4881dafff4f3e3f"} Nov 28 14:59:17 crc kubenswrapper[4857]: I1128 14:59:17.164545 4857 scope.go:117] "RemoveContainer" containerID="34b6c471388ade7c1ca12f7030a3c4a2762c97e2aa0fcfd2d39276f604dc6eea" Nov 28 14:59:17 crc kubenswrapper[4857]: I1128 14:59:17.188423 4857 scope.go:117] "RemoveContainer" containerID="bf3886ebf17a80e05fc67649b5e4d9d402f101838495f5623b8d7c742ca9d1c6" Nov 28 14:59:17 crc kubenswrapper[4857]: I1128 14:59:17.252504 4857 scope.go:117] "RemoveContainer" containerID="4571ef6083c534717395fddb9b91748050f9b5d47af431a336aaec82d893ec6e" Nov 28 14:59:17 crc kubenswrapper[4857]: I1128 14:59:17.272180 4857 scope.go:117] "RemoveContainer" containerID="3569a6d8b64c49c9fd321db1e257107dabed5eabd94282c2d4681ebf9bca59b1" Nov 28 14:59:17 crc kubenswrapper[4857]: I1128 14:59:17.307339 4857 scope.go:117] "RemoveContainer" containerID="1227345c6c1b250b7d629c71ef4fd08a23ea0f619a4f20395322d31771f71c6b" Nov 28 14:59:17 crc kubenswrapper[4857]: I1128 14:59:17.339479 4857 scope.go:117] "RemoveContainer" containerID="c68952e32acee964b334e8293f6946c4c75aaf6ed121b14feb50d1035595703d" Nov 28 14:59:17 crc kubenswrapper[4857]: I1128 14:59:17.358669 4857 scope.go:117] "RemoveContainer" containerID="3329be992d61209ed52de6fbe7ef61e67bf87720819f8e7d908b083c09c80607" Nov 28 14:59:17 crc kubenswrapper[4857]: I1128 14:59:17.388878 4857 scope.go:117] "RemoveContainer" containerID="12e4b9aec4e908a5b324876cc69c4e82352b942ca1dc187454dc666c72af60ec" Nov 28 14:59:18 crc kubenswrapper[4857]: I1128 14:59:18.511643 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-84579fb745-sxp7x" Nov 28 14:59:18 crc kubenswrapper[4857]: I1128 14:59:18.553254 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-qcn9g" podStartSLOduration=3.553221674 podStartE2EDuration="3.553221674s" podCreationTimestamp="2025-11-28 14:59:15 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 14:59:17.091036328 +0000 UTC m=+5407.214977805" watchObservedRunningTime="2025-11-28 14:59:18.553221674 +0000 UTC m=+5408.677163141" Nov 28 14:59:18 crc kubenswrapper[4857]: I1128 14:59:18.606599 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-764bd786bf-qn86q"] Nov 28 14:59:18 crc kubenswrapper[4857]: I1128 14:59:18.606899 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-764bd786bf-qn86q" podUID="0db56d3b-4f53-4cb7-9487-e354c68f6346" containerName="dnsmasq-dns" containerID="cri-o://6968c94745783c56961b5594978f2546c7fe813be98b7aa148992629cb221b09" gracePeriod=10 Nov 28 14:59:19 crc kubenswrapper[4857]: I1128 14:59:19.085147 4857 generic.go:334] "Generic (PLEG): container finished" podID="778872e4-550d-471f-989e-866581a3ba7b" containerID="ef7832557a5b4faea11181838ee8f7cc774a8ed39c8007866fd7edfbd451b7cd" exitCode=0 Nov 28 14:59:19 crc kubenswrapper[4857]: I1128 14:59:19.085244 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-qcn9g" event={"ID":"778872e4-550d-471f-989e-866581a3ba7b","Type":"ContainerDied","Data":"ef7832557a5b4faea11181838ee8f7cc774a8ed39c8007866fd7edfbd451b7cd"} Nov 28 14:59:19 crc kubenswrapper[4857]: I1128 14:59:19.088493 4857 generic.go:334] "Generic (PLEG): container finished" podID="0db56d3b-4f53-4cb7-9487-e354c68f6346" containerID="6968c94745783c56961b5594978f2546c7fe813be98b7aa148992629cb221b09" exitCode=0 Nov 28 14:59:19 crc kubenswrapper[4857]: I1128 14:59:19.088549 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764bd786bf-qn86q" event={"ID":"0db56d3b-4f53-4cb7-9487-e354c68f6346","Type":"ContainerDied","Data":"6968c94745783c56961b5594978f2546c7fe813be98b7aa148992629cb221b09"} Nov 28 14:59:19 crc kubenswrapper[4857]: I1128 14:59:19.088579 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-764bd786bf-qn86q" event={"ID":"0db56d3b-4f53-4cb7-9487-e354c68f6346","Type":"ContainerDied","Data":"dfcda5e20a798bbae5199c119792eabfe57fd68c417a3beeeca5ed0e006f7c4f"} Nov 28 14:59:19 crc kubenswrapper[4857]: I1128 14:59:19.088591 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dfcda5e20a798bbae5199c119792eabfe57fd68c417a3beeeca5ed0e006f7c4f" Nov 28 14:59:19 crc kubenswrapper[4857]: I1128 14:59:19.128414 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-764bd786bf-qn86q" Nov 28 14:59:19 crc kubenswrapper[4857]: I1128 14:59:19.283065 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0db56d3b-4f53-4cb7-9487-e354c68f6346-config\") pod \"0db56d3b-4f53-4cb7-9487-e354c68f6346\" (UID: \"0db56d3b-4f53-4cb7-9487-e354c68f6346\") " Nov 28 14:59:19 crc kubenswrapper[4857]: I1128 14:59:19.283154 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0db56d3b-4f53-4cb7-9487-e354c68f6346-dns-svc\") pod \"0db56d3b-4f53-4cb7-9487-e354c68f6346\" (UID: \"0db56d3b-4f53-4cb7-9487-e354c68f6346\") " Nov 28 14:59:19 crc kubenswrapper[4857]: I1128 14:59:19.283229 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0db56d3b-4f53-4cb7-9487-e354c68f6346-ovsdbserver-sb\") pod \"0db56d3b-4f53-4cb7-9487-e354c68f6346\" (UID: \"0db56d3b-4f53-4cb7-9487-e354c68f6346\") " Nov 28 14:59:19 crc kubenswrapper[4857]: I1128 14:59:19.283310 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0db56d3b-4f53-4cb7-9487-e354c68f6346-ovsdbserver-nb\") pod \"0db56d3b-4f53-4cb7-9487-e354c68f6346\" (UID: \"0db56d3b-4f53-4cb7-9487-e354c68f6346\") " Nov 28 14:59:19 crc kubenswrapper[4857]: I1128 14:59:19.283403 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jksjf\" (UniqueName: \"kubernetes.io/projected/0db56d3b-4f53-4cb7-9487-e354c68f6346-kube-api-access-jksjf\") pod \"0db56d3b-4f53-4cb7-9487-e354c68f6346\" (UID: \"0db56d3b-4f53-4cb7-9487-e354c68f6346\") " Nov 28 14:59:19 crc kubenswrapper[4857]: I1128 14:59:19.291279 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0db56d3b-4f53-4cb7-9487-e354c68f6346-kube-api-access-jksjf" (OuterVolumeSpecName: "kube-api-access-jksjf") pod "0db56d3b-4f53-4cb7-9487-e354c68f6346" (UID: "0db56d3b-4f53-4cb7-9487-e354c68f6346"). InnerVolumeSpecName "kube-api-access-jksjf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:59:19 crc kubenswrapper[4857]: I1128 14:59:19.331243 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0db56d3b-4f53-4cb7-9487-e354c68f6346-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0db56d3b-4f53-4cb7-9487-e354c68f6346" (UID: "0db56d3b-4f53-4cb7-9487-e354c68f6346"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 14:59:19 crc kubenswrapper[4857]: I1128 14:59:19.336192 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0db56d3b-4f53-4cb7-9487-e354c68f6346-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "0db56d3b-4f53-4cb7-9487-e354c68f6346" (UID: "0db56d3b-4f53-4cb7-9487-e354c68f6346"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 14:59:19 crc kubenswrapper[4857]: I1128 14:59:19.347209 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0db56d3b-4f53-4cb7-9487-e354c68f6346-config" (OuterVolumeSpecName: "config") pod "0db56d3b-4f53-4cb7-9487-e354c68f6346" (UID: "0db56d3b-4f53-4cb7-9487-e354c68f6346"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 14:59:19 crc kubenswrapper[4857]: I1128 14:59:19.351271 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0db56d3b-4f53-4cb7-9487-e354c68f6346-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "0db56d3b-4f53-4cb7-9487-e354c68f6346" (UID: "0db56d3b-4f53-4cb7-9487-e354c68f6346"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 14:59:19 crc kubenswrapper[4857]: I1128 14:59:19.385116 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0db56d3b-4f53-4cb7-9487-e354c68f6346-config\") on node \"crc\" DevicePath \"\"" Nov 28 14:59:19 crc kubenswrapper[4857]: I1128 14:59:19.385154 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0db56d3b-4f53-4cb7-9487-e354c68f6346-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 14:59:19 crc kubenswrapper[4857]: I1128 14:59:19.385164 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0db56d3b-4f53-4cb7-9487-e354c68f6346-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 14:59:19 crc kubenswrapper[4857]: I1128 14:59:19.385173 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0db56d3b-4f53-4cb7-9487-e354c68f6346-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 14:59:19 crc kubenswrapper[4857]: I1128 14:59:19.385183 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jksjf\" (UniqueName: \"kubernetes.io/projected/0db56d3b-4f53-4cb7-9487-e354c68f6346-kube-api-access-jksjf\") on node \"crc\" DevicePath \"\"" Nov 28 14:59:20 crc kubenswrapper[4857]: I1128 14:59:20.098738 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-764bd786bf-qn86q" Nov 28 14:59:20 crc kubenswrapper[4857]: I1128 14:59:20.158921 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-764bd786bf-qn86q"] Nov 28 14:59:20 crc kubenswrapper[4857]: I1128 14:59:20.173472 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-764bd786bf-qn86q"] Nov 28 14:59:20 crc kubenswrapper[4857]: I1128 14:59:20.262627 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0db56d3b-4f53-4cb7-9487-e354c68f6346" path="/var/lib/kubelet/pods/0db56d3b-4f53-4cb7-9487-e354c68f6346/volumes" Nov 28 14:59:20 crc kubenswrapper[4857]: I1128 14:59:20.906128 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-qcn9g" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.071440 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/778872e4-550d-471f-989e-866581a3ba7b-credential-keys\") pod \"778872e4-550d-471f-989e-866581a3ba7b\" (UID: \"778872e4-550d-471f-989e-866581a3ba7b\") " Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.071521 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/778872e4-550d-471f-989e-866581a3ba7b-scripts\") pod \"778872e4-550d-471f-989e-866581a3ba7b\" (UID: \"778872e4-550d-471f-989e-866581a3ba7b\") " Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.071544 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/778872e4-550d-471f-989e-866581a3ba7b-config-data\") pod \"778872e4-550d-471f-989e-866581a3ba7b\" (UID: \"778872e4-550d-471f-989e-866581a3ba7b\") " Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.071584 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/778872e4-550d-471f-989e-866581a3ba7b-combined-ca-bundle\") pod \"778872e4-550d-471f-989e-866581a3ba7b\" (UID: \"778872e4-550d-471f-989e-866581a3ba7b\") " Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.071681 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/778872e4-550d-471f-989e-866581a3ba7b-fernet-keys\") pod \"778872e4-550d-471f-989e-866581a3ba7b\" (UID: \"778872e4-550d-471f-989e-866581a3ba7b\") " Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.071706 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nnm79\" (UniqueName: \"kubernetes.io/projected/778872e4-550d-471f-989e-866581a3ba7b-kube-api-access-nnm79\") pod \"778872e4-550d-471f-989e-866581a3ba7b\" (UID: \"778872e4-550d-471f-989e-866581a3ba7b\") " Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.078445 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/778872e4-550d-471f-989e-866581a3ba7b-scripts" (OuterVolumeSpecName: "scripts") pod "778872e4-550d-471f-989e-866581a3ba7b" (UID: "778872e4-550d-471f-989e-866581a3ba7b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.078495 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/778872e4-550d-471f-989e-866581a3ba7b-kube-api-access-nnm79" (OuterVolumeSpecName: "kube-api-access-nnm79") pod "778872e4-550d-471f-989e-866581a3ba7b" (UID: "778872e4-550d-471f-989e-866581a3ba7b"). InnerVolumeSpecName "kube-api-access-nnm79". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.080318 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/778872e4-550d-471f-989e-866581a3ba7b-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "778872e4-550d-471f-989e-866581a3ba7b" (UID: "778872e4-550d-471f-989e-866581a3ba7b"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.081152 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/778872e4-550d-471f-989e-866581a3ba7b-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "778872e4-550d-471f-989e-866581a3ba7b" (UID: "778872e4-550d-471f-989e-866581a3ba7b"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.095706 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/778872e4-550d-471f-989e-866581a3ba7b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "778872e4-550d-471f-989e-866581a3ba7b" (UID: "778872e4-550d-471f-989e-866581a3ba7b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.114150 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-qcn9g" event={"ID":"778872e4-550d-471f-989e-866581a3ba7b","Type":"ContainerDied","Data":"bac7f8d2174a7b50553006f11c3597a55308010d866d9508e4881dafff4f3e3f"} Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.115302 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bac7f8d2174a7b50553006f11c3597a55308010d866d9508e4881dafff4f3e3f" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.114265 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-qcn9g" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.118144 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/778872e4-550d-471f-989e-866581a3ba7b-config-data" (OuterVolumeSpecName: "config-data") pod "778872e4-550d-471f-989e-866581a3ba7b" (UID: "778872e4-550d-471f-989e-866581a3ba7b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.178743 4857 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/778872e4-550d-471f-989e-866581a3ba7b-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.178823 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/778872e4-550d-471f-989e-866581a3ba7b-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.178851 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/778872e4-550d-471f-989e-866581a3ba7b-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.178879 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/778872e4-550d-471f-989e-866581a3ba7b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.178906 4857 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/778872e4-550d-471f-989e-866581a3ba7b-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.178931 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nnm79\" (UniqueName: \"kubernetes.io/projected/778872e4-550d-471f-989e-866581a3ba7b-kube-api-access-nnm79\") on node \"crc\" DevicePath \"\"" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.203333 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-6bf47649b6-ns6fh"] Nov 28 14:59:21 crc kubenswrapper[4857]: E1128 14:59:21.203734 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0db56d3b-4f53-4cb7-9487-e354c68f6346" containerName="init" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.203757 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="0db56d3b-4f53-4cb7-9487-e354c68f6346" containerName="init" Nov 28 14:59:21 crc kubenswrapper[4857]: E1128 14:59:21.203800 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0db56d3b-4f53-4cb7-9487-e354c68f6346" containerName="dnsmasq-dns" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.203810 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="0db56d3b-4f53-4cb7-9487-e354c68f6346" containerName="dnsmasq-dns" Nov 28 14:59:21 crc kubenswrapper[4857]: E1128 14:59:21.203819 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="778872e4-550d-471f-989e-866581a3ba7b" containerName="keystone-bootstrap" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.203828 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="778872e4-550d-471f-989e-866581a3ba7b" containerName="keystone-bootstrap" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.204050 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="778872e4-550d-471f-989e-866581a3ba7b" containerName="keystone-bootstrap" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.204071 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="0db56d3b-4f53-4cb7-9487-e354c68f6346" containerName="dnsmasq-dns" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.204766 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-6bf47649b6-ns6fh" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.218769 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-6bf47649b6-ns6fh"] Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.229173 4857 scope.go:117] "RemoveContainer" containerID="3e9d36f7893d9f1cc6753c2afab65986137c59e8ac6eb66a5f0a1735633d260e" Nov 28 14:59:21 crc kubenswrapper[4857]: E1128 14:59:21.229390 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.280915 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/29fc2d85-fb2f-4941-9b48-f90e099492e5-fernet-keys\") pod \"keystone-6bf47649b6-ns6fh\" (UID: \"29fc2d85-fb2f-4941-9b48-f90e099492e5\") " pod="openstack/keystone-6bf47649b6-ns6fh" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.281022 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qrq6m\" (UniqueName: \"kubernetes.io/projected/29fc2d85-fb2f-4941-9b48-f90e099492e5-kube-api-access-qrq6m\") pod \"keystone-6bf47649b6-ns6fh\" (UID: \"29fc2d85-fb2f-4941-9b48-f90e099492e5\") " pod="openstack/keystone-6bf47649b6-ns6fh" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.281049 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/29fc2d85-fb2f-4941-9b48-f90e099492e5-credential-keys\") pod \"keystone-6bf47649b6-ns6fh\" (UID: \"29fc2d85-fb2f-4941-9b48-f90e099492e5\") " pod="openstack/keystone-6bf47649b6-ns6fh" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.281312 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/29fc2d85-fb2f-4941-9b48-f90e099492e5-scripts\") pod \"keystone-6bf47649b6-ns6fh\" (UID: \"29fc2d85-fb2f-4941-9b48-f90e099492e5\") " pod="openstack/keystone-6bf47649b6-ns6fh" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.281507 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29fc2d85-fb2f-4941-9b48-f90e099492e5-config-data\") pod \"keystone-6bf47649b6-ns6fh\" (UID: \"29fc2d85-fb2f-4941-9b48-f90e099492e5\") " pod="openstack/keystone-6bf47649b6-ns6fh" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.281561 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29fc2d85-fb2f-4941-9b48-f90e099492e5-combined-ca-bundle\") pod \"keystone-6bf47649b6-ns6fh\" (UID: \"29fc2d85-fb2f-4941-9b48-f90e099492e5\") " pod="openstack/keystone-6bf47649b6-ns6fh" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.384085 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/29fc2d85-fb2f-4941-9b48-f90e099492e5-fernet-keys\") pod 
\"keystone-6bf47649b6-ns6fh\" (UID: \"29fc2d85-fb2f-4941-9b48-f90e099492e5\") " pod="openstack/keystone-6bf47649b6-ns6fh" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.386469 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qrq6m\" (UniqueName: \"kubernetes.io/projected/29fc2d85-fb2f-4941-9b48-f90e099492e5-kube-api-access-qrq6m\") pod \"keystone-6bf47649b6-ns6fh\" (UID: \"29fc2d85-fb2f-4941-9b48-f90e099492e5\") " pod="openstack/keystone-6bf47649b6-ns6fh" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.386604 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/29fc2d85-fb2f-4941-9b48-f90e099492e5-credential-keys\") pod \"keystone-6bf47649b6-ns6fh\" (UID: \"29fc2d85-fb2f-4941-9b48-f90e099492e5\") " pod="openstack/keystone-6bf47649b6-ns6fh" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.386686 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/29fc2d85-fb2f-4941-9b48-f90e099492e5-scripts\") pod \"keystone-6bf47649b6-ns6fh\" (UID: \"29fc2d85-fb2f-4941-9b48-f90e099492e5\") " pod="openstack/keystone-6bf47649b6-ns6fh" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.386917 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29fc2d85-fb2f-4941-9b48-f90e099492e5-config-data\") pod \"keystone-6bf47649b6-ns6fh\" (UID: \"29fc2d85-fb2f-4941-9b48-f90e099492e5\") " pod="openstack/keystone-6bf47649b6-ns6fh" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.387041 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29fc2d85-fb2f-4941-9b48-f90e099492e5-combined-ca-bundle\") pod \"keystone-6bf47649b6-ns6fh\" (UID: \"29fc2d85-fb2f-4941-9b48-f90e099492e5\") " pod="openstack/keystone-6bf47649b6-ns6fh" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.388528 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/29fc2d85-fb2f-4941-9b48-f90e099492e5-fernet-keys\") pod \"keystone-6bf47649b6-ns6fh\" (UID: \"29fc2d85-fb2f-4941-9b48-f90e099492e5\") " pod="openstack/keystone-6bf47649b6-ns6fh" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.390863 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/29fc2d85-fb2f-4941-9b48-f90e099492e5-scripts\") pod \"keystone-6bf47649b6-ns6fh\" (UID: \"29fc2d85-fb2f-4941-9b48-f90e099492e5\") " pod="openstack/keystone-6bf47649b6-ns6fh" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.391769 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29fc2d85-fb2f-4941-9b48-f90e099492e5-config-data\") pod \"keystone-6bf47649b6-ns6fh\" (UID: \"29fc2d85-fb2f-4941-9b48-f90e099492e5\") " pod="openstack/keystone-6bf47649b6-ns6fh" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.393328 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/29fc2d85-fb2f-4941-9b48-f90e099492e5-credential-keys\") pod \"keystone-6bf47649b6-ns6fh\" (UID: \"29fc2d85-fb2f-4941-9b48-f90e099492e5\") " pod="openstack/keystone-6bf47649b6-ns6fh" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 
14:59:21.402548 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29fc2d85-fb2f-4941-9b48-f90e099492e5-combined-ca-bundle\") pod \"keystone-6bf47649b6-ns6fh\" (UID: \"29fc2d85-fb2f-4941-9b48-f90e099492e5\") " pod="openstack/keystone-6bf47649b6-ns6fh" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.404780 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qrq6m\" (UniqueName: \"kubernetes.io/projected/29fc2d85-fb2f-4941-9b48-f90e099492e5-kube-api-access-qrq6m\") pod \"keystone-6bf47649b6-ns6fh\" (UID: \"29fc2d85-fb2f-4941-9b48-f90e099492e5\") " pod="openstack/keystone-6bf47649b6-ns6fh" Nov 28 14:59:21 crc kubenswrapper[4857]: I1128 14:59:21.529572 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-6bf47649b6-ns6fh" Nov 28 14:59:22 crc kubenswrapper[4857]: I1128 14:59:22.140451 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-6bf47649b6-ns6fh"] Nov 28 14:59:23 crc kubenswrapper[4857]: I1128 14:59:23.140858 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6bf47649b6-ns6fh" event={"ID":"29fc2d85-fb2f-4941-9b48-f90e099492e5","Type":"ContainerStarted","Data":"09f8e94500e6883da809fe543e5b956f317f6f55ba1f777a98fc5e81ffa5782f"} Nov 28 14:59:23 crc kubenswrapper[4857]: I1128 14:59:23.141637 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6bf47649b6-ns6fh" event={"ID":"29fc2d85-fb2f-4941-9b48-f90e099492e5","Type":"ContainerStarted","Data":"6c42dee6c9f680d61b3dd138093d341bca60be1f7ad132151373d6259ce3c241"} Nov 28 14:59:23 crc kubenswrapper[4857]: I1128 14:59:23.142072 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-6bf47649b6-ns6fh" Nov 28 14:59:23 crc kubenswrapper[4857]: I1128 14:59:23.168754 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-6bf47649b6-ns6fh" podStartSLOduration=2.168720726 podStartE2EDuration="2.168720726s" podCreationTimestamp="2025-11-28 14:59:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 14:59:23.165446189 +0000 UTC m=+5413.289387656" watchObservedRunningTime="2025-11-28 14:59:23.168720726 +0000 UTC m=+5413.292662163" Nov 28 14:59:32 crc kubenswrapper[4857]: I1128 14:59:32.230538 4857 scope.go:117] "RemoveContainer" containerID="3e9d36f7893d9f1cc6753c2afab65986137c59e8ac6eb66a5f0a1735633d260e" Nov 28 14:59:32 crc kubenswrapper[4857]: E1128 14:59:32.232014 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 14:59:43 crc kubenswrapper[4857]: I1128 14:59:43.229363 4857 scope.go:117] "RemoveContainer" containerID="3e9d36f7893d9f1cc6753c2afab65986137c59e8ac6eb66a5f0a1735633d260e" Nov 28 14:59:44 crc kubenswrapper[4857]: I1128 14:59:44.431070 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" 
event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerStarted","Data":"e74738a0b8d5bffa43f61ec8bf86cdc9bce08ade2ead6c33503f5aba9862d3f0"} Nov 28 14:59:52 crc kubenswrapper[4857]: I1128 14:59:52.955469 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-6bf47649b6-ns6fh" Nov 28 14:59:55 crc kubenswrapper[4857]: I1128 14:59:55.754151 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 28 14:59:55 crc kubenswrapper[4857]: I1128 14:59:55.758447 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 28 14:59:55 crc kubenswrapper[4857]: I1128 14:59:55.764782 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 28 14:59:55 crc kubenswrapper[4857]: I1128 14:59:55.764928 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-d2plt" Nov 28 14:59:55 crc kubenswrapper[4857]: I1128 14:59:55.765276 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 28 14:59:55 crc kubenswrapper[4857]: I1128 14:59:55.818084 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 28 14:59:55 crc kubenswrapper[4857]: I1128 14:59:55.818106 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8xx6x\" (UniqueName: \"kubernetes.io/projected/04ee383b-d1a2-4c75-8482-10ed3d034049-kube-api-access-8xx6x\") pod \"openstackclient\" (UID: \"04ee383b-d1a2-4c75-8482-10ed3d034049\") " pod="openstack/openstackclient" Nov 28 14:59:55 crc kubenswrapper[4857]: I1128 14:59:55.818227 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/04ee383b-d1a2-4c75-8482-10ed3d034049-openstack-config\") pod \"openstackclient\" (UID: \"04ee383b-d1a2-4c75-8482-10ed3d034049\") " pod="openstack/openstackclient" Nov 28 14:59:55 crc kubenswrapper[4857]: I1128 14:59:55.818316 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/04ee383b-d1a2-4c75-8482-10ed3d034049-openstack-config-secret\") pod \"openstackclient\" (UID: \"04ee383b-d1a2-4c75-8482-10ed3d034049\") " pod="openstack/openstackclient" Nov 28 14:59:55 crc kubenswrapper[4857]: I1128 14:59:55.920037 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/04ee383b-d1a2-4c75-8482-10ed3d034049-openstack-config\") pod \"openstackclient\" (UID: \"04ee383b-d1a2-4c75-8482-10ed3d034049\") " pod="openstack/openstackclient" Nov 28 14:59:55 crc kubenswrapper[4857]: I1128 14:59:55.920146 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/04ee383b-d1a2-4c75-8482-10ed3d034049-openstack-config-secret\") pod \"openstackclient\" (UID: \"04ee383b-d1a2-4c75-8482-10ed3d034049\") " pod="openstack/openstackclient" Nov 28 14:59:55 crc kubenswrapper[4857]: I1128 14:59:55.920238 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8xx6x\" (UniqueName: \"kubernetes.io/projected/04ee383b-d1a2-4c75-8482-10ed3d034049-kube-api-access-8xx6x\") pod \"openstackclient\" (UID: 
\"04ee383b-d1a2-4c75-8482-10ed3d034049\") " pod="openstack/openstackclient" Nov 28 14:59:55 crc kubenswrapper[4857]: I1128 14:59:55.921570 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/04ee383b-d1a2-4c75-8482-10ed3d034049-openstack-config\") pod \"openstackclient\" (UID: \"04ee383b-d1a2-4c75-8482-10ed3d034049\") " pod="openstack/openstackclient" Nov 28 14:59:55 crc kubenswrapper[4857]: I1128 14:59:55.930940 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/04ee383b-d1a2-4c75-8482-10ed3d034049-openstack-config-secret\") pod \"openstackclient\" (UID: \"04ee383b-d1a2-4c75-8482-10ed3d034049\") " pod="openstack/openstackclient" Nov 28 14:59:55 crc kubenswrapper[4857]: I1128 14:59:55.937774 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8xx6x\" (UniqueName: \"kubernetes.io/projected/04ee383b-d1a2-4c75-8482-10ed3d034049-kube-api-access-8xx6x\") pod \"openstackclient\" (UID: \"04ee383b-d1a2-4c75-8482-10ed3d034049\") " pod="openstack/openstackclient" Nov 28 14:59:56 crc kubenswrapper[4857]: I1128 14:59:56.086852 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 28 14:59:56 crc kubenswrapper[4857]: I1128 14:59:56.613618 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 28 14:59:57 crc kubenswrapper[4857]: I1128 14:59:57.321493 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-rqngt"] Nov 28 14:59:57 crc kubenswrapper[4857]: I1128 14:59:57.326250 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rqngt" Nov 28 14:59:57 crc kubenswrapper[4857]: I1128 14:59:57.346043 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rqngt"] Nov 28 14:59:57 crc kubenswrapper[4857]: I1128 14:59:57.453473 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k4d87\" (UniqueName: \"kubernetes.io/projected/fd22d248-4f8b-45b4-9443-7775f52aacb1-kube-api-access-k4d87\") pod \"redhat-marketplace-rqngt\" (UID: \"fd22d248-4f8b-45b4-9443-7775f52aacb1\") " pod="openshift-marketplace/redhat-marketplace-rqngt" Nov 28 14:59:57 crc kubenswrapper[4857]: I1128 14:59:57.453861 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd22d248-4f8b-45b4-9443-7775f52aacb1-catalog-content\") pod \"redhat-marketplace-rqngt\" (UID: \"fd22d248-4f8b-45b4-9443-7775f52aacb1\") " pod="openshift-marketplace/redhat-marketplace-rqngt" Nov 28 14:59:57 crc kubenswrapper[4857]: I1128 14:59:57.453882 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd22d248-4f8b-45b4-9443-7775f52aacb1-utilities\") pod \"redhat-marketplace-rqngt\" (UID: \"fd22d248-4f8b-45b4-9443-7775f52aacb1\") " pod="openshift-marketplace/redhat-marketplace-rqngt" Nov 28 14:59:57 crc kubenswrapper[4857]: I1128 14:59:57.555465 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k4d87\" (UniqueName: \"kubernetes.io/projected/fd22d248-4f8b-45b4-9443-7775f52aacb1-kube-api-access-k4d87\") pod \"redhat-marketplace-rqngt\" (UID: \"fd22d248-4f8b-45b4-9443-7775f52aacb1\") " pod="openshift-marketplace/redhat-marketplace-rqngt" Nov 28 14:59:57 crc kubenswrapper[4857]: I1128 14:59:57.555639 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd22d248-4f8b-45b4-9443-7775f52aacb1-catalog-content\") pod \"redhat-marketplace-rqngt\" (UID: \"fd22d248-4f8b-45b4-9443-7775f52aacb1\") " pod="openshift-marketplace/redhat-marketplace-rqngt" Nov 28 14:59:57 crc kubenswrapper[4857]: I1128 14:59:57.555682 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd22d248-4f8b-45b4-9443-7775f52aacb1-utilities\") pod \"redhat-marketplace-rqngt\" (UID: \"fd22d248-4f8b-45b4-9443-7775f52aacb1\") " pod="openshift-marketplace/redhat-marketplace-rqngt" Nov 28 14:59:57 crc kubenswrapper[4857]: I1128 14:59:57.556309 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd22d248-4f8b-45b4-9443-7775f52aacb1-catalog-content\") pod \"redhat-marketplace-rqngt\" (UID: \"fd22d248-4f8b-45b4-9443-7775f52aacb1\") " pod="openshift-marketplace/redhat-marketplace-rqngt" Nov 28 14:59:57 crc kubenswrapper[4857]: I1128 14:59:57.556612 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd22d248-4f8b-45b4-9443-7775f52aacb1-utilities\") pod \"redhat-marketplace-rqngt\" (UID: \"fd22d248-4f8b-45b4-9443-7775f52aacb1\") " pod="openshift-marketplace/redhat-marketplace-rqngt" Nov 28 14:59:57 crc kubenswrapper[4857]: I1128 14:59:57.574425 4857 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-k4d87\" (UniqueName: \"kubernetes.io/projected/fd22d248-4f8b-45b4-9443-7775f52aacb1-kube-api-access-k4d87\") pod \"redhat-marketplace-rqngt\" (UID: \"fd22d248-4f8b-45b4-9443-7775f52aacb1\") " pod="openshift-marketplace/redhat-marketplace-rqngt" Nov 28 14:59:57 crc kubenswrapper[4857]: I1128 14:59:57.580780 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"04ee383b-d1a2-4c75-8482-10ed3d034049","Type":"ContainerStarted","Data":"4de5c3066710df6d76e7d00374c0159f7a2011f2a1c077231d5f07c172ca860c"} Nov 28 14:59:57 crc kubenswrapper[4857]: I1128 14:59:57.580817 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"04ee383b-d1a2-4c75-8482-10ed3d034049","Type":"ContainerStarted","Data":"101f65f78a02c2a669a55140e8313b162ff6954f4f143c53dfc33776f0b54e3f"} Nov 28 14:59:57 crc kubenswrapper[4857]: I1128 14:59:57.598821 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.598806771 podStartE2EDuration="2.598806771s" podCreationTimestamp="2025-11-28 14:59:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 14:59:57.595629876 +0000 UTC m=+5447.719571353" watchObservedRunningTime="2025-11-28 14:59:57.598806771 +0000 UTC m=+5447.722748208" Nov 28 14:59:57 crc kubenswrapper[4857]: I1128 14:59:57.666477 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rqngt" Nov 28 14:59:57 crc kubenswrapper[4857]: I1128 14:59:57.963368 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rqngt"] Nov 28 14:59:57 crc kubenswrapper[4857]: W1128 14:59:57.964667 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfd22d248_4f8b_45b4_9443_7775f52aacb1.slice/crio-7d0aab4289ef174cc44a44adbe6f7acd5d07dc844c3c74b958ce9065b9339a92 WatchSource:0}: Error finding container 7d0aab4289ef174cc44a44adbe6f7acd5d07dc844c3c74b958ce9065b9339a92: Status 404 returned error can't find the container with id 7d0aab4289ef174cc44a44adbe6f7acd5d07dc844c3c74b958ce9065b9339a92 Nov 28 14:59:58 crc kubenswrapper[4857]: I1128 14:59:58.593534 4857 generic.go:334] "Generic (PLEG): container finished" podID="fd22d248-4f8b-45b4-9443-7775f52aacb1" containerID="9acb220e146d4c4dab56c43f1acea071305471dbefa067f317fbe234afd388d5" exitCode=0 Nov 28 14:59:58 crc kubenswrapper[4857]: I1128 14:59:58.593657 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rqngt" event={"ID":"fd22d248-4f8b-45b4-9443-7775f52aacb1","Type":"ContainerDied","Data":"9acb220e146d4c4dab56c43f1acea071305471dbefa067f317fbe234afd388d5"} Nov 28 14:59:58 crc kubenswrapper[4857]: I1128 14:59:58.594105 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rqngt" event={"ID":"fd22d248-4f8b-45b4-9443-7775f52aacb1","Type":"ContainerStarted","Data":"7d0aab4289ef174cc44a44adbe6f7acd5d07dc844c3c74b958ce9065b9339a92"} Nov 28 14:59:58 crc kubenswrapper[4857]: I1128 14:59:58.596336 4857 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 14:59:59 crc kubenswrapper[4857]: I1128 14:59:59.606409 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-rqngt" event={"ID":"fd22d248-4f8b-45b4-9443-7775f52aacb1","Type":"ContainerStarted","Data":"a2736650f3278db5eef786d6dbe54e0c77e0ae52793c0e7212a00e3c6218e2ee"} Nov 28 15:00:00 crc kubenswrapper[4857]: I1128 15:00:00.148040 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405700-9mfxl"] Nov 28 15:00:00 crc kubenswrapper[4857]: I1128 15:00:00.150114 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405700-9mfxl" Nov 28 15:00:00 crc kubenswrapper[4857]: I1128 15:00:00.156626 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 15:00:00 crc kubenswrapper[4857]: I1128 15:00:00.157120 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 15:00:00 crc kubenswrapper[4857]: I1128 15:00:00.163109 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405700-9mfxl"] Nov 28 15:00:00 crc kubenswrapper[4857]: I1128 15:00:00.199568 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45m4j\" (UniqueName: \"kubernetes.io/projected/ffebd404-bed5-47cb-b62c-2623c1b59568-kube-api-access-45m4j\") pod \"collect-profiles-29405700-9mfxl\" (UID: \"ffebd404-bed5-47cb-b62c-2623c1b59568\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405700-9mfxl" Nov 28 15:00:00 crc kubenswrapper[4857]: I1128 15:00:00.199683 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ffebd404-bed5-47cb-b62c-2623c1b59568-config-volume\") pod \"collect-profiles-29405700-9mfxl\" (UID: \"ffebd404-bed5-47cb-b62c-2623c1b59568\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405700-9mfxl" Nov 28 15:00:00 crc kubenswrapper[4857]: I1128 15:00:00.199769 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ffebd404-bed5-47cb-b62c-2623c1b59568-secret-volume\") pod \"collect-profiles-29405700-9mfxl\" (UID: \"ffebd404-bed5-47cb-b62c-2623c1b59568\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405700-9mfxl" Nov 28 15:00:00 crc kubenswrapper[4857]: I1128 15:00:00.301857 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ffebd404-bed5-47cb-b62c-2623c1b59568-config-volume\") pod \"collect-profiles-29405700-9mfxl\" (UID: \"ffebd404-bed5-47cb-b62c-2623c1b59568\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405700-9mfxl" Nov 28 15:00:00 crc kubenswrapper[4857]: I1128 15:00:00.302012 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ffebd404-bed5-47cb-b62c-2623c1b59568-secret-volume\") pod \"collect-profiles-29405700-9mfxl\" (UID: \"ffebd404-bed5-47cb-b62c-2623c1b59568\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405700-9mfxl" Nov 28 15:00:00 crc kubenswrapper[4857]: I1128 15:00:00.302130 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45m4j\" 
(UniqueName: \"kubernetes.io/projected/ffebd404-bed5-47cb-b62c-2623c1b59568-kube-api-access-45m4j\") pod \"collect-profiles-29405700-9mfxl\" (UID: \"ffebd404-bed5-47cb-b62c-2623c1b59568\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405700-9mfxl" Nov 28 15:00:00 crc kubenswrapper[4857]: I1128 15:00:00.303352 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ffebd404-bed5-47cb-b62c-2623c1b59568-config-volume\") pod \"collect-profiles-29405700-9mfxl\" (UID: \"ffebd404-bed5-47cb-b62c-2623c1b59568\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405700-9mfxl" Nov 28 15:00:00 crc kubenswrapper[4857]: I1128 15:00:00.310243 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ffebd404-bed5-47cb-b62c-2623c1b59568-secret-volume\") pod \"collect-profiles-29405700-9mfxl\" (UID: \"ffebd404-bed5-47cb-b62c-2623c1b59568\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405700-9mfxl" Nov 28 15:00:00 crc kubenswrapper[4857]: I1128 15:00:00.319850 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-45m4j\" (UniqueName: \"kubernetes.io/projected/ffebd404-bed5-47cb-b62c-2623c1b59568-kube-api-access-45m4j\") pod \"collect-profiles-29405700-9mfxl\" (UID: \"ffebd404-bed5-47cb-b62c-2623c1b59568\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405700-9mfxl" Nov 28 15:00:00 crc kubenswrapper[4857]: I1128 15:00:00.523056 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405700-9mfxl" Nov 28 15:00:00 crc kubenswrapper[4857]: I1128 15:00:00.630789 4857 generic.go:334] "Generic (PLEG): container finished" podID="fd22d248-4f8b-45b4-9443-7775f52aacb1" containerID="a2736650f3278db5eef786d6dbe54e0c77e0ae52793c0e7212a00e3c6218e2ee" exitCode=0 Nov 28 15:00:00 crc kubenswrapper[4857]: I1128 15:00:00.630836 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rqngt" event={"ID":"fd22d248-4f8b-45b4-9443-7775f52aacb1","Type":"ContainerDied","Data":"a2736650f3278db5eef786d6dbe54e0c77e0ae52793c0e7212a00e3c6218e2ee"} Nov 28 15:00:01 crc kubenswrapper[4857]: I1128 15:00:01.019799 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405700-9mfxl"] Nov 28 15:00:01 crc kubenswrapper[4857]: W1128 15:00:01.033700 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podffebd404_bed5_47cb_b62c_2623c1b59568.slice/crio-e0a7fcc6cc416b45d02a6aa5d1f59b16ffed2a917a5f111f42220982fd005784 WatchSource:0}: Error finding container e0a7fcc6cc416b45d02a6aa5d1f59b16ffed2a917a5f111f42220982fd005784: Status 404 returned error can't find the container with id e0a7fcc6cc416b45d02a6aa5d1f59b16ffed2a917a5f111f42220982fd005784 Nov 28 15:00:01 crc kubenswrapper[4857]: I1128 15:00:01.655400 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rqngt" event={"ID":"fd22d248-4f8b-45b4-9443-7775f52aacb1","Type":"ContainerStarted","Data":"e96fd1f18dc24e81b55a16dc9ab8f9faa47ed1106e41f6ea4b4becf438714514"} Nov 28 15:00:01 crc kubenswrapper[4857]: I1128 15:00:01.661846 4857 generic.go:334] "Generic (PLEG): container finished" podID="ffebd404-bed5-47cb-b62c-2623c1b59568" 
containerID="106b2e90d47e7d80d423598d3a08726a893f506fb0b106f5174320ee76fa6161" exitCode=0 Nov 28 15:00:01 crc kubenswrapper[4857]: I1128 15:00:01.661909 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405700-9mfxl" event={"ID":"ffebd404-bed5-47cb-b62c-2623c1b59568","Type":"ContainerDied","Data":"106b2e90d47e7d80d423598d3a08726a893f506fb0b106f5174320ee76fa6161"} Nov 28 15:00:01 crc kubenswrapper[4857]: I1128 15:00:01.661935 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405700-9mfxl" event={"ID":"ffebd404-bed5-47cb-b62c-2623c1b59568","Type":"ContainerStarted","Data":"e0a7fcc6cc416b45d02a6aa5d1f59b16ffed2a917a5f111f42220982fd005784"} Nov 28 15:00:01 crc kubenswrapper[4857]: I1128 15:00:01.680514 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-rqngt" podStartSLOduration=2.049827918 podStartE2EDuration="4.680491375s" podCreationTimestamp="2025-11-28 14:59:57 +0000 UTC" firstStartedPulling="2025-11-28 14:59:58.596033897 +0000 UTC m=+5448.719975334" lastFinishedPulling="2025-11-28 15:00:01.226697354 +0000 UTC m=+5451.350638791" observedRunningTime="2025-11-28 15:00:01.675536003 +0000 UTC m=+5451.799477440" watchObservedRunningTime="2025-11-28 15:00:01.680491375 +0000 UTC m=+5451.804432822" Nov 28 15:00:03 crc kubenswrapper[4857]: I1128 15:00:03.049571 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405700-9mfxl" Nov 28 15:00:03 crc kubenswrapper[4857]: I1128 15:00:03.065892 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-45m4j\" (UniqueName: \"kubernetes.io/projected/ffebd404-bed5-47cb-b62c-2623c1b59568-kube-api-access-45m4j\") pod \"ffebd404-bed5-47cb-b62c-2623c1b59568\" (UID: \"ffebd404-bed5-47cb-b62c-2623c1b59568\") " Nov 28 15:00:03 crc kubenswrapper[4857]: I1128 15:00:03.066129 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ffebd404-bed5-47cb-b62c-2623c1b59568-config-volume\") pod \"ffebd404-bed5-47cb-b62c-2623c1b59568\" (UID: \"ffebd404-bed5-47cb-b62c-2623c1b59568\") " Nov 28 15:00:03 crc kubenswrapper[4857]: I1128 15:00:03.066174 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ffebd404-bed5-47cb-b62c-2623c1b59568-secret-volume\") pod \"ffebd404-bed5-47cb-b62c-2623c1b59568\" (UID: \"ffebd404-bed5-47cb-b62c-2623c1b59568\") " Nov 28 15:00:03 crc kubenswrapper[4857]: I1128 15:00:03.067413 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ffebd404-bed5-47cb-b62c-2623c1b59568-config-volume" (OuterVolumeSpecName: "config-volume") pod "ffebd404-bed5-47cb-b62c-2623c1b59568" (UID: "ffebd404-bed5-47cb-b62c-2623c1b59568"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:00:03 crc kubenswrapper[4857]: I1128 15:00:03.077445 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ffebd404-bed5-47cb-b62c-2623c1b59568-kube-api-access-45m4j" (OuterVolumeSpecName: "kube-api-access-45m4j") pod "ffebd404-bed5-47cb-b62c-2623c1b59568" (UID: "ffebd404-bed5-47cb-b62c-2623c1b59568"). InnerVolumeSpecName "kube-api-access-45m4j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:00:03 crc kubenswrapper[4857]: I1128 15:00:03.079197 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffebd404-bed5-47cb-b62c-2623c1b59568-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "ffebd404-bed5-47cb-b62c-2623c1b59568" (UID: "ffebd404-bed5-47cb-b62c-2623c1b59568"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:00:03 crc kubenswrapper[4857]: I1128 15:00:03.167716 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-45m4j\" (UniqueName: \"kubernetes.io/projected/ffebd404-bed5-47cb-b62c-2623c1b59568-kube-api-access-45m4j\") on node \"crc\" DevicePath \"\"" Nov 28 15:00:03 crc kubenswrapper[4857]: I1128 15:00:03.167751 4857 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ffebd404-bed5-47cb-b62c-2623c1b59568-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 15:00:03 crc kubenswrapper[4857]: I1128 15:00:03.167760 4857 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ffebd404-bed5-47cb-b62c-2623c1b59568-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 15:00:03 crc kubenswrapper[4857]: I1128 15:00:03.709994 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405700-9mfxl" event={"ID":"ffebd404-bed5-47cb-b62c-2623c1b59568","Type":"ContainerDied","Data":"e0a7fcc6cc416b45d02a6aa5d1f59b16ffed2a917a5f111f42220982fd005784"} Nov 28 15:00:03 crc kubenswrapper[4857]: I1128 15:00:03.710083 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e0a7fcc6cc416b45d02a6aa5d1f59b16ffed2a917a5f111f42220982fd005784" Nov 28 15:00:03 crc kubenswrapper[4857]: I1128 15:00:03.710039 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405700-9mfxl" Nov 28 15:00:04 crc kubenswrapper[4857]: I1128 15:00:04.143037 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405655-fr6rn"] Nov 28 15:00:04 crc kubenswrapper[4857]: I1128 15:00:04.153166 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405655-fr6rn"] Nov 28 15:00:04 crc kubenswrapper[4857]: I1128 15:00:04.242130 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b1ba1836-332a-4710-a660-78728a3e65cf" path="/var/lib/kubelet/pods/b1ba1836-332a-4710-a660-78728a3e65cf/volumes" Nov 28 15:00:07 crc kubenswrapper[4857]: I1128 15:00:07.666864 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-rqngt" Nov 28 15:00:07 crc kubenswrapper[4857]: I1128 15:00:07.667408 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-rqngt" Nov 28 15:00:07 crc kubenswrapper[4857]: I1128 15:00:07.752796 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-rqngt" Nov 28 15:00:07 crc kubenswrapper[4857]: I1128 15:00:07.806472 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-rqngt" Nov 28 15:00:08 crc kubenswrapper[4857]: I1128 15:00:08.002697 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rqngt"] Nov 28 15:00:09 crc kubenswrapper[4857]: I1128 15:00:09.775837 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-rqngt" podUID="fd22d248-4f8b-45b4-9443-7775f52aacb1" containerName="registry-server" containerID="cri-o://e96fd1f18dc24e81b55a16dc9ab8f9faa47ed1106e41f6ea4b4becf438714514" gracePeriod=2 Nov 28 15:00:10 crc kubenswrapper[4857]: I1128 15:00:10.767399 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rqngt" Nov 28 15:00:10 crc kubenswrapper[4857]: I1128 15:00:10.793707 4857 generic.go:334] "Generic (PLEG): container finished" podID="fd22d248-4f8b-45b4-9443-7775f52aacb1" containerID="e96fd1f18dc24e81b55a16dc9ab8f9faa47ed1106e41f6ea4b4becf438714514" exitCode=0 Nov 28 15:00:10 crc kubenswrapper[4857]: I1128 15:00:10.793777 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rqngt" event={"ID":"fd22d248-4f8b-45b4-9443-7775f52aacb1","Type":"ContainerDied","Data":"e96fd1f18dc24e81b55a16dc9ab8f9faa47ed1106e41f6ea4b4becf438714514"} Nov 28 15:00:10 crc kubenswrapper[4857]: I1128 15:00:10.793816 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rqngt" event={"ID":"fd22d248-4f8b-45b4-9443-7775f52aacb1","Type":"ContainerDied","Data":"7d0aab4289ef174cc44a44adbe6f7acd5d07dc844c3c74b958ce9065b9339a92"} Nov 28 15:00:10 crc kubenswrapper[4857]: I1128 15:00:10.793839 4857 scope.go:117] "RemoveContainer" containerID="e96fd1f18dc24e81b55a16dc9ab8f9faa47ed1106e41f6ea4b4becf438714514" Nov 28 15:00:10 crc kubenswrapper[4857]: I1128 15:00:10.793844 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rqngt" Nov 28 15:00:10 crc kubenswrapper[4857]: I1128 15:00:10.823266 4857 scope.go:117] "RemoveContainer" containerID="a2736650f3278db5eef786d6dbe54e0c77e0ae52793c0e7212a00e3c6218e2ee" Nov 28 15:00:10 crc kubenswrapper[4857]: I1128 15:00:10.852655 4857 scope.go:117] "RemoveContainer" containerID="9acb220e146d4c4dab56c43f1acea071305471dbefa067f317fbe234afd388d5" Nov 28 15:00:10 crc kubenswrapper[4857]: I1128 15:00:10.877790 4857 scope.go:117] "RemoveContainer" containerID="e96fd1f18dc24e81b55a16dc9ab8f9faa47ed1106e41f6ea4b4becf438714514" Nov 28 15:00:10 crc kubenswrapper[4857]: E1128 15:00:10.878368 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e96fd1f18dc24e81b55a16dc9ab8f9faa47ed1106e41f6ea4b4becf438714514\": container with ID starting with e96fd1f18dc24e81b55a16dc9ab8f9faa47ed1106e41f6ea4b4becf438714514 not found: ID does not exist" containerID="e96fd1f18dc24e81b55a16dc9ab8f9faa47ed1106e41f6ea4b4becf438714514" Nov 28 15:00:10 crc kubenswrapper[4857]: I1128 15:00:10.878399 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e96fd1f18dc24e81b55a16dc9ab8f9faa47ed1106e41f6ea4b4becf438714514"} err="failed to get container status \"e96fd1f18dc24e81b55a16dc9ab8f9faa47ed1106e41f6ea4b4becf438714514\": rpc error: code = NotFound desc = could not find container \"e96fd1f18dc24e81b55a16dc9ab8f9faa47ed1106e41f6ea4b4becf438714514\": container with ID starting with e96fd1f18dc24e81b55a16dc9ab8f9faa47ed1106e41f6ea4b4becf438714514 not found: ID does not exist" Nov 28 15:00:10 crc kubenswrapper[4857]: I1128 15:00:10.878422 4857 scope.go:117] "RemoveContainer" containerID="a2736650f3278db5eef786d6dbe54e0c77e0ae52793c0e7212a00e3c6218e2ee" Nov 28 15:00:10 crc kubenswrapper[4857]: E1128 15:00:10.878837 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a2736650f3278db5eef786d6dbe54e0c77e0ae52793c0e7212a00e3c6218e2ee\": container with ID starting with a2736650f3278db5eef786d6dbe54e0c77e0ae52793c0e7212a00e3c6218e2ee not found: ID does not exist" containerID="a2736650f3278db5eef786d6dbe54e0c77e0ae52793c0e7212a00e3c6218e2ee" Nov 28 15:00:10 crc kubenswrapper[4857]: I1128 15:00:10.878890 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a2736650f3278db5eef786d6dbe54e0c77e0ae52793c0e7212a00e3c6218e2ee"} err="failed to get container status \"a2736650f3278db5eef786d6dbe54e0c77e0ae52793c0e7212a00e3c6218e2ee\": rpc error: code = NotFound desc = could not find container \"a2736650f3278db5eef786d6dbe54e0c77e0ae52793c0e7212a00e3c6218e2ee\": container with ID starting with a2736650f3278db5eef786d6dbe54e0c77e0ae52793c0e7212a00e3c6218e2ee not found: ID does not exist" Nov 28 15:00:10 crc kubenswrapper[4857]: I1128 15:00:10.878931 4857 scope.go:117] "RemoveContainer" containerID="9acb220e146d4c4dab56c43f1acea071305471dbefa067f317fbe234afd388d5" Nov 28 15:00:10 crc kubenswrapper[4857]: E1128 15:00:10.879412 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9acb220e146d4c4dab56c43f1acea071305471dbefa067f317fbe234afd388d5\": container with ID starting with 9acb220e146d4c4dab56c43f1acea071305471dbefa067f317fbe234afd388d5 not found: ID does not exist" containerID="9acb220e146d4c4dab56c43f1acea071305471dbefa067f317fbe234afd388d5" 
Nov 28 15:00:10 crc kubenswrapper[4857]: I1128 15:00:10.879439 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9acb220e146d4c4dab56c43f1acea071305471dbefa067f317fbe234afd388d5"} err="failed to get container status \"9acb220e146d4c4dab56c43f1acea071305471dbefa067f317fbe234afd388d5\": rpc error: code = NotFound desc = could not find container \"9acb220e146d4c4dab56c43f1acea071305471dbefa067f317fbe234afd388d5\": container with ID starting with 9acb220e146d4c4dab56c43f1acea071305471dbefa067f317fbe234afd388d5 not found: ID does not exist" Nov 28 15:00:10 crc kubenswrapper[4857]: I1128 15:00:10.945494 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k4d87\" (UniqueName: \"kubernetes.io/projected/fd22d248-4f8b-45b4-9443-7775f52aacb1-kube-api-access-k4d87\") pod \"fd22d248-4f8b-45b4-9443-7775f52aacb1\" (UID: \"fd22d248-4f8b-45b4-9443-7775f52aacb1\") " Nov 28 15:00:10 crc kubenswrapper[4857]: I1128 15:00:10.945551 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd22d248-4f8b-45b4-9443-7775f52aacb1-catalog-content\") pod \"fd22d248-4f8b-45b4-9443-7775f52aacb1\" (UID: \"fd22d248-4f8b-45b4-9443-7775f52aacb1\") " Nov 28 15:00:10 crc kubenswrapper[4857]: I1128 15:00:10.945645 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd22d248-4f8b-45b4-9443-7775f52aacb1-utilities\") pod \"fd22d248-4f8b-45b4-9443-7775f52aacb1\" (UID: \"fd22d248-4f8b-45b4-9443-7775f52aacb1\") " Nov 28 15:00:10 crc kubenswrapper[4857]: I1128 15:00:10.946769 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd22d248-4f8b-45b4-9443-7775f52aacb1-utilities" (OuterVolumeSpecName: "utilities") pod "fd22d248-4f8b-45b4-9443-7775f52aacb1" (UID: "fd22d248-4f8b-45b4-9443-7775f52aacb1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:00:10 crc kubenswrapper[4857]: I1128 15:00:10.951980 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fd22d248-4f8b-45b4-9443-7775f52aacb1-kube-api-access-k4d87" (OuterVolumeSpecName: "kube-api-access-k4d87") pod "fd22d248-4f8b-45b4-9443-7775f52aacb1" (UID: "fd22d248-4f8b-45b4-9443-7775f52aacb1"). InnerVolumeSpecName "kube-api-access-k4d87". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:00:10 crc kubenswrapper[4857]: I1128 15:00:10.963402 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fd22d248-4f8b-45b4-9443-7775f52aacb1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fd22d248-4f8b-45b4-9443-7775f52aacb1" (UID: "fd22d248-4f8b-45b4-9443-7775f52aacb1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:00:11 crc kubenswrapper[4857]: I1128 15:00:11.047416 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k4d87\" (UniqueName: \"kubernetes.io/projected/fd22d248-4f8b-45b4-9443-7775f52aacb1-kube-api-access-k4d87\") on node \"crc\" DevicePath \"\"" Nov 28 15:00:11 crc kubenswrapper[4857]: I1128 15:00:11.047686 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fd22d248-4f8b-45b4-9443-7775f52aacb1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:00:11 crc kubenswrapper[4857]: I1128 15:00:11.047746 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fd22d248-4f8b-45b4-9443-7775f52aacb1-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:00:11 crc kubenswrapper[4857]: I1128 15:00:11.129513 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rqngt"] Nov 28 15:00:11 crc kubenswrapper[4857]: I1128 15:00:11.137677 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-rqngt"] Nov 28 15:00:12 crc kubenswrapper[4857]: I1128 15:00:12.246913 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fd22d248-4f8b-45b4-9443-7775f52aacb1" path="/var/lib/kubelet/pods/fd22d248-4f8b-45b4-9443-7775f52aacb1/volumes" Nov 28 15:00:17 crc kubenswrapper[4857]: I1128 15:00:17.624366 4857 scope.go:117] "RemoveContainer" containerID="53cd5ea547fbbeb5cb167ac1f7a22e7ec9fac0e5e593f4377123f66e74ced88f" Nov 28 15:01:00 crc kubenswrapper[4857]: I1128 15:01:00.163498 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29405701-542l7"] Nov 28 15:01:00 crc kubenswrapper[4857]: E1128 15:01:00.164859 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd22d248-4f8b-45b4-9443-7775f52aacb1" containerName="extract-utilities" Nov 28 15:01:00 crc kubenswrapper[4857]: I1128 15:01:00.164876 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd22d248-4f8b-45b4-9443-7775f52aacb1" containerName="extract-utilities" Nov 28 15:01:00 crc kubenswrapper[4857]: E1128 15:01:00.164898 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffebd404-bed5-47cb-b62c-2623c1b59568" containerName="collect-profiles" Nov 28 15:01:00 crc kubenswrapper[4857]: I1128 15:01:00.164906 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffebd404-bed5-47cb-b62c-2623c1b59568" containerName="collect-profiles" Nov 28 15:01:00 crc kubenswrapper[4857]: E1128 15:01:00.164934 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd22d248-4f8b-45b4-9443-7775f52aacb1" containerName="extract-content" Nov 28 15:01:00 crc kubenswrapper[4857]: I1128 15:01:00.164962 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd22d248-4f8b-45b4-9443-7775f52aacb1" containerName="extract-content" Nov 28 15:01:00 crc kubenswrapper[4857]: E1128 15:01:00.164986 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fd22d248-4f8b-45b4-9443-7775f52aacb1" containerName="registry-server" Nov 28 15:01:00 crc kubenswrapper[4857]: I1128 15:01:00.164994 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="fd22d248-4f8b-45b4-9443-7775f52aacb1" containerName="registry-server" Nov 28 15:01:00 crc kubenswrapper[4857]: I1128 15:01:00.165202 4857 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="fd22d248-4f8b-45b4-9443-7775f52aacb1" containerName="registry-server" Nov 28 15:01:00 crc kubenswrapper[4857]: I1128 15:01:00.165227 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffebd404-bed5-47cb-b62c-2623c1b59568" containerName="collect-profiles" Nov 28 15:01:00 crc kubenswrapper[4857]: I1128 15:01:00.165914 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29405701-542l7" Nov 28 15:01:00 crc kubenswrapper[4857]: I1128 15:01:00.184372 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29405701-542l7"] Nov 28 15:01:00 crc kubenswrapper[4857]: I1128 15:01:00.240451 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cd2cw\" (UniqueName: \"kubernetes.io/projected/b2518c7c-8ec1-4d12-bcca-33fa5496fe45-kube-api-access-cd2cw\") pod \"keystone-cron-29405701-542l7\" (UID: \"b2518c7c-8ec1-4d12-bcca-33fa5496fe45\") " pod="openstack/keystone-cron-29405701-542l7" Nov 28 15:01:00 crc kubenswrapper[4857]: I1128 15:01:00.240556 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b2518c7c-8ec1-4d12-bcca-33fa5496fe45-fernet-keys\") pod \"keystone-cron-29405701-542l7\" (UID: \"b2518c7c-8ec1-4d12-bcca-33fa5496fe45\") " pod="openstack/keystone-cron-29405701-542l7" Nov 28 15:01:00 crc kubenswrapper[4857]: I1128 15:01:00.240578 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2518c7c-8ec1-4d12-bcca-33fa5496fe45-config-data\") pod \"keystone-cron-29405701-542l7\" (UID: \"b2518c7c-8ec1-4d12-bcca-33fa5496fe45\") " pod="openstack/keystone-cron-29405701-542l7" Nov 28 15:01:00 crc kubenswrapper[4857]: I1128 15:01:00.240610 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2518c7c-8ec1-4d12-bcca-33fa5496fe45-combined-ca-bundle\") pod \"keystone-cron-29405701-542l7\" (UID: \"b2518c7c-8ec1-4d12-bcca-33fa5496fe45\") " pod="openstack/keystone-cron-29405701-542l7" Nov 28 15:01:00 crc kubenswrapper[4857]: I1128 15:01:00.343227 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cd2cw\" (UniqueName: \"kubernetes.io/projected/b2518c7c-8ec1-4d12-bcca-33fa5496fe45-kube-api-access-cd2cw\") pod \"keystone-cron-29405701-542l7\" (UID: \"b2518c7c-8ec1-4d12-bcca-33fa5496fe45\") " pod="openstack/keystone-cron-29405701-542l7" Nov 28 15:01:00 crc kubenswrapper[4857]: I1128 15:01:00.343369 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b2518c7c-8ec1-4d12-bcca-33fa5496fe45-fernet-keys\") pod \"keystone-cron-29405701-542l7\" (UID: \"b2518c7c-8ec1-4d12-bcca-33fa5496fe45\") " pod="openstack/keystone-cron-29405701-542l7" Nov 28 15:01:00 crc kubenswrapper[4857]: I1128 15:01:00.345775 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2518c7c-8ec1-4d12-bcca-33fa5496fe45-config-data\") pod \"keystone-cron-29405701-542l7\" (UID: \"b2518c7c-8ec1-4d12-bcca-33fa5496fe45\") " pod="openstack/keystone-cron-29405701-542l7" Nov 28 15:01:00 crc kubenswrapper[4857]: I1128 15:01:00.345929 4857 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2518c7c-8ec1-4d12-bcca-33fa5496fe45-combined-ca-bundle\") pod \"keystone-cron-29405701-542l7\" (UID: \"b2518c7c-8ec1-4d12-bcca-33fa5496fe45\") " pod="openstack/keystone-cron-29405701-542l7" Nov 28 15:01:00 crc kubenswrapper[4857]: I1128 15:01:00.351149 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b2518c7c-8ec1-4d12-bcca-33fa5496fe45-fernet-keys\") pod \"keystone-cron-29405701-542l7\" (UID: \"b2518c7c-8ec1-4d12-bcca-33fa5496fe45\") " pod="openstack/keystone-cron-29405701-542l7" Nov 28 15:01:00 crc kubenswrapper[4857]: I1128 15:01:00.358709 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2518c7c-8ec1-4d12-bcca-33fa5496fe45-combined-ca-bundle\") pod \"keystone-cron-29405701-542l7\" (UID: \"b2518c7c-8ec1-4d12-bcca-33fa5496fe45\") " pod="openstack/keystone-cron-29405701-542l7" Nov 28 15:01:00 crc kubenswrapper[4857]: I1128 15:01:00.359479 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2518c7c-8ec1-4d12-bcca-33fa5496fe45-config-data\") pod \"keystone-cron-29405701-542l7\" (UID: \"b2518c7c-8ec1-4d12-bcca-33fa5496fe45\") " pod="openstack/keystone-cron-29405701-542l7" Nov 28 15:01:00 crc kubenswrapper[4857]: I1128 15:01:00.364645 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cd2cw\" (UniqueName: \"kubernetes.io/projected/b2518c7c-8ec1-4d12-bcca-33fa5496fe45-kube-api-access-cd2cw\") pod \"keystone-cron-29405701-542l7\" (UID: \"b2518c7c-8ec1-4d12-bcca-33fa5496fe45\") " pod="openstack/keystone-cron-29405701-542l7" Nov 28 15:01:00 crc kubenswrapper[4857]: I1128 15:01:00.491243 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29405701-542l7" Nov 28 15:01:00 crc kubenswrapper[4857]: I1128 15:01:00.988276 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29405701-542l7"] Nov 28 15:01:00 crc kubenswrapper[4857]: W1128 15:01:00.992415 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb2518c7c_8ec1_4d12_bcca_33fa5496fe45.slice/crio-e59b73f95247430e65c9195ac48a7092768a3931239c0b2003d87f2d5e91006b WatchSource:0}: Error finding container e59b73f95247430e65c9195ac48a7092768a3931239c0b2003d87f2d5e91006b: Status 404 returned error can't find the container with id e59b73f95247430e65c9195ac48a7092768a3931239c0b2003d87f2d5e91006b Nov 28 15:01:01 crc kubenswrapper[4857]: I1128 15:01:01.383262 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29405701-542l7" event={"ID":"b2518c7c-8ec1-4d12-bcca-33fa5496fe45","Type":"ContainerStarted","Data":"3c98c9ec94cd7468e24a2fdd181933c765084fb5386f0f2f397e82ad39a74704"} Nov 28 15:01:01 crc kubenswrapper[4857]: I1128 15:01:01.385848 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29405701-542l7" event={"ID":"b2518c7c-8ec1-4d12-bcca-33fa5496fe45","Type":"ContainerStarted","Data":"e59b73f95247430e65c9195ac48a7092768a3931239c0b2003d87f2d5e91006b"} Nov 28 15:01:01 crc kubenswrapper[4857]: I1128 15:01:01.416228 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29405701-542l7" podStartSLOduration=1.416198746 podStartE2EDuration="1.416198746s" podCreationTimestamp="2025-11-28 15:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:01:01.408520691 +0000 UTC m=+5511.532462128" watchObservedRunningTime="2025-11-28 15:01:01.416198746 +0000 UTC m=+5511.540140223" Nov 28 15:01:03 crc kubenswrapper[4857]: I1128 15:01:03.409076 4857 generic.go:334] "Generic (PLEG): container finished" podID="b2518c7c-8ec1-4d12-bcca-33fa5496fe45" containerID="3c98c9ec94cd7468e24a2fdd181933c765084fb5386f0f2f397e82ad39a74704" exitCode=0 Nov 28 15:01:03 crc kubenswrapper[4857]: I1128 15:01:03.409141 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29405701-542l7" event={"ID":"b2518c7c-8ec1-4d12-bcca-33fa5496fe45","Type":"ContainerDied","Data":"3c98c9ec94cd7468e24a2fdd181933c765084fb5386f0f2f397e82ad39a74704"} Nov 28 15:01:04 crc kubenswrapper[4857]: I1128 15:01:04.747820 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29405701-542l7" Nov 28 15:01:04 crc kubenswrapper[4857]: I1128 15:01:04.771081 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2518c7c-8ec1-4d12-bcca-33fa5496fe45-config-data\") pod \"b2518c7c-8ec1-4d12-bcca-33fa5496fe45\" (UID: \"b2518c7c-8ec1-4d12-bcca-33fa5496fe45\") " Nov 28 15:01:04 crc kubenswrapper[4857]: I1128 15:01:04.771365 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2518c7c-8ec1-4d12-bcca-33fa5496fe45-combined-ca-bundle\") pod \"b2518c7c-8ec1-4d12-bcca-33fa5496fe45\" (UID: \"b2518c7c-8ec1-4d12-bcca-33fa5496fe45\") " Nov 28 15:01:04 crc kubenswrapper[4857]: I1128 15:01:04.771456 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b2518c7c-8ec1-4d12-bcca-33fa5496fe45-fernet-keys\") pod \"b2518c7c-8ec1-4d12-bcca-33fa5496fe45\" (UID: \"b2518c7c-8ec1-4d12-bcca-33fa5496fe45\") " Nov 28 15:01:04 crc kubenswrapper[4857]: I1128 15:01:04.771746 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cd2cw\" (UniqueName: \"kubernetes.io/projected/b2518c7c-8ec1-4d12-bcca-33fa5496fe45-kube-api-access-cd2cw\") pod \"b2518c7c-8ec1-4d12-bcca-33fa5496fe45\" (UID: \"b2518c7c-8ec1-4d12-bcca-33fa5496fe45\") " Nov 28 15:01:04 crc kubenswrapper[4857]: I1128 15:01:04.785391 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2518c7c-8ec1-4d12-bcca-33fa5496fe45-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "b2518c7c-8ec1-4d12-bcca-33fa5496fe45" (UID: "b2518c7c-8ec1-4d12-bcca-33fa5496fe45"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:01:04 crc kubenswrapper[4857]: I1128 15:01:04.785659 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2518c7c-8ec1-4d12-bcca-33fa5496fe45-kube-api-access-cd2cw" (OuterVolumeSpecName: "kube-api-access-cd2cw") pod "b2518c7c-8ec1-4d12-bcca-33fa5496fe45" (UID: "b2518c7c-8ec1-4d12-bcca-33fa5496fe45"). InnerVolumeSpecName "kube-api-access-cd2cw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:01:04 crc kubenswrapper[4857]: I1128 15:01:04.819209 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2518c7c-8ec1-4d12-bcca-33fa5496fe45-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b2518c7c-8ec1-4d12-bcca-33fa5496fe45" (UID: "b2518c7c-8ec1-4d12-bcca-33fa5496fe45"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:01:04 crc kubenswrapper[4857]: I1128 15:01:04.837441 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2518c7c-8ec1-4d12-bcca-33fa5496fe45-config-data" (OuterVolumeSpecName: "config-data") pod "b2518c7c-8ec1-4d12-bcca-33fa5496fe45" (UID: "b2518c7c-8ec1-4d12-bcca-33fa5496fe45"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:01:04 crc kubenswrapper[4857]: I1128 15:01:04.873472 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2518c7c-8ec1-4d12-bcca-33fa5496fe45-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:01:04 crc kubenswrapper[4857]: I1128 15:01:04.873512 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2518c7c-8ec1-4d12-bcca-33fa5496fe45-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:01:04 crc kubenswrapper[4857]: I1128 15:01:04.873568 4857 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b2518c7c-8ec1-4d12-bcca-33fa5496fe45-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 28 15:01:04 crc kubenswrapper[4857]: I1128 15:01:04.873587 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cd2cw\" (UniqueName: \"kubernetes.io/projected/b2518c7c-8ec1-4d12-bcca-33fa5496fe45-kube-api-access-cd2cw\") on node \"crc\" DevicePath \"\"" Nov 28 15:01:05 crc kubenswrapper[4857]: I1128 15:01:05.431823 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29405701-542l7" event={"ID":"b2518c7c-8ec1-4d12-bcca-33fa5496fe45","Type":"ContainerDied","Data":"e59b73f95247430e65c9195ac48a7092768a3931239c0b2003d87f2d5e91006b"} Nov 28 15:01:05 crc kubenswrapper[4857]: I1128 15:01:05.432282 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e59b73f95247430e65c9195ac48a7092768a3931239c0b2003d87f2d5e91006b" Nov 28 15:01:05 crc kubenswrapper[4857]: I1128 15:01:05.432040 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29405701-542l7" Nov 28 15:01:36 crc kubenswrapper[4857]: I1128 15:01:36.427232 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-nlb4x"] Nov 28 15:01:36 crc kubenswrapper[4857]: E1128 15:01:36.428777 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2518c7c-8ec1-4d12-bcca-33fa5496fe45" containerName="keystone-cron" Nov 28 15:01:36 crc kubenswrapper[4857]: I1128 15:01:36.428803 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2518c7c-8ec1-4d12-bcca-33fa5496fe45" containerName="keystone-cron" Nov 28 15:01:36 crc kubenswrapper[4857]: I1128 15:01:36.429133 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2518c7c-8ec1-4d12-bcca-33fa5496fe45" containerName="keystone-cron" Nov 28 15:01:36 crc kubenswrapper[4857]: I1128 15:01:36.430050 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-nlb4x" Nov 28 15:01:36 crc kubenswrapper[4857]: I1128 15:01:36.434020 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-f001-account-create-update-5z7gz"] Nov 28 15:01:36 crc kubenswrapper[4857]: I1128 15:01:36.435150 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-f001-account-create-update-5z7gz" Nov 28 15:01:36 crc kubenswrapper[4857]: I1128 15:01:36.437065 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 28 15:01:36 crc kubenswrapper[4857]: I1128 15:01:36.443199 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-f001-account-create-update-5z7gz"] Nov 28 15:01:36 crc kubenswrapper[4857]: I1128 15:01:36.468769 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pr2zf\" (UniqueName: \"kubernetes.io/projected/fb6077e1-1cfe-42fb-97fa-e67c1eecb683-kube-api-access-pr2zf\") pod \"barbican-db-create-nlb4x\" (UID: \"fb6077e1-1cfe-42fb-97fa-e67c1eecb683\") " pod="openstack/barbican-db-create-nlb4x" Nov 28 15:01:36 crc kubenswrapper[4857]: I1128 15:01:36.468883 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-85jdq\" (UniqueName: \"kubernetes.io/projected/571ee4e3-3f14-4e48-be69-dd76619de4fa-kube-api-access-85jdq\") pod \"barbican-f001-account-create-update-5z7gz\" (UID: \"571ee4e3-3f14-4e48-be69-dd76619de4fa\") " pod="openstack/barbican-f001-account-create-update-5z7gz" Nov 28 15:01:36 crc kubenswrapper[4857]: I1128 15:01:36.468938 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/571ee4e3-3f14-4e48-be69-dd76619de4fa-operator-scripts\") pod \"barbican-f001-account-create-update-5z7gz\" (UID: \"571ee4e3-3f14-4e48-be69-dd76619de4fa\") " pod="openstack/barbican-f001-account-create-update-5z7gz" Nov 28 15:01:36 crc kubenswrapper[4857]: I1128 15:01:36.469099 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fb6077e1-1cfe-42fb-97fa-e67c1eecb683-operator-scripts\") pod \"barbican-db-create-nlb4x\" (UID: \"fb6077e1-1cfe-42fb-97fa-e67c1eecb683\") " pod="openstack/barbican-db-create-nlb4x" Nov 28 15:01:36 crc kubenswrapper[4857]: I1128 15:01:36.479440 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-nlb4x"] Nov 28 15:01:36 crc kubenswrapper[4857]: I1128 15:01:36.570352 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pr2zf\" (UniqueName: \"kubernetes.io/projected/fb6077e1-1cfe-42fb-97fa-e67c1eecb683-kube-api-access-pr2zf\") pod \"barbican-db-create-nlb4x\" (UID: \"fb6077e1-1cfe-42fb-97fa-e67c1eecb683\") " pod="openstack/barbican-db-create-nlb4x" Nov 28 15:01:36 crc kubenswrapper[4857]: I1128 15:01:36.570417 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-85jdq\" (UniqueName: \"kubernetes.io/projected/571ee4e3-3f14-4e48-be69-dd76619de4fa-kube-api-access-85jdq\") pod \"barbican-f001-account-create-update-5z7gz\" (UID: \"571ee4e3-3f14-4e48-be69-dd76619de4fa\") " pod="openstack/barbican-f001-account-create-update-5z7gz" Nov 28 15:01:36 crc kubenswrapper[4857]: I1128 15:01:36.570451 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/571ee4e3-3f14-4e48-be69-dd76619de4fa-operator-scripts\") pod \"barbican-f001-account-create-update-5z7gz\" (UID: \"571ee4e3-3f14-4e48-be69-dd76619de4fa\") " pod="openstack/barbican-f001-account-create-update-5z7gz" Nov 28 15:01:36 crc 
kubenswrapper[4857]: I1128 15:01:36.570482 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fb6077e1-1cfe-42fb-97fa-e67c1eecb683-operator-scripts\") pod \"barbican-db-create-nlb4x\" (UID: \"fb6077e1-1cfe-42fb-97fa-e67c1eecb683\") " pod="openstack/barbican-db-create-nlb4x" Nov 28 15:01:36 crc kubenswrapper[4857]: I1128 15:01:36.571289 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fb6077e1-1cfe-42fb-97fa-e67c1eecb683-operator-scripts\") pod \"barbican-db-create-nlb4x\" (UID: \"fb6077e1-1cfe-42fb-97fa-e67c1eecb683\") " pod="openstack/barbican-db-create-nlb4x" Nov 28 15:01:36 crc kubenswrapper[4857]: I1128 15:01:36.571879 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/571ee4e3-3f14-4e48-be69-dd76619de4fa-operator-scripts\") pod \"barbican-f001-account-create-update-5z7gz\" (UID: \"571ee4e3-3f14-4e48-be69-dd76619de4fa\") " pod="openstack/barbican-f001-account-create-update-5z7gz" Nov 28 15:01:36 crc kubenswrapper[4857]: I1128 15:01:36.589851 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pr2zf\" (UniqueName: \"kubernetes.io/projected/fb6077e1-1cfe-42fb-97fa-e67c1eecb683-kube-api-access-pr2zf\") pod \"barbican-db-create-nlb4x\" (UID: \"fb6077e1-1cfe-42fb-97fa-e67c1eecb683\") " pod="openstack/barbican-db-create-nlb4x" Nov 28 15:01:36 crc kubenswrapper[4857]: I1128 15:01:36.589894 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-85jdq\" (UniqueName: \"kubernetes.io/projected/571ee4e3-3f14-4e48-be69-dd76619de4fa-kube-api-access-85jdq\") pod \"barbican-f001-account-create-update-5z7gz\" (UID: \"571ee4e3-3f14-4e48-be69-dd76619de4fa\") " pod="openstack/barbican-f001-account-create-update-5z7gz" Nov 28 15:01:36 crc kubenswrapper[4857]: I1128 15:01:36.770164 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-nlb4x" Nov 28 15:01:36 crc kubenswrapper[4857]: I1128 15:01:36.785299 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-f001-account-create-update-5z7gz" Nov 28 15:01:37 crc kubenswrapper[4857]: I1128 15:01:37.244741 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-nlb4x"] Nov 28 15:01:37 crc kubenswrapper[4857]: W1128 15:01:37.246589 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfb6077e1_1cfe_42fb_97fa_e67c1eecb683.slice/crio-d1182e8480d19aaf6e44f009d692ef861f20a87f12c998ad990a0392fd46cac6 WatchSource:0}: Error finding container d1182e8480d19aaf6e44f009d692ef861f20a87f12c998ad990a0392fd46cac6: Status 404 returned error can't find the container with id d1182e8480d19aaf6e44f009d692ef861f20a87f12c998ad990a0392fd46cac6 Nov 28 15:01:37 crc kubenswrapper[4857]: I1128 15:01:37.306829 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-f001-account-create-update-5z7gz"] Nov 28 15:01:37 crc kubenswrapper[4857]: W1128 15:01:37.316266 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod571ee4e3_3f14_4e48_be69_dd76619de4fa.slice/crio-e0d299ef5b900e67db6627e47c09b7f99e14f041551aaca57b83d381abb45a5b WatchSource:0}: Error finding container e0d299ef5b900e67db6627e47c09b7f99e14f041551aaca57b83d381abb45a5b: Status 404 returned error can't find the container with id e0d299ef5b900e67db6627e47c09b7f99e14f041551aaca57b83d381abb45a5b Nov 28 15:01:37 crc kubenswrapper[4857]: I1128 15:01:37.922540 4857 generic.go:334] "Generic (PLEG): container finished" podID="fb6077e1-1cfe-42fb-97fa-e67c1eecb683" containerID="39ff0a0b7a01cd011676bb9f10e3e3f07f0635ee6b35c4cc53f803dcabbc8b0c" exitCode=0 Nov 28 15:01:37 crc kubenswrapper[4857]: I1128 15:01:37.923010 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-nlb4x" event={"ID":"fb6077e1-1cfe-42fb-97fa-e67c1eecb683","Type":"ContainerDied","Data":"39ff0a0b7a01cd011676bb9f10e3e3f07f0635ee6b35c4cc53f803dcabbc8b0c"} Nov 28 15:01:37 crc kubenswrapper[4857]: I1128 15:01:37.923041 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-nlb4x" event={"ID":"fb6077e1-1cfe-42fb-97fa-e67c1eecb683","Type":"ContainerStarted","Data":"d1182e8480d19aaf6e44f009d692ef861f20a87f12c998ad990a0392fd46cac6"} Nov 28 15:01:37 crc kubenswrapper[4857]: I1128 15:01:37.925359 4857 generic.go:334] "Generic (PLEG): container finished" podID="571ee4e3-3f14-4e48-be69-dd76619de4fa" containerID="92590d34f84721a2e7caeae9f0e60434bdb61d2886da3a0495b4b614dc20dd25" exitCode=0 Nov 28 15:01:37 crc kubenswrapper[4857]: I1128 15:01:37.925441 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-f001-account-create-update-5z7gz" event={"ID":"571ee4e3-3f14-4e48-be69-dd76619de4fa","Type":"ContainerDied","Data":"92590d34f84721a2e7caeae9f0e60434bdb61d2886da3a0495b4b614dc20dd25"} Nov 28 15:01:37 crc kubenswrapper[4857]: I1128 15:01:37.925482 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-f001-account-create-update-5z7gz" event={"ID":"571ee4e3-3f14-4e48-be69-dd76619de4fa","Type":"ContainerStarted","Data":"e0d299ef5b900e67db6627e47c09b7f99e14f041551aaca57b83d381abb45a5b"} Nov 28 15:01:39 crc kubenswrapper[4857]: I1128 15:01:39.339291 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-nlb4x" Nov 28 15:01:39 crc kubenswrapper[4857]: I1128 15:01:39.348896 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-f001-account-create-update-5z7gz" Nov 28 15:01:39 crc kubenswrapper[4857]: I1128 15:01:39.425749 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-85jdq\" (UniqueName: \"kubernetes.io/projected/571ee4e3-3f14-4e48-be69-dd76619de4fa-kube-api-access-85jdq\") pod \"571ee4e3-3f14-4e48-be69-dd76619de4fa\" (UID: \"571ee4e3-3f14-4e48-be69-dd76619de4fa\") " Nov 28 15:01:39 crc kubenswrapper[4857]: I1128 15:01:39.425882 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/571ee4e3-3f14-4e48-be69-dd76619de4fa-operator-scripts\") pod \"571ee4e3-3f14-4e48-be69-dd76619de4fa\" (UID: \"571ee4e3-3f14-4e48-be69-dd76619de4fa\") " Nov 28 15:01:39 crc kubenswrapper[4857]: I1128 15:01:39.426002 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pr2zf\" (UniqueName: \"kubernetes.io/projected/fb6077e1-1cfe-42fb-97fa-e67c1eecb683-kube-api-access-pr2zf\") pod \"fb6077e1-1cfe-42fb-97fa-e67c1eecb683\" (UID: \"fb6077e1-1cfe-42fb-97fa-e67c1eecb683\") " Nov 28 15:01:39 crc kubenswrapper[4857]: I1128 15:01:39.426039 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fb6077e1-1cfe-42fb-97fa-e67c1eecb683-operator-scripts\") pod \"fb6077e1-1cfe-42fb-97fa-e67c1eecb683\" (UID: \"fb6077e1-1cfe-42fb-97fa-e67c1eecb683\") " Nov 28 15:01:39 crc kubenswrapper[4857]: I1128 15:01:39.426839 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fb6077e1-1cfe-42fb-97fa-e67c1eecb683-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "fb6077e1-1cfe-42fb-97fa-e67c1eecb683" (UID: "fb6077e1-1cfe-42fb-97fa-e67c1eecb683"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:01:39 crc kubenswrapper[4857]: I1128 15:01:39.427596 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/571ee4e3-3f14-4e48-be69-dd76619de4fa-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "571ee4e3-3f14-4e48-be69-dd76619de4fa" (UID: "571ee4e3-3f14-4e48-be69-dd76619de4fa"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:01:39 crc kubenswrapper[4857]: I1128 15:01:39.434638 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/571ee4e3-3f14-4e48-be69-dd76619de4fa-kube-api-access-85jdq" (OuterVolumeSpecName: "kube-api-access-85jdq") pod "571ee4e3-3f14-4e48-be69-dd76619de4fa" (UID: "571ee4e3-3f14-4e48-be69-dd76619de4fa"). InnerVolumeSpecName "kube-api-access-85jdq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:01:39 crc kubenswrapper[4857]: I1128 15:01:39.435410 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb6077e1-1cfe-42fb-97fa-e67c1eecb683-kube-api-access-pr2zf" (OuterVolumeSpecName: "kube-api-access-pr2zf") pod "fb6077e1-1cfe-42fb-97fa-e67c1eecb683" (UID: "fb6077e1-1cfe-42fb-97fa-e67c1eecb683"). InnerVolumeSpecName "kube-api-access-pr2zf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:01:39 crc kubenswrapper[4857]: I1128 15:01:39.527132 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/571ee4e3-3f14-4e48-be69-dd76619de4fa-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:01:39 crc kubenswrapper[4857]: I1128 15:01:39.527172 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pr2zf\" (UniqueName: \"kubernetes.io/projected/fb6077e1-1cfe-42fb-97fa-e67c1eecb683-kube-api-access-pr2zf\") on node \"crc\" DevicePath \"\"" Nov 28 15:01:39 crc kubenswrapper[4857]: I1128 15:01:39.527188 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fb6077e1-1cfe-42fb-97fa-e67c1eecb683-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:01:39 crc kubenswrapper[4857]: I1128 15:01:39.527201 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-85jdq\" (UniqueName: \"kubernetes.io/projected/571ee4e3-3f14-4e48-be69-dd76619de4fa-kube-api-access-85jdq\") on node \"crc\" DevicePath \"\"" Nov 28 15:01:39 crc kubenswrapper[4857]: I1128 15:01:39.966261 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-f001-account-create-update-5z7gz" event={"ID":"571ee4e3-3f14-4e48-be69-dd76619de4fa","Type":"ContainerDied","Data":"e0d299ef5b900e67db6627e47c09b7f99e14f041551aaca57b83d381abb45a5b"} Nov 28 15:01:39 crc kubenswrapper[4857]: I1128 15:01:39.966313 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e0d299ef5b900e67db6627e47c09b7f99e14f041551aaca57b83d381abb45a5b" Nov 28 15:01:39 crc kubenswrapper[4857]: I1128 15:01:39.966436 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-f001-account-create-update-5z7gz" Nov 28 15:01:39 crc kubenswrapper[4857]: I1128 15:01:39.968734 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-nlb4x" event={"ID":"fb6077e1-1cfe-42fb-97fa-e67c1eecb683","Type":"ContainerDied","Data":"d1182e8480d19aaf6e44f009d692ef861f20a87f12c998ad990a0392fd46cac6"} Nov 28 15:01:39 crc kubenswrapper[4857]: I1128 15:01:39.968823 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d1182e8480d19aaf6e44f009d692ef861f20a87f12c998ad990a0392fd46cac6" Nov 28 15:01:39 crc kubenswrapper[4857]: I1128 15:01:39.968889 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-nlb4x" Nov 28 15:01:41 crc kubenswrapper[4857]: I1128 15:01:41.761773 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-dwvtm"] Nov 28 15:01:41 crc kubenswrapper[4857]: E1128 15:01:41.762940 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb6077e1-1cfe-42fb-97fa-e67c1eecb683" containerName="mariadb-database-create" Nov 28 15:01:41 crc kubenswrapper[4857]: I1128 15:01:41.762979 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb6077e1-1cfe-42fb-97fa-e67c1eecb683" containerName="mariadb-database-create" Nov 28 15:01:41 crc kubenswrapper[4857]: E1128 15:01:41.763010 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="571ee4e3-3f14-4e48-be69-dd76619de4fa" containerName="mariadb-account-create-update" Nov 28 15:01:41 crc kubenswrapper[4857]: I1128 15:01:41.763018 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="571ee4e3-3f14-4e48-be69-dd76619de4fa" containerName="mariadb-account-create-update" Nov 28 15:01:41 crc kubenswrapper[4857]: I1128 15:01:41.763230 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb6077e1-1cfe-42fb-97fa-e67c1eecb683" containerName="mariadb-database-create" Nov 28 15:01:41 crc kubenswrapper[4857]: I1128 15:01:41.763263 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="571ee4e3-3f14-4e48-be69-dd76619de4fa" containerName="mariadb-account-create-update" Nov 28 15:01:41 crc kubenswrapper[4857]: I1128 15:01:41.764200 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-dwvtm" Nov 28 15:01:41 crc kubenswrapper[4857]: I1128 15:01:41.769224 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-kgll8" Nov 28 15:01:41 crc kubenswrapper[4857]: I1128 15:01:41.769455 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 28 15:01:41 crc kubenswrapper[4857]: I1128 15:01:41.777477 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-dwvtm"] Nov 28 15:01:41 crc kubenswrapper[4857]: I1128 15:01:41.915566 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8326720-d37a-4506-93ce-1bec95cfebf8-combined-ca-bundle\") pod \"barbican-db-sync-dwvtm\" (UID: \"e8326720-d37a-4506-93ce-1bec95cfebf8\") " pod="openstack/barbican-db-sync-dwvtm" Nov 28 15:01:41 crc kubenswrapper[4857]: I1128 15:01:41.915642 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bgqqk\" (UniqueName: \"kubernetes.io/projected/e8326720-d37a-4506-93ce-1bec95cfebf8-kube-api-access-bgqqk\") pod \"barbican-db-sync-dwvtm\" (UID: \"e8326720-d37a-4506-93ce-1bec95cfebf8\") " pod="openstack/barbican-db-sync-dwvtm" Nov 28 15:01:41 crc kubenswrapper[4857]: I1128 15:01:41.915766 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e8326720-d37a-4506-93ce-1bec95cfebf8-db-sync-config-data\") pod \"barbican-db-sync-dwvtm\" (UID: \"e8326720-d37a-4506-93ce-1bec95cfebf8\") " pod="openstack/barbican-db-sync-dwvtm" Nov 28 15:01:42 crc kubenswrapper[4857]: I1128 15:01:42.017483 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/e8326720-d37a-4506-93ce-1bec95cfebf8-combined-ca-bundle\") pod \"barbican-db-sync-dwvtm\" (UID: \"e8326720-d37a-4506-93ce-1bec95cfebf8\") " pod="openstack/barbican-db-sync-dwvtm" Nov 28 15:01:42 crc kubenswrapper[4857]: I1128 15:01:42.017559 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bgqqk\" (UniqueName: \"kubernetes.io/projected/e8326720-d37a-4506-93ce-1bec95cfebf8-kube-api-access-bgqqk\") pod \"barbican-db-sync-dwvtm\" (UID: \"e8326720-d37a-4506-93ce-1bec95cfebf8\") " pod="openstack/barbican-db-sync-dwvtm" Nov 28 15:01:42 crc kubenswrapper[4857]: I1128 15:01:42.017651 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e8326720-d37a-4506-93ce-1bec95cfebf8-db-sync-config-data\") pod \"barbican-db-sync-dwvtm\" (UID: \"e8326720-d37a-4506-93ce-1bec95cfebf8\") " pod="openstack/barbican-db-sync-dwvtm" Nov 28 15:01:42 crc kubenswrapper[4857]: I1128 15:01:42.027346 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8326720-d37a-4506-93ce-1bec95cfebf8-combined-ca-bundle\") pod \"barbican-db-sync-dwvtm\" (UID: \"e8326720-d37a-4506-93ce-1bec95cfebf8\") " pod="openstack/barbican-db-sync-dwvtm" Nov 28 15:01:42 crc kubenswrapper[4857]: I1128 15:01:42.028686 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e8326720-d37a-4506-93ce-1bec95cfebf8-db-sync-config-data\") pod \"barbican-db-sync-dwvtm\" (UID: \"e8326720-d37a-4506-93ce-1bec95cfebf8\") " pod="openstack/barbican-db-sync-dwvtm" Nov 28 15:01:42 crc kubenswrapper[4857]: I1128 15:01:42.047297 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bgqqk\" (UniqueName: \"kubernetes.io/projected/e8326720-d37a-4506-93ce-1bec95cfebf8-kube-api-access-bgqqk\") pod \"barbican-db-sync-dwvtm\" (UID: \"e8326720-d37a-4506-93ce-1bec95cfebf8\") " pod="openstack/barbican-db-sync-dwvtm" Nov 28 15:01:42 crc kubenswrapper[4857]: I1128 15:01:42.087741 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-dwvtm" Nov 28 15:01:42 crc kubenswrapper[4857]: I1128 15:01:42.613805 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-dwvtm"] Nov 28 15:01:43 crc kubenswrapper[4857]: I1128 15:01:43.002222 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-dwvtm" event={"ID":"e8326720-d37a-4506-93ce-1bec95cfebf8","Type":"ContainerStarted","Data":"7f734298ba3a41cbf3c4ca474870c179f47438d78db4457c6b88ac8b89770c3a"} Nov 28 15:01:43 crc kubenswrapper[4857]: I1128 15:01:43.002811 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-dwvtm" event={"ID":"e8326720-d37a-4506-93ce-1bec95cfebf8","Type":"ContainerStarted","Data":"bbba5137a384e291cc9674dc549baf1e37629ce62e4fbdc89623259b95ce5313"} Nov 28 15:01:43 crc kubenswrapper[4857]: I1128 15:01:43.036924 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-dwvtm" podStartSLOduration=2.036879662 podStartE2EDuration="2.036879662s" podCreationTimestamp="2025-11-28 15:01:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:01:43.019066646 +0000 UTC m=+5553.143008133" watchObservedRunningTime="2025-11-28 15:01:43.036879662 +0000 UTC m=+5553.160821169" Nov 28 15:01:44 crc kubenswrapper[4857]: I1128 15:01:44.017751 4857 generic.go:334] "Generic (PLEG): container finished" podID="e8326720-d37a-4506-93ce-1bec95cfebf8" containerID="7f734298ba3a41cbf3c4ca474870c179f47438d78db4457c6b88ac8b89770c3a" exitCode=0 Nov 28 15:01:44 crc kubenswrapper[4857]: I1128 15:01:44.017848 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-dwvtm" event={"ID":"e8326720-d37a-4506-93ce-1bec95cfebf8","Type":"ContainerDied","Data":"7f734298ba3a41cbf3c4ca474870c179f47438d78db4457c6b88ac8b89770c3a"} Nov 28 15:01:45 crc kubenswrapper[4857]: I1128 15:01:45.464355 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-dwvtm" Nov 28 15:01:45 crc kubenswrapper[4857]: I1128 15:01:45.594707 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bgqqk\" (UniqueName: \"kubernetes.io/projected/e8326720-d37a-4506-93ce-1bec95cfebf8-kube-api-access-bgqqk\") pod \"e8326720-d37a-4506-93ce-1bec95cfebf8\" (UID: \"e8326720-d37a-4506-93ce-1bec95cfebf8\") " Nov 28 15:01:45 crc kubenswrapper[4857]: I1128 15:01:45.594789 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8326720-d37a-4506-93ce-1bec95cfebf8-combined-ca-bundle\") pod \"e8326720-d37a-4506-93ce-1bec95cfebf8\" (UID: \"e8326720-d37a-4506-93ce-1bec95cfebf8\") " Nov 28 15:01:45 crc kubenswrapper[4857]: I1128 15:01:45.594919 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e8326720-d37a-4506-93ce-1bec95cfebf8-db-sync-config-data\") pod \"e8326720-d37a-4506-93ce-1bec95cfebf8\" (UID: \"e8326720-d37a-4506-93ce-1bec95cfebf8\") " Nov 28 15:01:45 crc kubenswrapper[4857]: I1128 15:01:45.607150 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8326720-d37a-4506-93ce-1bec95cfebf8-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "e8326720-d37a-4506-93ce-1bec95cfebf8" (UID: "e8326720-d37a-4506-93ce-1bec95cfebf8"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:01:45 crc kubenswrapper[4857]: I1128 15:01:45.607185 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8326720-d37a-4506-93ce-1bec95cfebf8-kube-api-access-bgqqk" (OuterVolumeSpecName: "kube-api-access-bgqqk") pod "e8326720-d37a-4506-93ce-1bec95cfebf8" (UID: "e8326720-d37a-4506-93ce-1bec95cfebf8"). InnerVolumeSpecName "kube-api-access-bgqqk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:01:45 crc kubenswrapper[4857]: I1128 15:01:45.620036 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8326720-d37a-4506-93ce-1bec95cfebf8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e8326720-d37a-4506-93ce-1bec95cfebf8" (UID: "e8326720-d37a-4506-93ce-1bec95cfebf8"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:01:45 crc kubenswrapper[4857]: I1128 15:01:45.697801 4857 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/e8326720-d37a-4506-93ce-1bec95cfebf8-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:01:45 crc kubenswrapper[4857]: I1128 15:01:45.697837 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bgqqk\" (UniqueName: \"kubernetes.io/projected/e8326720-d37a-4506-93ce-1bec95cfebf8-kube-api-access-bgqqk\") on node \"crc\" DevicePath \"\"" Nov 28 15:01:45 crc kubenswrapper[4857]: I1128 15:01:45.697874 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8326720-d37a-4506-93ce-1bec95cfebf8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.047292 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-dwvtm" event={"ID":"e8326720-d37a-4506-93ce-1bec95cfebf8","Type":"ContainerDied","Data":"bbba5137a384e291cc9674dc549baf1e37629ce62e4fbdc89623259b95ce5313"} Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.047780 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bbba5137a384e291cc9674dc549baf1e37629ce62e4fbdc89623259b95ce5313" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.047533 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-dwvtm" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.292405 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-6cf5b4dc75-h9p5p"] Nov 28 15:01:46 crc kubenswrapper[4857]: E1128 15:01:46.292818 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8326720-d37a-4506-93ce-1bec95cfebf8" containerName="barbican-db-sync" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.292834 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8326720-d37a-4506-93ce-1bec95cfebf8" containerName="barbican-db-sync" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.293078 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8326720-d37a-4506-93ce-1bec95cfebf8" containerName="barbican-db-sync" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.294121 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-6cf5b4dc75-h9p5p" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.301496 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.302487 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-kgll8" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.308224 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.308758 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-6c66ddbb4-c47hg"] Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.310773 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-6c66ddbb4-c47hg" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.313468 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.320632 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-6cf5b4dc75-h9p5p"] Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.357043 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-6c66ddbb4-c47hg"] Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.412063 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ad527350-39bf-416d-9b8b-0896ac4012d6-logs\") pod \"barbican-worker-6cf5b4dc75-h9p5p\" (UID: \"ad527350-39bf-416d-9b8b-0896ac4012d6\") " pod="openstack/barbican-worker-6cf5b4dc75-h9p5p" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.412125 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bnckh\" (UniqueName: \"kubernetes.io/projected/ad527350-39bf-416d-9b8b-0896ac4012d6-kube-api-access-bnckh\") pod \"barbican-worker-6cf5b4dc75-h9p5p\" (UID: \"ad527350-39bf-416d-9b8b-0896ac4012d6\") " pod="openstack/barbican-worker-6cf5b4dc75-h9p5p" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.412168 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad527350-39bf-416d-9b8b-0896ac4012d6-combined-ca-bundle\") pod \"barbican-worker-6cf5b4dc75-h9p5p\" (UID: \"ad527350-39bf-416d-9b8b-0896ac4012d6\") " pod="openstack/barbican-worker-6cf5b4dc75-h9p5p" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.412214 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ad527350-39bf-416d-9b8b-0896ac4012d6-config-data-custom\") pod \"barbican-worker-6cf5b4dc75-h9p5p\" (UID: \"ad527350-39bf-416d-9b8b-0896ac4012d6\") " pod="openstack/barbican-worker-6cf5b4dc75-h9p5p" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.412241 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd23dc94-d5ca-4419-a180-56bb75922c4b-config-data\") pod \"barbican-keystone-listener-6c66ddbb4-c47hg\" (UID: \"fd23dc94-d5ca-4419-a180-56bb75922c4b\") " pod="openstack/barbican-keystone-listener-6c66ddbb4-c47hg" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.412280 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad527350-39bf-416d-9b8b-0896ac4012d6-config-data\") pod \"barbican-worker-6cf5b4dc75-h9p5p\" (UID: \"ad527350-39bf-416d-9b8b-0896ac4012d6\") " pod="openstack/barbican-worker-6cf5b4dc75-h9p5p" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.412340 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd23dc94-d5ca-4419-a180-56bb75922c4b-combined-ca-bundle\") pod \"barbican-keystone-listener-6c66ddbb4-c47hg\" (UID: \"fd23dc94-d5ca-4419-a180-56bb75922c4b\") " 
pod="openstack/barbican-keystone-listener-6c66ddbb4-c47hg" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.412370 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lbrl2\" (UniqueName: \"kubernetes.io/projected/fd23dc94-d5ca-4419-a180-56bb75922c4b-kube-api-access-lbrl2\") pod \"barbican-keystone-listener-6c66ddbb4-c47hg\" (UID: \"fd23dc94-d5ca-4419-a180-56bb75922c4b\") " pod="openstack/barbican-keystone-listener-6c66ddbb4-c47hg" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.412389 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fd23dc94-d5ca-4419-a180-56bb75922c4b-config-data-custom\") pod \"barbican-keystone-listener-6c66ddbb4-c47hg\" (UID: \"fd23dc94-d5ca-4419-a180-56bb75922c4b\") " pod="openstack/barbican-keystone-listener-6c66ddbb4-c47hg" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.412433 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fd23dc94-d5ca-4419-a180-56bb75922c4b-logs\") pod \"barbican-keystone-listener-6c66ddbb4-c47hg\" (UID: \"fd23dc94-d5ca-4419-a180-56bb75922c4b\") " pod="openstack/barbican-keystone-listener-6c66ddbb4-c47hg" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.430739 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6675bbbc7-9gf5q"] Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.434691 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6675bbbc7-9gf5q" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.450871 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6675bbbc7-9gf5q"] Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.513918 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd23dc94-d5ca-4419-a180-56bb75922c4b-config-data\") pod \"barbican-keystone-listener-6c66ddbb4-c47hg\" (UID: \"fd23dc94-d5ca-4419-a180-56bb75922c4b\") " pod="openstack/barbican-keystone-listener-6c66ddbb4-c47hg" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.514011 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad527350-39bf-416d-9b8b-0896ac4012d6-config-data\") pod \"barbican-worker-6cf5b4dc75-h9p5p\" (UID: \"ad527350-39bf-416d-9b8b-0896ac4012d6\") " pod="openstack/barbican-worker-6cf5b4dc75-h9p5p" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.514089 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd23dc94-d5ca-4419-a180-56bb75922c4b-combined-ca-bundle\") pod \"barbican-keystone-listener-6c66ddbb4-c47hg\" (UID: \"fd23dc94-d5ca-4419-a180-56bb75922c4b\") " pod="openstack/barbican-keystone-listener-6c66ddbb4-c47hg" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.514119 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67ca415f-ed01-4831-a0b7-43f1277ba04b-config\") pod \"dnsmasq-dns-6675bbbc7-9gf5q\" (UID: \"67ca415f-ed01-4831-a0b7-43f1277ba04b\") " pod="openstack/dnsmasq-dns-6675bbbc7-9gf5q" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.514148 4857 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lbrl2\" (UniqueName: \"kubernetes.io/projected/fd23dc94-d5ca-4419-a180-56bb75922c4b-kube-api-access-lbrl2\") pod \"barbican-keystone-listener-6c66ddbb4-c47hg\" (UID: \"fd23dc94-d5ca-4419-a180-56bb75922c4b\") " pod="openstack/barbican-keystone-listener-6c66ddbb4-c47hg" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.514169 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fd23dc94-d5ca-4419-a180-56bb75922c4b-config-data-custom\") pod \"barbican-keystone-listener-6c66ddbb4-c47hg\" (UID: \"fd23dc94-d5ca-4419-a180-56bb75922c4b\") " pod="openstack/barbican-keystone-listener-6c66ddbb4-c47hg" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.514199 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fd23dc94-d5ca-4419-a180-56bb75922c4b-logs\") pod \"barbican-keystone-listener-6c66ddbb4-c47hg\" (UID: \"fd23dc94-d5ca-4419-a180-56bb75922c4b\") " pod="openstack/barbican-keystone-listener-6c66ddbb4-c47hg" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.514240 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/67ca415f-ed01-4831-a0b7-43f1277ba04b-dns-svc\") pod \"dnsmasq-dns-6675bbbc7-9gf5q\" (UID: \"67ca415f-ed01-4831-a0b7-43f1277ba04b\") " pod="openstack/dnsmasq-dns-6675bbbc7-9gf5q" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.514469 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/67ca415f-ed01-4831-a0b7-43f1277ba04b-ovsdbserver-sb\") pod \"dnsmasq-dns-6675bbbc7-9gf5q\" (UID: \"67ca415f-ed01-4831-a0b7-43f1277ba04b\") " pod="openstack/dnsmasq-dns-6675bbbc7-9gf5q" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.514579 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ad527350-39bf-416d-9b8b-0896ac4012d6-logs\") pod \"barbican-worker-6cf5b4dc75-h9p5p\" (UID: \"ad527350-39bf-416d-9b8b-0896ac4012d6\") " pod="openstack/barbican-worker-6cf5b4dc75-h9p5p" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.514601 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bnckh\" (UniqueName: \"kubernetes.io/projected/ad527350-39bf-416d-9b8b-0896ac4012d6-kube-api-access-bnckh\") pod \"barbican-worker-6cf5b4dc75-h9p5p\" (UID: \"ad527350-39bf-416d-9b8b-0896ac4012d6\") " pod="openstack/barbican-worker-6cf5b4dc75-h9p5p" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.514646 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l2gqm\" (UniqueName: \"kubernetes.io/projected/67ca415f-ed01-4831-a0b7-43f1277ba04b-kube-api-access-l2gqm\") pod \"dnsmasq-dns-6675bbbc7-9gf5q\" (UID: \"67ca415f-ed01-4831-a0b7-43f1277ba04b\") " pod="openstack/dnsmasq-dns-6675bbbc7-9gf5q" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.514685 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/67ca415f-ed01-4831-a0b7-43f1277ba04b-ovsdbserver-nb\") pod \"dnsmasq-dns-6675bbbc7-9gf5q\" (UID: \"67ca415f-ed01-4831-a0b7-43f1277ba04b\") " 
pod="openstack/dnsmasq-dns-6675bbbc7-9gf5q" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.514720 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad527350-39bf-416d-9b8b-0896ac4012d6-combined-ca-bundle\") pod \"barbican-worker-6cf5b4dc75-h9p5p\" (UID: \"ad527350-39bf-416d-9b8b-0896ac4012d6\") " pod="openstack/barbican-worker-6cf5b4dc75-h9p5p" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.514745 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ad527350-39bf-416d-9b8b-0896ac4012d6-config-data-custom\") pod \"barbican-worker-6cf5b4dc75-h9p5p\" (UID: \"ad527350-39bf-416d-9b8b-0896ac4012d6\") " pod="openstack/barbican-worker-6cf5b4dc75-h9p5p" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.514856 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fd23dc94-d5ca-4419-a180-56bb75922c4b-logs\") pod \"barbican-keystone-listener-6c66ddbb4-c47hg\" (UID: \"fd23dc94-d5ca-4419-a180-56bb75922c4b\") " pod="openstack/barbican-keystone-listener-6c66ddbb4-c47hg" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.517678 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ad527350-39bf-416d-9b8b-0896ac4012d6-logs\") pod \"barbican-worker-6cf5b4dc75-h9p5p\" (UID: \"ad527350-39bf-416d-9b8b-0896ac4012d6\") " pod="openstack/barbican-worker-6cf5b4dc75-h9p5p" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.521692 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fd23dc94-d5ca-4419-a180-56bb75922c4b-config-data-custom\") pod \"barbican-keystone-listener-6c66ddbb4-c47hg\" (UID: \"fd23dc94-d5ca-4419-a180-56bb75922c4b\") " pod="openstack/barbican-keystone-listener-6c66ddbb4-c47hg" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.522245 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad527350-39bf-416d-9b8b-0896ac4012d6-config-data\") pod \"barbican-worker-6cf5b4dc75-h9p5p\" (UID: \"ad527350-39bf-416d-9b8b-0896ac4012d6\") " pod="openstack/barbican-worker-6cf5b4dc75-h9p5p" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.522765 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd23dc94-d5ca-4419-a180-56bb75922c4b-config-data\") pod \"barbican-keystone-listener-6c66ddbb4-c47hg\" (UID: \"fd23dc94-d5ca-4419-a180-56bb75922c4b\") " pod="openstack/barbican-keystone-listener-6c66ddbb4-c47hg" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.535591 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd23dc94-d5ca-4419-a180-56bb75922c4b-combined-ca-bundle\") pod \"barbican-keystone-listener-6c66ddbb4-c47hg\" (UID: \"fd23dc94-d5ca-4419-a180-56bb75922c4b\") " pod="openstack/barbican-keystone-listener-6c66ddbb4-c47hg" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.536587 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ad527350-39bf-416d-9b8b-0896ac4012d6-config-data-custom\") pod \"barbican-worker-6cf5b4dc75-h9p5p\" (UID: 
\"ad527350-39bf-416d-9b8b-0896ac4012d6\") " pod="openstack/barbican-worker-6cf5b4dc75-h9p5p" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.546369 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad527350-39bf-416d-9b8b-0896ac4012d6-combined-ca-bundle\") pod \"barbican-worker-6cf5b4dc75-h9p5p\" (UID: \"ad527350-39bf-416d-9b8b-0896ac4012d6\") " pod="openstack/barbican-worker-6cf5b4dc75-h9p5p" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.551383 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bnckh\" (UniqueName: \"kubernetes.io/projected/ad527350-39bf-416d-9b8b-0896ac4012d6-kube-api-access-bnckh\") pod \"barbican-worker-6cf5b4dc75-h9p5p\" (UID: \"ad527350-39bf-416d-9b8b-0896ac4012d6\") " pod="openstack/barbican-worker-6cf5b4dc75-h9p5p" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.554617 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lbrl2\" (UniqueName: \"kubernetes.io/projected/fd23dc94-d5ca-4419-a180-56bb75922c4b-kube-api-access-lbrl2\") pod \"barbican-keystone-listener-6c66ddbb4-c47hg\" (UID: \"fd23dc94-d5ca-4419-a180-56bb75922c4b\") " pod="openstack/barbican-keystone-listener-6c66ddbb4-c47hg" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.582014 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-5865985776-dd6dg"] Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.583518 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5865985776-dd6dg" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.599422 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.601153 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5865985776-dd6dg"] Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.615680 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67ca415f-ed01-4831-a0b7-43f1277ba04b-config\") pod \"dnsmasq-dns-6675bbbc7-9gf5q\" (UID: \"67ca415f-ed01-4831-a0b7-43f1277ba04b\") " pod="openstack/dnsmasq-dns-6675bbbc7-9gf5q" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.616006 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/67ca415f-ed01-4831-a0b7-43f1277ba04b-dns-svc\") pod \"dnsmasq-dns-6675bbbc7-9gf5q\" (UID: \"67ca415f-ed01-4831-a0b7-43f1277ba04b\") " pod="openstack/dnsmasq-dns-6675bbbc7-9gf5q" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.616120 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/67ca415f-ed01-4831-a0b7-43f1277ba04b-ovsdbserver-sb\") pod \"dnsmasq-dns-6675bbbc7-9gf5q\" (UID: \"67ca415f-ed01-4831-a0b7-43f1277ba04b\") " pod="openstack/dnsmasq-dns-6675bbbc7-9gf5q" Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.616211 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l2gqm\" (UniqueName: \"kubernetes.io/projected/67ca415f-ed01-4831-a0b7-43f1277ba04b-kube-api-access-l2gqm\") pod \"dnsmasq-dns-6675bbbc7-9gf5q\" (UID: \"67ca415f-ed01-4831-a0b7-43f1277ba04b\") " pod="openstack/dnsmasq-dns-6675bbbc7-9gf5q" Nov 28 15:01:46 crc 
Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.617208 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/67ca415f-ed01-4831-a0b7-43f1277ba04b-ovsdbserver-nb\") pod \"dnsmasq-dns-6675bbbc7-9gf5q\" (UID: \"67ca415f-ed01-4831-a0b7-43f1277ba04b\") " pod="openstack/dnsmasq-dns-6675bbbc7-9gf5q"
Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.617788 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67ca415f-ed01-4831-a0b7-43f1277ba04b-config\") pod \"dnsmasq-dns-6675bbbc7-9gf5q\" (UID: \"67ca415f-ed01-4831-a0b7-43f1277ba04b\") " pod="openstack/dnsmasq-dns-6675bbbc7-9gf5q"
Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.618440 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/67ca415f-ed01-4831-a0b7-43f1277ba04b-dns-svc\") pod \"dnsmasq-dns-6675bbbc7-9gf5q\" (UID: \"67ca415f-ed01-4831-a0b7-43f1277ba04b\") " pod="openstack/dnsmasq-dns-6675bbbc7-9gf5q"
Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.619014 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/67ca415f-ed01-4831-a0b7-43f1277ba04b-ovsdbserver-sb\") pod \"dnsmasq-dns-6675bbbc7-9gf5q\" (UID: \"67ca415f-ed01-4831-a0b7-43f1277ba04b\") " pod="openstack/dnsmasq-dns-6675bbbc7-9gf5q"
Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.626142 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-6cf5b4dc75-h9p5p"
Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.654234 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-6c66ddbb4-c47hg"
Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.673156 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l2gqm\" (UniqueName: \"kubernetes.io/projected/67ca415f-ed01-4831-a0b7-43f1277ba04b-kube-api-access-l2gqm\") pod \"dnsmasq-dns-6675bbbc7-9gf5q\" (UID: \"67ca415f-ed01-4831-a0b7-43f1277ba04b\") " pod="openstack/dnsmasq-dns-6675bbbc7-9gf5q"
Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.720190 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d7430258-c1e6-4046-8da2-e35900b3647d-config-data-custom\") pod \"barbican-api-5865985776-dd6dg\" (UID: \"d7430258-c1e6-4046-8da2-e35900b3647d\") " pod="openstack/barbican-api-5865985776-dd6dg"
Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.720244 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d7430258-c1e6-4046-8da2-e35900b3647d-logs\") pod \"barbican-api-5865985776-dd6dg\" (UID: \"d7430258-c1e6-4046-8da2-e35900b3647d\") " pod="openstack/barbican-api-5865985776-dd6dg"
Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.720380 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7430258-c1e6-4046-8da2-e35900b3647d-combined-ca-bundle\") pod \"barbican-api-5865985776-dd6dg\" (UID: \"d7430258-c1e6-4046-8da2-e35900b3647d\") " pod="openstack/barbican-api-5865985776-dd6dg"
Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.720424 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7430258-c1e6-4046-8da2-e35900b3647d-config-data\") pod \"barbican-api-5865985776-dd6dg\" (UID: \"d7430258-c1e6-4046-8da2-e35900b3647d\") " pod="openstack/barbican-api-5865985776-dd6dg"
Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.720453 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h98c9\" (UniqueName: \"kubernetes.io/projected/d7430258-c1e6-4046-8da2-e35900b3647d-kube-api-access-h98c9\") pod \"barbican-api-5865985776-dd6dg\" (UID: \"d7430258-c1e6-4046-8da2-e35900b3647d\") " pod="openstack/barbican-api-5865985776-dd6dg"
Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.758406 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6675bbbc7-9gf5q"
Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.828233 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7430258-c1e6-4046-8da2-e35900b3647d-combined-ca-bundle\") pod \"barbican-api-5865985776-dd6dg\" (UID: \"d7430258-c1e6-4046-8da2-e35900b3647d\") " pod="openstack/barbican-api-5865985776-dd6dg"
Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.828319 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7430258-c1e6-4046-8da2-e35900b3647d-config-data\") pod \"barbican-api-5865985776-dd6dg\" (UID: \"d7430258-c1e6-4046-8da2-e35900b3647d\") " pod="openstack/barbican-api-5865985776-dd6dg"
Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.828402 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h98c9\" (UniqueName: \"kubernetes.io/projected/d7430258-c1e6-4046-8da2-e35900b3647d-kube-api-access-h98c9\") pod \"barbican-api-5865985776-dd6dg\" (UID: \"d7430258-c1e6-4046-8da2-e35900b3647d\") " pod="openstack/barbican-api-5865985776-dd6dg"
Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.828489 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d7430258-c1e6-4046-8da2-e35900b3647d-config-data-custom\") pod \"barbican-api-5865985776-dd6dg\" (UID: \"d7430258-c1e6-4046-8da2-e35900b3647d\") " pod="openstack/barbican-api-5865985776-dd6dg"
Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.828522 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d7430258-c1e6-4046-8da2-e35900b3647d-logs\") pod \"barbican-api-5865985776-dd6dg\" (UID: \"d7430258-c1e6-4046-8da2-e35900b3647d\") " pod="openstack/barbican-api-5865985776-dd6dg"
Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.829225 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d7430258-c1e6-4046-8da2-e35900b3647d-logs\") pod \"barbican-api-5865985776-dd6dg\" (UID: \"d7430258-c1e6-4046-8da2-e35900b3647d\") " pod="openstack/barbican-api-5865985776-dd6dg"
Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.832014 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7430258-c1e6-4046-8da2-e35900b3647d-combined-ca-bundle\") pod \"barbican-api-5865985776-dd6dg\" (UID: \"d7430258-c1e6-4046-8da2-e35900b3647d\") " pod="openstack/barbican-api-5865985776-dd6dg"
Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.836456 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7430258-c1e6-4046-8da2-e35900b3647d-config-data\") pod \"barbican-api-5865985776-dd6dg\" (UID: \"d7430258-c1e6-4046-8da2-e35900b3647d\") " pod="openstack/barbican-api-5865985776-dd6dg"
Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.839527 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d7430258-c1e6-4046-8da2-e35900b3647d-config-data-custom\") pod \"barbican-api-5865985776-dd6dg\" (UID: \"d7430258-c1e6-4046-8da2-e35900b3647d\") " pod="openstack/barbican-api-5865985776-dd6dg"
Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.851486 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h98c9\" (UniqueName: \"kubernetes.io/projected/d7430258-c1e6-4046-8da2-e35900b3647d-kube-api-access-h98c9\") pod \"barbican-api-5865985776-dd6dg\" (UID: \"d7430258-c1e6-4046-8da2-e35900b3647d\") " pod="openstack/barbican-api-5865985776-dd6dg"
Nov 28 15:01:46 crc kubenswrapper[4857]: I1128 15:01:46.913451 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5865985776-dd6dg"
Nov 28 15:01:47 crc kubenswrapper[4857]: I1128 15:01:47.234721 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6675bbbc7-9gf5q"]
Nov 28 15:01:47 crc kubenswrapper[4857]: I1128 15:01:47.247683 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-6cf5b4dc75-h9p5p"]
Nov 28 15:01:47 crc kubenswrapper[4857]: W1128 15:01:47.255090 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod67ca415f_ed01_4831_a0b7_43f1277ba04b.slice/crio-8ce9c3ec906201bc9d182533025fd53b449bba5e9f23ded9dd58ee6280e3ddd6 WatchSource:0}: Error finding container 8ce9c3ec906201bc9d182533025fd53b449bba5e9f23ded9dd58ee6280e3ddd6: Status 404 returned error can't find the container with id 8ce9c3ec906201bc9d182533025fd53b449bba5e9f23ded9dd58ee6280e3ddd6
Nov 28 15:01:47 crc kubenswrapper[4857]: I1128 15:01:47.342024 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-6c66ddbb4-c47hg"]
Nov 28 15:01:47 crc kubenswrapper[4857]: W1128 15:01:47.356198 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfd23dc94_d5ca_4419_a180_56bb75922c4b.slice/crio-bdd51d47d720cbd938c9efcbb3207f59331bde28a3891d5ba8e273eb82ba89c9 WatchSource:0}: Error finding container bdd51d47d720cbd938c9efcbb3207f59331bde28a3891d5ba8e273eb82ba89c9: Status 404 returned error can't find the container with id bdd51d47d720cbd938c9efcbb3207f59331bde28a3891d5ba8e273eb82ba89c9
Nov 28 15:01:47 crc kubenswrapper[4857]: I1128 15:01:47.438650 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5865985776-dd6dg"]
Nov 28 15:01:48 crc kubenswrapper[4857]: I1128 15:01:48.081209 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6cf5b4dc75-h9p5p" event={"ID":"ad527350-39bf-416d-9b8b-0896ac4012d6","Type":"ContainerStarted","Data":"da77d728b378e0a1480ccf37d056cdde62c469c09576e26d1a0a8561545f33bf"}
Nov 28 15:01:48 crc kubenswrapper[4857]: I1128 15:01:48.083028 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6cf5b4dc75-h9p5p" event={"ID":"ad527350-39bf-416d-9b8b-0896ac4012d6","Type":"ContainerStarted","Data":"cc9ddb1f9058ed44c48ddbe8778c9bb68b3581a11ac9279c49e794b7c485dbac"}
Nov 28 15:01:48 crc kubenswrapper[4857]: I1128 15:01:48.083175 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6cf5b4dc75-h9p5p" event={"ID":"ad527350-39bf-416d-9b8b-0896ac4012d6","Type":"ContainerStarted","Data":"499861a568073f2e08e970c368852758cddb72832a476b009e3f6d1386ac383e"}
Nov 28 15:01:48 crc kubenswrapper[4857]: I1128 15:01:48.085628 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5865985776-dd6dg" event={"ID":"d7430258-c1e6-4046-8da2-e35900b3647d","Type":"ContainerStarted","Data":"c4655fe9f23ad8b51e9414462458cb928c97f1c5f46656033a380f6355e6f39c"}
event={"ID":"d7430258-c1e6-4046-8da2-e35900b3647d","Type":"ContainerStarted","Data":"c4655fe9f23ad8b51e9414462458cb928c97f1c5f46656033a380f6355e6f39c"} Nov 28 15:01:48 crc kubenswrapper[4857]: I1128 15:01:48.085677 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5865985776-dd6dg" event={"ID":"d7430258-c1e6-4046-8da2-e35900b3647d","Type":"ContainerStarted","Data":"26ed86432fe2e004e9742363d16da2b874d7649b6e08a5ca2fe23ab193635586"} Nov 28 15:01:48 crc kubenswrapper[4857]: I1128 15:01:48.085688 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5865985776-dd6dg" event={"ID":"d7430258-c1e6-4046-8da2-e35900b3647d","Type":"ContainerStarted","Data":"0a650ef2f0e71adf68eae27cb10e59df09de57e448cd815d818e66136de43786"} Nov 28 15:01:48 crc kubenswrapper[4857]: I1128 15:01:48.085791 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5865985776-dd6dg" Nov 28 15:01:48 crc kubenswrapper[4857]: I1128 15:01:48.085902 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5865985776-dd6dg" Nov 28 15:01:48 crc kubenswrapper[4857]: I1128 15:01:48.088726 4857 generic.go:334] "Generic (PLEG): container finished" podID="67ca415f-ed01-4831-a0b7-43f1277ba04b" containerID="37ebf6ab47b3420c84734de45e1446b9dd1a5049df094b2630d04dd2e116f520" exitCode=0 Nov 28 15:01:48 crc kubenswrapper[4857]: I1128 15:01:48.088863 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6675bbbc7-9gf5q" event={"ID":"67ca415f-ed01-4831-a0b7-43f1277ba04b","Type":"ContainerDied","Data":"37ebf6ab47b3420c84734de45e1446b9dd1a5049df094b2630d04dd2e116f520"} Nov 28 15:01:48 crc kubenswrapper[4857]: I1128 15:01:48.089166 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6675bbbc7-9gf5q" event={"ID":"67ca415f-ed01-4831-a0b7-43f1277ba04b","Type":"ContainerStarted","Data":"8ce9c3ec906201bc9d182533025fd53b449bba5e9f23ded9dd58ee6280e3ddd6"} Nov 28 15:01:48 crc kubenswrapper[4857]: I1128 15:01:48.106582 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-6cf5b4dc75-h9p5p" podStartSLOduration=2.106561257 podStartE2EDuration="2.106561257s" podCreationTimestamp="2025-11-28 15:01:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:01:48.104300176 +0000 UTC m=+5558.228241623" watchObservedRunningTime="2025-11-28 15:01:48.106561257 +0000 UTC m=+5558.230502694" Nov 28 15:01:48 crc kubenswrapper[4857]: I1128 15:01:48.107247 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6c66ddbb4-c47hg" event={"ID":"fd23dc94-d5ca-4419-a180-56bb75922c4b","Type":"ContainerStarted","Data":"a073903fe8fa2a0c3c7b600e400b06fed5e3208dd77a815b4aa2434bd3614e0b"} Nov 28 15:01:48 crc kubenswrapper[4857]: I1128 15:01:48.107308 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6c66ddbb4-c47hg" event={"ID":"fd23dc94-d5ca-4419-a180-56bb75922c4b","Type":"ContainerStarted","Data":"2237e7b9c135d6e7b2ec972ffb73050cfc2c860913233e20f6755ef2256c18f1"} Nov 28 15:01:48 crc kubenswrapper[4857]: I1128 15:01:48.107321 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6c66ddbb4-c47hg" 
event={"ID":"fd23dc94-d5ca-4419-a180-56bb75922c4b","Type":"ContainerStarted","Data":"bdd51d47d720cbd938c9efcbb3207f59331bde28a3891d5ba8e273eb82ba89c9"} Nov 28 15:01:48 crc kubenswrapper[4857]: I1128 15:01:48.133762 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-5865985776-dd6dg" podStartSLOduration=2.133740653 podStartE2EDuration="2.133740653s" podCreationTimestamp="2025-11-28 15:01:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:01:48.132469639 +0000 UTC m=+5558.256411086" watchObservedRunningTime="2025-11-28 15:01:48.133740653 +0000 UTC m=+5558.257682090" Nov 28 15:01:48 crc kubenswrapper[4857]: I1128 15:01:48.180722 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-6c66ddbb4-c47hg" podStartSLOduration=2.180703207 podStartE2EDuration="2.180703207s" podCreationTimestamp="2025-11-28 15:01:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:01:48.179521606 +0000 UTC m=+5558.303463053" watchObservedRunningTime="2025-11-28 15:01:48.180703207 +0000 UTC m=+5558.304644644" Nov 28 15:01:49 crc kubenswrapper[4857]: I1128 15:01:49.120471 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6675bbbc7-9gf5q" event={"ID":"67ca415f-ed01-4831-a0b7-43f1277ba04b","Type":"ContainerStarted","Data":"03fd8797a776b4ea2d54a487cede8214e72aa938341dbe08186c24fd89318987"} Nov 28 15:01:49 crc kubenswrapper[4857]: I1128 15:01:49.150832 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6675bbbc7-9gf5q" podStartSLOduration=3.15081662 podStartE2EDuration="3.15081662s" podCreationTimestamp="2025-11-28 15:01:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:01:49.147129521 +0000 UTC m=+5559.271070958" watchObservedRunningTime="2025-11-28 15:01:49.15081662 +0000 UTC m=+5559.274758047" Nov 28 15:01:50 crc kubenswrapper[4857]: I1128 15:01:50.129900 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6675bbbc7-9gf5q" Nov 28 15:01:56 crc kubenswrapper[4857]: I1128 15:01:56.760165 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6675bbbc7-9gf5q" Nov 28 15:01:56 crc kubenswrapper[4857]: I1128 15:01:56.847132 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84579fb745-sxp7x"] Nov 28 15:01:56 crc kubenswrapper[4857]: I1128 15:01:56.847374 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-84579fb745-sxp7x" podUID="489c405d-7a9b-47ec-9a23-77c51bc73f0a" containerName="dnsmasq-dns" containerID="cri-o://1bed143d28f937e7dd46ca81891dfbcf1ef54efebf63f89151307deb62d060a1" gracePeriod=10 Nov 28 15:01:57 crc kubenswrapper[4857]: I1128 15:01:57.213711 4857 generic.go:334] "Generic (PLEG): container finished" podID="489c405d-7a9b-47ec-9a23-77c51bc73f0a" containerID="1bed143d28f937e7dd46ca81891dfbcf1ef54efebf63f89151307deb62d060a1" exitCode=0 Nov 28 15:01:57 crc kubenswrapper[4857]: I1128 15:01:57.214062 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84579fb745-sxp7x" 
event={"ID":"489c405d-7a9b-47ec-9a23-77c51bc73f0a","Type":"ContainerDied","Data":"1bed143d28f937e7dd46ca81891dfbcf1ef54efebf63f89151307deb62d060a1"} Nov 28 15:01:57 crc kubenswrapper[4857]: I1128 15:01:57.392025 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84579fb745-sxp7x" Nov 28 15:01:57 crc kubenswrapper[4857]: I1128 15:01:57.496003 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/489c405d-7a9b-47ec-9a23-77c51bc73f0a-ovsdbserver-nb\") pod \"489c405d-7a9b-47ec-9a23-77c51bc73f0a\" (UID: \"489c405d-7a9b-47ec-9a23-77c51bc73f0a\") " Nov 28 15:01:57 crc kubenswrapper[4857]: I1128 15:01:57.497513 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/489c405d-7a9b-47ec-9a23-77c51bc73f0a-ovsdbserver-sb\") pod \"489c405d-7a9b-47ec-9a23-77c51bc73f0a\" (UID: \"489c405d-7a9b-47ec-9a23-77c51bc73f0a\") " Nov 28 15:01:57 crc kubenswrapper[4857]: I1128 15:01:57.497604 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/489c405d-7a9b-47ec-9a23-77c51bc73f0a-dns-svc\") pod \"489c405d-7a9b-47ec-9a23-77c51bc73f0a\" (UID: \"489c405d-7a9b-47ec-9a23-77c51bc73f0a\") " Nov 28 15:01:57 crc kubenswrapper[4857]: I1128 15:01:57.497649 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d5qb9\" (UniqueName: \"kubernetes.io/projected/489c405d-7a9b-47ec-9a23-77c51bc73f0a-kube-api-access-d5qb9\") pod \"489c405d-7a9b-47ec-9a23-77c51bc73f0a\" (UID: \"489c405d-7a9b-47ec-9a23-77c51bc73f0a\") " Nov 28 15:01:57 crc kubenswrapper[4857]: I1128 15:01:57.497733 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/489c405d-7a9b-47ec-9a23-77c51bc73f0a-config\") pod \"489c405d-7a9b-47ec-9a23-77c51bc73f0a\" (UID: \"489c405d-7a9b-47ec-9a23-77c51bc73f0a\") " Nov 28 15:01:57 crc kubenswrapper[4857]: I1128 15:01:57.516140 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/489c405d-7a9b-47ec-9a23-77c51bc73f0a-kube-api-access-d5qb9" (OuterVolumeSpecName: "kube-api-access-d5qb9") pod "489c405d-7a9b-47ec-9a23-77c51bc73f0a" (UID: "489c405d-7a9b-47ec-9a23-77c51bc73f0a"). InnerVolumeSpecName "kube-api-access-d5qb9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:01:57 crc kubenswrapper[4857]: I1128 15:01:57.538154 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/489c405d-7a9b-47ec-9a23-77c51bc73f0a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "489c405d-7a9b-47ec-9a23-77c51bc73f0a" (UID: "489c405d-7a9b-47ec-9a23-77c51bc73f0a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:01:57 crc kubenswrapper[4857]: I1128 15:01:57.541343 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/489c405d-7a9b-47ec-9a23-77c51bc73f0a-config" (OuterVolumeSpecName: "config") pod "489c405d-7a9b-47ec-9a23-77c51bc73f0a" (UID: "489c405d-7a9b-47ec-9a23-77c51bc73f0a"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:01:57 crc kubenswrapper[4857]: I1128 15:01:57.547232 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/489c405d-7a9b-47ec-9a23-77c51bc73f0a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "489c405d-7a9b-47ec-9a23-77c51bc73f0a" (UID: "489c405d-7a9b-47ec-9a23-77c51bc73f0a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:01:57 crc kubenswrapper[4857]: I1128 15:01:57.558929 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/489c405d-7a9b-47ec-9a23-77c51bc73f0a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "489c405d-7a9b-47ec-9a23-77c51bc73f0a" (UID: "489c405d-7a9b-47ec-9a23-77c51bc73f0a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:01:57 crc kubenswrapper[4857]: I1128 15:01:57.603162 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/489c405d-7a9b-47ec-9a23-77c51bc73f0a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 15:01:57 crc kubenswrapper[4857]: I1128 15:01:57.603195 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/489c405d-7a9b-47ec-9a23-77c51bc73f0a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 15:01:57 crc kubenswrapper[4857]: I1128 15:01:57.603206 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/489c405d-7a9b-47ec-9a23-77c51bc73f0a-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 15:01:57 crc kubenswrapper[4857]: I1128 15:01:57.603217 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d5qb9\" (UniqueName: \"kubernetes.io/projected/489c405d-7a9b-47ec-9a23-77c51bc73f0a-kube-api-access-d5qb9\") on node \"crc\" DevicePath \"\"" Nov 28 15:01:57 crc kubenswrapper[4857]: I1128 15:01:57.603228 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/489c405d-7a9b-47ec-9a23-77c51bc73f0a-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:01:58 crc kubenswrapper[4857]: I1128 15:01:58.232226 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-84579fb745-sxp7x" Nov 28 15:01:58 crc kubenswrapper[4857]: I1128 15:01:58.257600 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84579fb745-sxp7x" event={"ID":"489c405d-7a9b-47ec-9a23-77c51bc73f0a","Type":"ContainerDied","Data":"9393f3163363f8e20ae9199b263e4d0998a5c972481c0ac0d0809534649e18f5"} Nov 28 15:01:58 crc kubenswrapper[4857]: I1128 15:01:58.257692 4857 scope.go:117] "RemoveContainer" containerID="1bed143d28f937e7dd46ca81891dfbcf1ef54efebf63f89151307deb62d060a1" Nov 28 15:01:58 crc kubenswrapper[4857]: I1128 15:01:58.289760 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5865985776-dd6dg" Nov 28 15:01:58 crc kubenswrapper[4857]: I1128 15:01:58.305130 4857 scope.go:117] "RemoveContainer" containerID="316525afd043a12c14f7fe7680d359118a05c982c5f8aa8d79d9b8a742bce663" Nov 28 15:01:58 crc kubenswrapper[4857]: I1128 15:01:58.305483 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5865985776-dd6dg" Nov 28 15:01:58 crc kubenswrapper[4857]: I1128 15:01:58.325064 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84579fb745-sxp7x"] Nov 28 15:01:58 crc kubenswrapper[4857]: I1128 15:01:58.339106 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-84579fb745-sxp7x"] Nov 28 15:02:00 crc kubenswrapper[4857]: I1128 15:02:00.275036 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="489c405d-7a9b-47ec-9a23-77c51bc73f0a" path="/var/lib/kubelet/pods/489c405d-7a9b-47ec-9a23-77c51bc73f0a/volumes" Nov 28 15:02:11 crc kubenswrapper[4857]: I1128 15:02:11.061517 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-cnbmb"] Nov 28 15:02:11 crc kubenswrapper[4857]: E1128 15:02:11.062441 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="489c405d-7a9b-47ec-9a23-77c51bc73f0a" containerName="init" Nov 28 15:02:11 crc kubenswrapper[4857]: I1128 15:02:11.062452 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="489c405d-7a9b-47ec-9a23-77c51bc73f0a" containerName="init" Nov 28 15:02:11 crc kubenswrapper[4857]: E1128 15:02:11.062466 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="489c405d-7a9b-47ec-9a23-77c51bc73f0a" containerName="dnsmasq-dns" Nov 28 15:02:11 crc kubenswrapper[4857]: I1128 15:02:11.062471 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="489c405d-7a9b-47ec-9a23-77c51bc73f0a" containerName="dnsmasq-dns" Nov 28 15:02:11 crc kubenswrapper[4857]: I1128 15:02:11.062635 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="489c405d-7a9b-47ec-9a23-77c51bc73f0a" containerName="dnsmasq-dns" Nov 28 15:02:11 crc kubenswrapper[4857]: I1128 15:02:11.063184 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-cnbmb" Nov 28 15:02:11 crc kubenswrapper[4857]: I1128 15:02:11.091514 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-cnbmb"] Nov 28 15:02:11 crc kubenswrapper[4857]: I1128 15:02:11.163397 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-ed96-account-create-update-jvjfk"] Nov 28 15:02:11 crc kubenswrapper[4857]: I1128 15:02:11.164901 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-ed96-account-create-update-jvjfk" Nov 28 15:02:11 crc kubenswrapper[4857]: I1128 15:02:11.167214 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 28 15:02:11 crc kubenswrapper[4857]: I1128 15:02:11.185539 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-ed96-account-create-update-jvjfk"] Nov 28 15:02:11 crc kubenswrapper[4857]: I1128 15:02:11.216237 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ddlcg\" (UniqueName: \"kubernetes.io/projected/841bff9a-10d1-4c28-b32b-61c18325e2c5-kube-api-access-ddlcg\") pod \"neutron-db-create-cnbmb\" (UID: \"841bff9a-10d1-4c28-b32b-61c18325e2c5\") " pod="openstack/neutron-db-create-cnbmb" Nov 28 15:02:11 crc kubenswrapper[4857]: I1128 15:02:11.216471 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/841bff9a-10d1-4c28-b32b-61c18325e2c5-operator-scripts\") pod \"neutron-db-create-cnbmb\" (UID: \"841bff9a-10d1-4c28-b32b-61c18325e2c5\") " pod="openstack/neutron-db-create-cnbmb" Nov 28 15:02:11 crc kubenswrapper[4857]: I1128 15:02:11.308468 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:02:11 crc kubenswrapper[4857]: I1128 15:02:11.308559 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:02:11 crc kubenswrapper[4857]: I1128 15:02:11.318000 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-92qz5\" (UniqueName: \"kubernetes.io/projected/aa05d80d-4f5e-4599-8faa-491f6f6a641f-kube-api-access-92qz5\") pod \"neutron-ed96-account-create-update-jvjfk\" (UID: \"aa05d80d-4f5e-4599-8faa-491f6f6a641f\") " pod="openstack/neutron-ed96-account-create-update-jvjfk" Nov 28 15:02:11 crc kubenswrapper[4857]: I1128 15:02:11.318092 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aa05d80d-4f5e-4599-8faa-491f6f6a641f-operator-scripts\") pod \"neutron-ed96-account-create-update-jvjfk\" (UID: \"aa05d80d-4f5e-4599-8faa-491f6f6a641f\") " pod="openstack/neutron-ed96-account-create-update-jvjfk" Nov 28 15:02:11 crc kubenswrapper[4857]: I1128 15:02:11.318302 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ddlcg\" (UniqueName: \"kubernetes.io/projected/841bff9a-10d1-4c28-b32b-61c18325e2c5-kube-api-access-ddlcg\") pod \"neutron-db-create-cnbmb\" (UID: \"841bff9a-10d1-4c28-b32b-61c18325e2c5\") " pod="openstack/neutron-db-create-cnbmb" Nov 28 15:02:11 crc kubenswrapper[4857]: I1128 15:02:11.318537 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/841bff9a-10d1-4c28-b32b-61c18325e2c5-operator-scripts\") pod \"neutron-db-create-cnbmb\" (UID: 
\"841bff9a-10d1-4c28-b32b-61c18325e2c5\") " pod="openstack/neutron-db-create-cnbmb" Nov 28 15:02:11 crc kubenswrapper[4857]: I1128 15:02:11.319291 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/841bff9a-10d1-4c28-b32b-61c18325e2c5-operator-scripts\") pod \"neutron-db-create-cnbmb\" (UID: \"841bff9a-10d1-4c28-b32b-61c18325e2c5\") " pod="openstack/neutron-db-create-cnbmb" Nov 28 15:02:11 crc kubenswrapper[4857]: I1128 15:02:11.336283 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ddlcg\" (UniqueName: \"kubernetes.io/projected/841bff9a-10d1-4c28-b32b-61c18325e2c5-kube-api-access-ddlcg\") pod \"neutron-db-create-cnbmb\" (UID: \"841bff9a-10d1-4c28-b32b-61c18325e2c5\") " pod="openstack/neutron-db-create-cnbmb" Nov 28 15:02:11 crc kubenswrapper[4857]: I1128 15:02:11.384867 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-cnbmb" Nov 28 15:02:11 crc kubenswrapper[4857]: I1128 15:02:11.420843 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aa05d80d-4f5e-4599-8faa-491f6f6a641f-operator-scripts\") pod \"neutron-ed96-account-create-update-jvjfk\" (UID: \"aa05d80d-4f5e-4599-8faa-491f6f6a641f\") " pod="openstack/neutron-ed96-account-create-update-jvjfk" Nov 28 15:02:11 crc kubenswrapper[4857]: I1128 15:02:11.421500 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-92qz5\" (UniqueName: \"kubernetes.io/projected/aa05d80d-4f5e-4599-8faa-491f6f6a641f-kube-api-access-92qz5\") pod \"neutron-ed96-account-create-update-jvjfk\" (UID: \"aa05d80d-4f5e-4599-8faa-491f6f6a641f\") " pod="openstack/neutron-ed96-account-create-update-jvjfk" Nov 28 15:02:11 crc kubenswrapper[4857]: I1128 15:02:11.422481 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aa05d80d-4f5e-4599-8faa-491f6f6a641f-operator-scripts\") pod \"neutron-ed96-account-create-update-jvjfk\" (UID: \"aa05d80d-4f5e-4599-8faa-491f6f6a641f\") " pod="openstack/neutron-ed96-account-create-update-jvjfk" Nov 28 15:02:11 crc kubenswrapper[4857]: I1128 15:02:11.447935 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-92qz5\" (UniqueName: \"kubernetes.io/projected/aa05d80d-4f5e-4599-8faa-491f6f6a641f-kube-api-access-92qz5\") pod \"neutron-ed96-account-create-update-jvjfk\" (UID: \"aa05d80d-4f5e-4599-8faa-491f6f6a641f\") " pod="openstack/neutron-ed96-account-create-update-jvjfk" Nov 28 15:02:11 crc kubenswrapper[4857]: I1128 15:02:11.481308 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-ed96-account-create-update-jvjfk" Nov 28 15:02:11 crc kubenswrapper[4857]: W1128 15:02:11.891166 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod841bff9a_10d1_4c28_b32b_61c18325e2c5.slice/crio-dad898290da90f9c3afa8b3fde0f94e6ec8a7a646082f2680c9a537ed80f13c8 WatchSource:0}: Error finding container dad898290da90f9c3afa8b3fde0f94e6ec8a7a646082f2680c9a537ed80f13c8: Status 404 returned error can't find the container with id dad898290da90f9c3afa8b3fde0f94e6ec8a7a646082f2680c9a537ed80f13c8 Nov 28 15:02:11 crc kubenswrapper[4857]: I1128 15:02:11.891598 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-cnbmb"] Nov 28 15:02:11 crc kubenswrapper[4857]: W1128 15:02:11.984461 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaa05d80d_4f5e_4599_8faa_491f6f6a641f.slice/crio-4efcb9f6e5e44debddc0bbcb46add798afe36b0994bf7df5fbfc2a6af0d22176 WatchSource:0}: Error finding container 4efcb9f6e5e44debddc0bbcb46add798afe36b0994bf7df5fbfc2a6af0d22176: Status 404 returned error can't find the container with id 4efcb9f6e5e44debddc0bbcb46add798afe36b0994bf7df5fbfc2a6af0d22176 Nov 28 15:02:11 crc kubenswrapper[4857]: I1128 15:02:11.992788 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-ed96-account-create-update-jvjfk"] Nov 28 15:02:12 crc kubenswrapper[4857]: I1128 15:02:12.405649 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-ed96-account-create-update-jvjfk" event={"ID":"aa05d80d-4f5e-4599-8faa-491f6f6a641f","Type":"ContainerStarted","Data":"7a2a489cf5cefc4618f7d79e150c9b12bcebe3908fa13cf6c78ce0569b154351"} Nov 28 15:02:12 crc kubenswrapper[4857]: I1128 15:02:12.405728 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-ed96-account-create-update-jvjfk" event={"ID":"aa05d80d-4f5e-4599-8faa-491f6f6a641f","Type":"ContainerStarted","Data":"4efcb9f6e5e44debddc0bbcb46add798afe36b0994bf7df5fbfc2a6af0d22176"} Nov 28 15:02:12 crc kubenswrapper[4857]: I1128 15:02:12.407661 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-cnbmb" event={"ID":"841bff9a-10d1-4c28-b32b-61c18325e2c5","Type":"ContainerStarted","Data":"bd795e43cf366b5b44628151afc3305d28178601944c9b5bded7f5993e49d5bb"} Nov 28 15:02:12 crc kubenswrapper[4857]: I1128 15:02:12.407708 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-cnbmb" event={"ID":"841bff9a-10d1-4c28-b32b-61c18325e2c5","Type":"ContainerStarted","Data":"dad898290da90f9c3afa8b3fde0f94e6ec8a7a646082f2680c9a537ed80f13c8"} Nov 28 15:02:12 crc kubenswrapper[4857]: I1128 15:02:12.425841 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-ed96-account-create-update-jvjfk" podStartSLOduration=1.425814309 podStartE2EDuration="1.425814309s" podCreationTimestamp="2025-11-28 15:02:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:02:12.420823046 +0000 UTC m=+5582.544764493" watchObservedRunningTime="2025-11-28 15:02:12.425814309 +0000 UTC m=+5582.549755756" Nov 28 15:02:12 crc kubenswrapper[4857]: I1128 15:02:12.444747 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-create-cnbmb" 
podStartSLOduration=1.444724445 podStartE2EDuration="1.444724445s" podCreationTimestamp="2025-11-28 15:02:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:02:12.436194547 +0000 UTC m=+5582.560136014" watchObservedRunningTime="2025-11-28 15:02:12.444724445 +0000 UTC m=+5582.568665872" Nov 28 15:02:13 crc kubenswrapper[4857]: I1128 15:02:13.422805 4857 generic.go:334] "Generic (PLEG): container finished" podID="aa05d80d-4f5e-4599-8faa-491f6f6a641f" containerID="7a2a489cf5cefc4618f7d79e150c9b12bcebe3908fa13cf6c78ce0569b154351" exitCode=0 Nov 28 15:02:13 crc kubenswrapper[4857]: I1128 15:02:13.422878 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-ed96-account-create-update-jvjfk" event={"ID":"aa05d80d-4f5e-4599-8faa-491f6f6a641f","Type":"ContainerDied","Data":"7a2a489cf5cefc4618f7d79e150c9b12bcebe3908fa13cf6c78ce0569b154351"} Nov 28 15:02:13 crc kubenswrapper[4857]: I1128 15:02:13.427097 4857 generic.go:334] "Generic (PLEG): container finished" podID="841bff9a-10d1-4c28-b32b-61c18325e2c5" containerID="bd795e43cf366b5b44628151afc3305d28178601944c9b5bded7f5993e49d5bb" exitCode=0 Nov 28 15:02:13 crc kubenswrapper[4857]: I1128 15:02:13.427178 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-cnbmb" event={"ID":"841bff9a-10d1-4c28-b32b-61c18325e2c5","Type":"ContainerDied","Data":"bd795e43cf366b5b44628151afc3305d28178601944c9b5bded7f5993e49d5bb"} Nov 28 15:02:14 crc kubenswrapper[4857]: I1128 15:02:14.918234 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-cnbmb" Nov 28 15:02:14 crc kubenswrapper[4857]: I1128 15:02:14.926805 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-ed96-account-create-update-jvjfk" Nov 28 15:02:14 crc kubenswrapper[4857]: I1128 15:02:14.993589 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aa05d80d-4f5e-4599-8faa-491f6f6a641f-operator-scripts\") pod \"aa05d80d-4f5e-4599-8faa-491f6f6a641f\" (UID: \"aa05d80d-4f5e-4599-8faa-491f6f6a641f\") " Nov 28 15:02:14 crc kubenswrapper[4857]: I1128 15:02:14.993682 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ddlcg\" (UniqueName: \"kubernetes.io/projected/841bff9a-10d1-4c28-b32b-61c18325e2c5-kube-api-access-ddlcg\") pod \"841bff9a-10d1-4c28-b32b-61c18325e2c5\" (UID: \"841bff9a-10d1-4c28-b32b-61c18325e2c5\") " Nov 28 15:02:14 crc kubenswrapper[4857]: I1128 15:02:14.993798 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-92qz5\" (UniqueName: \"kubernetes.io/projected/aa05d80d-4f5e-4599-8faa-491f6f6a641f-kube-api-access-92qz5\") pod \"aa05d80d-4f5e-4599-8faa-491f6f6a641f\" (UID: \"aa05d80d-4f5e-4599-8faa-491f6f6a641f\") " Nov 28 15:02:14 crc kubenswrapper[4857]: I1128 15:02:14.993844 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/841bff9a-10d1-4c28-b32b-61c18325e2c5-operator-scripts\") pod \"841bff9a-10d1-4c28-b32b-61c18325e2c5\" (UID: \"841bff9a-10d1-4c28-b32b-61c18325e2c5\") " Nov 28 15:02:14 crc kubenswrapper[4857]: I1128 15:02:14.994385 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa05d80d-4f5e-4599-8faa-491f6f6a641f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "aa05d80d-4f5e-4599-8faa-491f6f6a641f" (UID: "aa05d80d-4f5e-4599-8faa-491f6f6a641f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:02:14 crc kubenswrapper[4857]: I1128 15:02:14.994827 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/841bff9a-10d1-4c28-b32b-61c18325e2c5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "841bff9a-10d1-4c28-b32b-61c18325e2c5" (UID: "841bff9a-10d1-4c28-b32b-61c18325e2c5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:02:14 crc kubenswrapper[4857]: I1128 15:02:14.995082 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aa05d80d-4f5e-4599-8faa-491f6f6a641f-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:02:14 crc kubenswrapper[4857]: I1128 15:02:14.995112 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/841bff9a-10d1-4c28-b32b-61c18325e2c5-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:02:15 crc kubenswrapper[4857]: I1128 15:02:15.001568 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/841bff9a-10d1-4c28-b32b-61c18325e2c5-kube-api-access-ddlcg" (OuterVolumeSpecName: "kube-api-access-ddlcg") pod "841bff9a-10d1-4c28-b32b-61c18325e2c5" (UID: "841bff9a-10d1-4c28-b32b-61c18325e2c5"). InnerVolumeSpecName "kube-api-access-ddlcg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:02:15 crc kubenswrapper[4857]: I1128 15:02:15.002589 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa05d80d-4f5e-4599-8faa-491f6f6a641f-kube-api-access-92qz5" (OuterVolumeSpecName: "kube-api-access-92qz5") pod "aa05d80d-4f5e-4599-8faa-491f6f6a641f" (UID: "aa05d80d-4f5e-4599-8faa-491f6f6a641f"). InnerVolumeSpecName "kube-api-access-92qz5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:02:15 crc kubenswrapper[4857]: I1128 15:02:15.097325 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ddlcg\" (UniqueName: \"kubernetes.io/projected/841bff9a-10d1-4c28-b32b-61c18325e2c5-kube-api-access-ddlcg\") on node \"crc\" DevicePath \"\"" Nov 28 15:02:15 crc kubenswrapper[4857]: I1128 15:02:15.097362 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-92qz5\" (UniqueName: \"kubernetes.io/projected/aa05d80d-4f5e-4599-8faa-491f6f6a641f-kube-api-access-92qz5\") on node \"crc\" DevicePath \"\"" Nov 28 15:02:15 crc kubenswrapper[4857]: I1128 15:02:15.455411 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-cnbmb" event={"ID":"841bff9a-10d1-4c28-b32b-61c18325e2c5","Type":"ContainerDied","Data":"dad898290da90f9c3afa8b3fde0f94e6ec8a7a646082f2680c9a537ed80f13c8"} Nov 28 15:02:15 crc kubenswrapper[4857]: I1128 15:02:15.455456 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dad898290da90f9c3afa8b3fde0f94e6ec8a7a646082f2680c9a537ed80f13c8" Nov 28 15:02:15 crc kubenswrapper[4857]: I1128 15:02:15.455491 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-cnbmb" Nov 28 15:02:15 crc kubenswrapper[4857]: I1128 15:02:15.464417 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-ed96-account-create-update-jvjfk" event={"ID":"aa05d80d-4f5e-4599-8faa-491f6f6a641f","Type":"ContainerDied","Data":"4efcb9f6e5e44debddc0bbcb46add798afe36b0994bf7df5fbfc2a6af0d22176"} Nov 28 15:02:15 crc kubenswrapper[4857]: I1128 15:02:15.464472 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4efcb9f6e5e44debddc0bbcb46add798afe36b0994bf7df5fbfc2a6af0d22176" Nov 28 15:02:15 crc kubenswrapper[4857]: I1128 15:02:15.464552 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-ed96-account-create-update-jvjfk" Nov 28 15:02:16 crc kubenswrapper[4857]: I1128 15:02:16.387574 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-whh9q"] Nov 28 15:02:16 crc kubenswrapper[4857]: E1128 15:02:16.388425 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="841bff9a-10d1-4c28-b32b-61c18325e2c5" containerName="mariadb-database-create" Nov 28 15:02:16 crc kubenswrapper[4857]: I1128 15:02:16.388441 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="841bff9a-10d1-4c28-b32b-61c18325e2c5" containerName="mariadb-database-create" Nov 28 15:02:16 crc kubenswrapper[4857]: E1128 15:02:16.388470 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa05d80d-4f5e-4599-8faa-491f6f6a641f" containerName="mariadb-account-create-update" Nov 28 15:02:16 crc kubenswrapper[4857]: I1128 15:02:16.388479 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa05d80d-4f5e-4599-8faa-491f6f6a641f" containerName="mariadb-account-create-update" Nov 28 15:02:16 crc kubenswrapper[4857]: I1128 15:02:16.388677 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa05d80d-4f5e-4599-8faa-491f6f6a641f" containerName="mariadb-account-create-update" Nov 28 15:02:16 crc kubenswrapper[4857]: I1128 15:02:16.388693 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="841bff9a-10d1-4c28-b32b-61c18325e2c5" containerName="mariadb-database-create" Nov 28 15:02:16 crc kubenswrapper[4857]: I1128 15:02:16.389446 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-whh9q" Nov 28 15:02:16 crc kubenswrapper[4857]: I1128 15:02:16.392218 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 28 15:02:16 crc kubenswrapper[4857]: I1128 15:02:16.394716 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 28 15:02:16 crc kubenswrapper[4857]: I1128 15:02:16.402074 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-whh9q"] Nov 28 15:02:16 crc kubenswrapper[4857]: I1128 15:02:16.402538 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-xd9ft" Nov 28 15:02:16 crc kubenswrapper[4857]: I1128 15:02:16.425987 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/dd3bbc29-eabb-408a-88d2-5adfe6e67e36-config\") pod \"neutron-db-sync-whh9q\" (UID: \"dd3bbc29-eabb-408a-88d2-5adfe6e67e36\") " pod="openstack/neutron-db-sync-whh9q" Nov 28 15:02:16 crc kubenswrapper[4857]: I1128 15:02:16.426172 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd3bbc29-eabb-408a-88d2-5adfe6e67e36-combined-ca-bundle\") pod \"neutron-db-sync-whh9q\" (UID: \"dd3bbc29-eabb-408a-88d2-5adfe6e67e36\") " pod="openstack/neutron-db-sync-whh9q" Nov 28 15:02:16 crc kubenswrapper[4857]: I1128 15:02:16.426248 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7w2zr\" (UniqueName: \"kubernetes.io/projected/dd3bbc29-eabb-408a-88d2-5adfe6e67e36-kube-api-access-7w2zr\") pod \"neutron-db-sync-whh9q\" (UID: \"dd3bbc29-eabb-408a-88d2-5adfe6e67e36\") " pod="openstack/neutron-db-sync-whh9q" Nov 28 15:02:16 crc 
kubenswrapper[4857]: I1128 15:02:16.527876 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/dd3bbc29-eabb-408a-88d2-5adfe6e67e36-config\") pod \"neutron-db-sync-whh9q\" (UID: \"dd3bbc29-eabb-408a-88d2-5adfe6e67e36\") " pod="openstack/neutron-db-sync-whh9q" Nov 28 15:02:16 crc kubenswrapper[4857]: I1128 15:02:16.528053 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd3bbc29-eabb-408a-88d2-5adfe6e67e36-combined-ca-bundle\") pod \"neutron-db-sync-whh9q\" (UID: \"dd3bbc29-eabb-408a-88d2-5adfe6e67e36\") " pod="openstack/neutron-db-sync-whh9q" Nov 28 15:02:16 crc kubenswrapper[4857]: I1128 15:02:16.528171 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7w2zr\" (UniqueName: \"kubernetes.io/projected/dd3bbc29-eabb-408a-88d2-5adfe6e67e36-kube-api-access-7w2zr\") pod \"neutron-db-sync-whh9q\" (UID: \"dd3bbc29-eabb-408a-88d2-5adfe6e67e36\") " pod="openstack/neutron-db-sync-whh9q" Nov 28 15:02:16 crc kubenswrapper[4857]: I1128 15:02:16.539196 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/dd3bbc29-eabb-408a-88d2-5adfe6e67e36-config\") pod \"neutron-db-sync-whh9q\" (UID: \"dd3bbc29-eabb-408a-88d2-5adfe6e67e36\") " pod="openstack/neutron-db-sync-whh9q" Nov 28 15:02:16 crc kubenswrapper[4857]: I1128 15:02:16.539507 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd3bbc29-eabb-408a-88d2-5adfe6e67e36-combined-ca-bundle\") pod \"neutron-db-sync-whh9q\" (UID: \"dd3bbc29-eabb-408a-88d2-5adfe6e67e36\") " pod="openstack/neutron-db-sync-whh9q" Nov 28 15:02:16 crc kubenswrapper[4857]: I1128 15:02:16.557266 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7w2zr\" (UniqueName: \"kubernetes.io/projected/dd3bbc29-eabb-408a-88d2-5adfe6e67e36-kube-api-access-7w2zr\") pod \"neutron-db-sync-whh9q\" (UID: \"dd3bbc29-eabb-408a-88d2-5adfe6e67e36\") " pod="openstack/neutron-db-sync-whh9q" Nov 28 15:02:16 crc kubenswrapper[4857]: I1128 15:02:16.726010 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-whh9q" Nov 28 15:02:17 crc kubenswrapper[4857]: I1128 15:02:17.193581 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-whh9q"] Nov 28 15:02:17 crc kubenswrapper[4857]: I1128 15:02:17.489370 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-whh9q" event={"ID":"dd3bbc29-eabb-408a-88d2-5adfe6e67e36","Type":"ContainerStarted","Data":"7fc7b802ed3133a945d349157a959758c40721b83025ececa525f9df3d7bdc33"} Nov 28 15:02:17 crc kubenswrapper[4857]: I1128 15:02:17.489955 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-whh9q" event={"ID":"dd3bbc29-eabb-408a-88d2-5adfe6e67e36","Type":"ContainerStarted","Data":"2c71d0ee4c8849b57b5ad5444dcc11e9db721ab86943c7bfde86068dca472043"} Nov 28 15:02:17 crc kubenswrapper[4857]: I1128 15:02:17.517576 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-whh9q" podStartSLOduration=1.5175559029999999 podStartE2EDuration="1.517555903s" podCreationTimestamp="2025-11-28 15:02:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:02:17.514189224 +0000 UTC m=+5587.638130701" watchObservedRunningTime="2025-11-28 15:02:17.517555903 +0000 UTC m=+5587.641497340" Nov 28 15:02:21 crc kubenswrapper[4857]: I1128 15:02:21.535573 4857 generic.go:334] "Generic (PLEG): container finished" podID="dd3bbc29-eabb-408a-88d2-5adfe6e67e36" containerID="7fc7b802ed3133a945d349157a959758c40721b83025ececa525f9df3d7bdc33" exitCode=0 Nov 28 15:02:21 crc kubenswrapper[4857]: I1128 15:02:21.535677 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-whh9q" event={"ID":"dd3bbc29-eabb-408a-88d2-5adfe6e67e36","Type":"ContainerDied","Data":"7fc7b802ed3133a945d349157a959758c40721b83025ececa525f9df3d7bdc33"} Nov 28 15:02:22 crc kubenswrapper[4857]: I1128 15:02:22.909743 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-whh9q" Nov 28 15:02:22 crc kubenswrapper[4857]: I1128 15:02:22.986221 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7w2zr\" (UniqueName: \"kubernetes.io/projected/dd3bbc29-eabb-408a-88d2-5adfe6e67e36-kube-api-access-7w2zr\") pod \"dd3bbc29-eabb-408a-88d2-5adfe6e67e36\" (UID: \"dd3bbc29-eabb-408a-88d2-5adfe6e67e36\") " Nov 28 15:02:22 crc kubenswrapper[4857]: I1128 15:02:22.986347 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/dd3bbc29-eabb-408a-88d2-5adfe6e67e36-config\") pod \"dd3bbc29-eabb-408a-88d2-5adfe6e67e36\" (UID: \"dd3bbc29-eabb-408a-88d2-5adfe6e67e36\") " Nov 28 15:02:22 crc kubenswrapper[4857]: I1128 15:02:22.986506 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd3bbc29-eabb-408a-88d2-5adfe6e67e36-combined-ca-bundle\") pod \"dd3bbc29-eabb-408a-88d2-5adfe6e67e36\" (UID: \"dd3bbc29-eabb-408a-88d2-5adfe6e67e36\") " Nov 28 15:02:22 crc kubenswrapper[4857]: I1128 15:02:22.996530 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd3bbc29-eabb-408a-88d2-5adfe6e67e36-kube-api-access-7w2zr" (OuterVolumeSpecName: "kube-api-access-7w2zr") pod "dd3bbc29-eabb-408a-88d2-5adfe6e67e36" (UID: "dd3bbc29-eabb-408a-88d2-5adfe6e67e36"). InnerVolumeSpecName "kube-api-access-7w2zr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:02:23 crc kubenswrapper[4857]: E1128 15:02:23.027363 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/dd3bbc29-eabb-408a-88d2-5adfe6e67e36-config podName:dd3bbc29-eabb-408a-88d2-5adfe6e67e36 nodeName:}" failed. No retries permitted until 2025-11-28 15:02:23.527317583 +0000 UTC m=+5593.651259060 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "config" (UniqueName: "kubernetes.io/secret/dd3bbc29-eabb-408a-88d2-5adfe6e67e36-config") pod "dd3bbc29-eabb-408a-88d2-5adfe6e67e36" (UID: "dd3bbc29-eabb-408a-88d2-5adfe6e67e36") : error deleting /var/lib/kubelet/pods/dd3bbc29-eabb-408a-88d2-5adfe6e67e36/volume-subpaths: remove /var/lib/kubelet/pods/dd3bbc29-eabb-408a-88d2-5adfe6e67e36/volume-subpaths: no such file or directory Nov 28 15:02:23 crc kubenswrapper[4857]: I1128 15:02:23.031862 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd3bbc29-eabb-408a-88d2-5adfe6e67e36-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dd3bbc29-eabb-408a-88d2-5adfe6e67e36" (UID: "dd3bbc29-eabb-408a-88d2-5adfe6e67e36"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:02:23 crc kubenswrapper[4857]: I1128 15:02:23.089143 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7w2zr\" (UniqueName: \"kubernetes.io/projected/dd3bbc29-eabb-408a-88d2-5adfe6e67e36-kube-api-access-7w2zr\") on node \"crc\" DevicePath \"\"" Nov 28 15:02:23 crc kubenswrapper[4857]: I1128 15:02:23.089176 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd3bbc29-eabb-408a-88d2-5adfe6e67e36-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:02:23 crc kubenswrapper[4857]: I1128 15:02:23.563547 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-whh9q" event={"ID":"dd3bbc29-eabb-408a-88d2-5adfe6e67e36","Type":"ContainerDied","Data":"2c71d0ee4c8849b57b5ad5444dcc11e9db721ab86943c7bfde86068dca472043"} Nov 28 15:02:23 crc kubenswrapper[4857]: I1128 15:02:23.563636 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2c71d0ee4c8849b57b5ad5444dcc11e9db721ab86943c7bfde86068dca472043" Nov 28 15:02:23 crc kubenswrapper[4857]: I1128 15:02:23.563657 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-whh9q" Nov 28 15:02:23 crc kubenswrapper[4857]: I1128 15:02:23.599097 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/dd3bbc29-eabb-408a-88d2-5adfe6e67e36-config\") pod \"dd3bbc29-eabb-408a-88d2-5adfe6e67e36\" (UID: \"dd3bbc29-eabb-408a-88d2-5adfe6e67e36\") " Nov 28 15:02:23 crc kubenswrapper[4857]: I1128 15:02:23.604723 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd3bbc29-eabb-408a-88d2-5adfe6e67e36-config" (OuterVolumeSpecName: "config") pod "dd3bbc29-eabb-408a-88d2-5adfe6e67e36" (UID: "dd3bbc29-eabb-408a-88d2-5adfe6e67e36"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:02:23 crc kubenswrapper[4857]: I1128 15:02:23.701263 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/dd3bbc29-eabb-408a-88d2-5adfe6e67e36-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:02:23 crc kubenswrapper[4857]: I1128 15:02:23.750873 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6bdb9c7ddc-d6lqf"] Nov 28 15:02:23 crc kubenswrapper[4857]: E1128 15:02:23.753019 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd3bbc29-eabb-408a-88d2-5adfe6e67e36" containerName="neutron-db-sync" Nov 28 15:02:23 crc kubenswrapper[4857]: I1128 15:02:23.753047 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd3bbc29-eabb-408a-88d2-5adfe6e67e36" containerName="neutron-db-sync" Nov 28 15:02:23 crc kubenswrapper[4857]: I1128 15:02:23.753277 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd3bbc29-eabb-408a-88d2-5adfe6e67e36" containerName="neutron-db-sync" Nov 28 15:02:23 crc kubenswrapper[4857]: I1128 15:02:23.754552 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bdb9c7ddc-d6lqf" Nov 28 15:02:23 crc kubenswrapper[4857]: I1128 15:02:23.770087 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bdb9c7ddc-d6lqf"] Nov 28 15:02:23 crc kubenswrapper[4857]: I1128 15:02:23.803632 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b-config\") pod \"dnsmasq-dns-6bdb9c7ddc-d6lqf\" (UID: \"3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b\") " pod="openstack/dnsmasq-dns-6bdb9c7ddc-d6lqf" Nov 28 15:02:23 crc kubenswrapper[4857]: I1128 15:02:23.803693 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lfnrt\" (UniqueName: \"kubernetes.io/projected/3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b-kube-api-access-lfnrt\") pod \"dnsmasq-dns-6bdb9c7ddc-d6lqf\" (UID: \"3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b\") " pod="openstack/dnsmasq-dns-6bdb9c7ddc-d6lqf" Nov 28 15:02:23 crc kubenswrapper[4857]: I1128 15:02:23.803778 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b-ovsdbserver-nb\") pod \"dnsmasq-dns-6bdb9c7ddc-d6lqf\" (UID: \"3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b\") " pod="openstack/dnsmasq-dns-6bdb9c7ddc-d6lqf" Nov 28 15:02:23 crc kubenswrapper[4857]: I1128 15:02:23.803857 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b-ovsdbserver-sb\") pod \"dnsmasq-dns-6bdb9c7ddc-d6lqf\" (UID: \"3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b\") " pod="openstack/dnsmasq-dns-6bdb9c7ddc-d6lqf" Nov 28 15:02:23 crc kubenswrapper[4857]: I1128 15:02:23.804113 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b-dns-svc\") pod \"dnsmasq-dns-6bdb9c7ddc-d6lqf\" (UID: \"3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b\") " pod="openstack/dnsmasq-dns-6bdb9c7ddc-d6lqf" Nov 28 15:02:23 crc kubenswrapper[4857]: I1128 15:02:23.834020 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-86b5dbcd7c-xktg6"] Nov 28 15:02:23 crc kubenswrapper[4857]: I1128 15:02:23.835516 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-86b5dbcd7c-xktg6" Nov 28 15:02:23 crc kubenswrapper[4857]: I1128 15:02:23.864912 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-86b5dbcd7c-xktg6"] Nov 28 15:02:23 crc kubenswrapper[4857]: I1128 15:02:23.911306 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b-ovsdbserver-nb\") pod \"dnsmasq-dns-6bdb9c7ddc-d6lqf\" (UID: \"3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b\") " pod="openstack/dnsmasq-dns-6bdb9c7ddc-d6lqf" Nov 28 15:02:23 crc kubenswrapper[4857]: I1128 15:02:23.911361 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b-ovsdbserver-sb\") pod \"dnsmasq-dns-6bdb9c7ddc-d6lqf\" (UID: \"3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b\") " pod="openstack/dnsmasq-dns-6bdb9c7ddc-d6lqf" Nov 28 15:02:23 crc kubenswrapper[4857]: I1128 15:02:23.911436 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b-dns-svc\") pod \"dnsmasq-dns-6bdb9c7ddc-d6lqf\" (UID: \"3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b\") " pod="openstack/dnsmasq-dns-6bdb9c7ddc-d6lqf" Nov 28 15:02:23 crc kubenswrapper[4857]: I1128 15:02:23.911495 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b-config\") pod \"dnsmasq-dns-6bdb9c7ddc-d6lqf\" (UID: \"3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b\") " pod="openstack/dnsmasq-dns-6bdb9c7ddc-d6lqf" Nov 28 15:02:23 crc kubenswrapper[4857]: I1128 15:02:23.911521 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lfnrt\" (UniqueName: \"kubernetes.io/projected/3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b-kube-api-access-lfnrt\") pod \"dnsmasq-dns-6bdb9c7ddc-d6lqf\" (UID: \"3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b\") " pod="openstack/dnsmasq-dns-6bdb9c7ddc-d6lqf" Nov 28 15:02:23 crc kubenswrapper[4857]: I1128 15:02:23.912611 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b-ovsdbserver-nb\") pod \"dnsmasq-dns-6bdb9c7ddc-d6lqf\" (UID: \"3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b\") " pod="openstack/dnsmasq-dns-6bdb9c7ddc-d6lqf" Nov 28 15:02:23 crc kubenswrapper[4857]: I1128 15:02:23.913325 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b-config\") pod \"dnsmasq-dns-6bdb9c7ddc-d6lqf\" (UID: \"3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b\") " pod="openstack/dnsmasq-dns-6bdb9c7ddc-d6lqf" Nov 28 15:02:23 crc kubenswrapper[4857]: I1128 15:02:23.913348 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b-dns-svc\") pod \"dnsmasq-dns-6bdb9c7ddc-d6lqf\" (UID: \"3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b\") " pod="openstack/dnsmasq-dns-6bdb9c7ddc-d6lqf" Nov 28 15:02:23 crc kubenswrapper[4857]: I1128 15:02:23.913817 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b-ovsdbserver-sb\") pod \"dnsmasq-dns-6bdb9c7ddc-d6lqf\" 
(UID: \"3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b\") " pod="openstack/dnsmasq-dns-6bdb9c7ddc-d6lqf" Nov 28 15:02:23 crc kubenswrapper[4857]: I1128 15:02:23.928735 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lfnrt\" (UniqueName: \"kubernetes.io/projected/3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b-kube-api-access-lfnrt\") pod \"dnsmasq-dns-6bdb9c7ddc-d6lqf\" (UID: \"3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b\") " pod="openstack/dnsmasq-dns-6bdb9c7ddc-d6lqf" Nov 28 15:02:24 crc kubenswrapper[4857]: I1128 15:02:24.013584 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/214b3bcb-b9dd-4d87-b519-7fd66b84658a-config\") pod \"neutron-86b5dbcd7c-xktg6\" (UID: \"214b3bcb-b9dd-4d87-b519-7fd66b84658a\") " pod="openstack/neutron-86b5dbcd7c-xktg6" Nov 28 15:02:24 crc kubenswrapper[4857]: I1128 15:02:24.013633 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/214b3bcb-b9dd-4d87-b519-7fd66b84658a-httpd-config\") pod \"neutron-86b5dbcd7c-xktg6\" (UID: \"214b3bcb-b9dd-4d87-b519-7fd66b84658a\") " pod="openstack/neutron-86b5dbcd7c-xktg6" Nov 28 15:02:24 crc kubenswrapper[4857]: I1128 15:02:24.013695 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/214b3bcb-b9dd-4d87-b519-7fd66b84658a-combined-ca-bundle\") pod \"neutron-86b5dbcd7c-xktg6\" (UID: \"214b3bcb-b9dd-4d87-b519-7fd66b84658a\") " pod="openstack/neutron-86b5dbcd7c-xktg6" Nov 28 15:02:24 crc kubenswrapper[4857]: I1128 15:02:24.013760 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4qvnc\" (UniqueName: \"kubernetes.io/projected/214b3bcb-b9dd-4d87-b519-7fd66b84658a-kube-api-access-4qvnc\") pod \"neutron-86b5dbcd7c-xktg6\" (UID: \"214b3bcb-b9dd-4d87-b519-7fd66b84658a\") " pod="openstack/neutron-86b5dbcd7c-xktg6" Nov 28 15:02:24 crc kubenswrapper[4857]: I1128 15:02:24.109526 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bdb9c7ddc-d6lqf" Nov 28 15:02:24 crc kubenswrapper[4857]: I1128 15:02:24.115167 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4qvnc\" (UniqueName: \"kubernetes.io/projected/214b3bcb-b9dd-4d87-b519-7fd66b84658a-kube-api-access-4qvnc\") pod \"neutron-86b5dbcd7c-xktg6\" (UID: \"214b3bcb-b9dd-4d87-b519-7fd66b84658a\") " pod="openstack/neutron-86b5dbcd7c-xktg6" Nov 28 15:02:24 crc kubenswrapper[4857]: I1128 15:02:24.115288 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/214b3bcb-b9dd-4d87-b519-7fd66b84658a-config\") pod \"neutron-86b5dbcd7c-xktg6\" (UID: \"214b3bcb-b9dd-4d87-b519-7fd66b84658a\") " pod="openstack/neutron-86b5dbcd7c-xktg6" Nov 28 15:02:24 crc kubenswrapper[4857]: I1128 15:02:24.115318 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/214b3bcb-b9dd-4d87-b519-7fd66b84658a-httpd-config\") pod \"neutron-86b5dbcd7c-xktg6\" (UID: \"214b3bcb-b9dd-4d87-b519-7fd66b84658a\") " pod="openstack/neutron-86b5dbcd7c-xktg6" Nov 28 15:02:24 crc kubenswrapper[4857]: I1128 15:02:24.115370 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/214b3bcb-b9dd-4d87-b519-7fd66b84658a-combined-ca-bundle\") pod \"neutron-86b5dbcd7c-xktg6\" (UID: \"214b3bcb-b9dd-4d87-b519-7fd66b84658a\") " pod="openstack/neutron-86b5dbcd7c-xktg6" Nov 28 15:02:24 crc kubenswrapper[4857]: I1128 15:02:24.124625 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/214b3bcb-b9dd-4d87-b519-7fd66b84658a-httpd-config\") pod \"neutron-86b5dbcd7c-xktg6\" (UID: \"214b3bcb-b9dd-4d87-b519-7fd66b84658a\") " pod="openstack/neutron-86b5dbcd7c-xktg6" Nov 28 15:02:24 crc kubenswrapper[4857]: I1128 15:02:24.125544 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/214b3bcb-b9dd-4d87-b519-7fd66b84658a-combined-ca-bundle\") pod \"neutron-86b5dbcd7c-xktg6\" (UID: \"214b3bcb-b9dd-4d87-b519-7fd66b84658a\") " pod="openstack/neutron-86b5dbcd7c-xktg6" Nov 28 15:02:24 crc kubenswrapper[4857]: I1128 15:02:24.134137 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/214b3bcb-b9dd-4d87-b519-7fd66b84658a-config\") pod \"neutron-86b5dbcd7c-xktg6\" (UID: \"214b3bcb-b9dd-4d87-b519-7fd66b84658a\") " pod="openstack/neutron-86b5dbcd7c-xktg6" Nov 28 15:02:24 crc kubenswrapper[4857]: I1128 15:02:24.140733 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4qvnc\" (UniqueName: \"kubernetes.io/projected/214b3bcb-b9dd-4d87-b519-7fd66b84658a-kube-api-access-4qvnc\") pod \"neutron-86b5dbcd7c-xktg6\" (UID: \"214b3bcb-b9dd-4d87-b519-7fd66b84658a\") " pod="openstack/neutron-86b5dbcd7c-xktg6" Nov 28 15:02:24 crc kubenswrapper[4857]: I1128 15:02:24.185334 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-86b5dbcd7c-xktg6" Nov 28 15:02:24 crc kubenswrapper[4857]: I1128 15:02:24.619987 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bdb9c7ddc-d6lqf"] Nov 28 15:02:24 crc kubenswrapper[4857]: W1128 15:02:24.636536 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3c0fdc13_c2e2_4c8c_a5ec_734b7211ef2b.slice/crio-695b51f3eed82e0d1ce90e65dbc9ef71ca87f1389e6dc68ef5595e86c02fb9a0 WatchSource:0}: Error finding container 695b51f3eed82e0d1ce90e65dbc9ef71ca87f1389e6dc68ef5595e86c02fb9a0: Status 404 returned error can't find the container with id 695b51f3eed82e0d1ce90e65dbc9ef71ca87f1389e6dc68ef5595e86c02fb9a0 Nov 28 15:02:24 crc kubenswrapper[4857]: I1128 15:02:24.809009 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-86b5dbcd7c-xktg6"] Nov 28 15:02:25 crc kubenswrapper[4857]: I1128 15:02:25.581006 4857 generic.go:334] "Generic (PLEG): container finished" podID="3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b" containerID="a043b1a851b41d6405fd09d0850dcea8cd256714fce6d4c5ea53155dd4bf5759" exitCode=0 Nov 28 15:02:25 crc kubenswrapper[4857]: I1128 15:02:25.581072 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bdb9c7ddc-d6lqf" event={"ID":"3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b","Type":"ContainerDied","Data":"a043b1a851b41d6405fd09d0850dcea8cd256714fce6d4c5ea53155dd4bf5759"} Nov 28 15:02:25 crc kubenswrapper[4857]: I1128 15:02:25.581346 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bdb9c7ddc-d6lqf" event={"ID":"3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b","Type":"ContainerStarted","Data":"695b51f3eed82e0d1ce90e65dbc9ef71ca87f1389e6dc68ef5595e86c02fb9a0"} Nov 28 15:02:25 crc kubenswrapper[4857]: I1128 15:02:25.587804 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-86b5dbcd7c-xktg6" event={"ID":"214b3bcb-b9dd-4d87-b519-7fd66b84658a","Type":"ContainerStarted","Data":"fcd7cb42d4597863fb05d91364848c779a959f9390c753d3bfe4e6f2f4628129"} Nov 28 15:02:25 crc kubenswrapper[4857]: I1128 15:02:25.587851 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-86b5dbcd7c-xktg6" event={"ID":"214b3bcb-b9dd-4d87-b519-7fd66b84658a","Type":"ContainerStarted","Data":"f80560b47d2207510cad0fe16008c92ffaeff3fca247efef9f5e123dbcd63eb1"} Nov 28 15:02:25 crc kubenswrapper[4857]: I1128 15:02:25.587864 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-86b5dbcd7c-xktg6" event={"ID":"214b3bcb-b9dd-4d87-b519-7fd66b84658a","Type":"ContainerStarted","Data":"211f294e4614fdbfb2a8b6bf61d8e764ec4ebcc56d40e8352263539acbb1afcf"} Nov 28 15:02:25 crc kubenswrapper[4857]: I1128 15:02:25.588398 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-86b5dbcd7c-xktg6" Nov 28 15:02:25 crc kubenswrapper[4857]: I1128 15:02:25.624524 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-86b5dbcd7c-xktg6" podStartSLOduration=2.624496255 podStartE2EDuration="2.624496255s" podCreationTimestamp="2025-11-28 15:02:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:02:25.620098427 +0000 UTC m=+5595.744039884" watchObservedRunningTime="2025-11-28 15:02:25.624496255 +0000 UTC m=+5595.748437692" Nov 28 15:02:26 crc kubenswrapper[4857]: I1128 15:02:26.597372 4857 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bdb9c7ddc-d6lqf" event={"ID":"3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b","Type":"ContainerStarted","Data":"f559284e524f99cf9527e4cbdfcbd3901150dffd09fafb10b4a5c0317c6619f9"} Nov 28 15:02:26 crc kubenswrapper[4857]: I1128 15:02:26.597783 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6bdb9c7ddc-d6lqf" Nov 28 15:02:26 crc kubenswrapper[4857]: I1128 15:02:26.628447 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6bdb9c7ddc-d6lqf" podStartSLOduration=3.6284276909999997 podStartE2EDuration="3.628427691s" podCreationTimestamp="2025-11-28 15:02:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:02:26.622461731 +0000 UTC m=+5596.746403168" watchObservedRunningTime="2025-11-28 15:02:26.628427691 +0000 UTC m=+5596.752369128" Nov 28 15:02:34 crc kubenswrapper[4857]: I1128 15:02:34.112312 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6bdb9c7ddc-d6lqf" Nov 28 15:02:34 crc kubenswrapper[4857]: I1128 15:02:34.190665 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6675bbbc7-9gf5q"] Nov 28 15:02:34 crc kubenswrapper[4857]: I1128 15:02:34.191587 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6675bbbc7-9gf5q" podUID="67ca415f-ed01-4831-a0b7-43f1277ba04b" containerName="dnsmasq-dns" containerID="cri-o://03fd8797a776b4ea2d54a487cede8214e72aa938341dbe08186c24fd89318987" gracePeriod=10 Nov 28 15:02:34 crc kubenswrapper[4857]: I1128 15:02:34.691798 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6675bbbc7-9gf5q" Nov 28 15:02:34 crc kubenswrapper[4857]: I1128 15:02:34.733047 4857 generic.go:334] "Generic (PLEG): container finished" podID="67ca415f-ed01-4831-a0b7-43f1277ba04b" containerID="03fd8797a776b4ea2d54a487cede8214e72aa938341dbe08186c24fd89318987" exitCode=0 Nov 28 15:02:34 crc kubenswrapper[4857]: I1128 15:02:34.733187 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6675bbbc7-9gf5q" event={"ID":"67ca415f-ed01-4831-a0b7-43f1277ba04b","Type":"ContainerDied","Data":"03fd8797a776b4ea2d54a487cede8214e72aa938341dbe08186c24fd89318987"} Nov 28 15:02:34 crc kubenswrapper[4857]: I1128 15:02:34.733216 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6675bbbc7-9gf5q" event={"ID":"67ca415f-ed01-4831-a0b7-43f1277ba04b","Type":"ContainerDied","Data":"8ce9c3ec906201bc9d182533025fd53b449bba5e9f23ded9dd58ee6280e3ddd6"} Nov 28 15:02:34 crc kubenswrapper[4857]: I1128 15:02:34.733235 4857 scope.go:117] "RemoveContainer" containerID="03fd8797a776b4ea2d54a487cede8214e72aa938341dbe08186c24fd89318987" Nov 28 15:02:34 crc kubenswrapper[4857]: I1128 15:02:34.733400 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6675bbbc7-9gf5q" Nov 28 15:02:34 crc kubenswrapper[4857]: I1128 15:02:34.752318 4857 scope.go:117] "RemoveContainer" containerID="37ebf6ab47b3420c84734de45e1446b9dd1a5049df094b2630d04dd2e116f520" Nov 28 15:02:34 crc kubenswrapper[4857]: I1128 15:02:34.771568 4857 scope.go:117] "RemoveContainer" containerID="03fd8797a776b4ea2d54a487cede8214e72aa938341dbe08186c24fd89318987" Nov 28 15:02:34 crc kubenswrapper[4857]: E1128 15:02:34.772024 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"03fd8797a776b4ea2d54a487cede8214e72aa938341dbe08186c24fd89318987\": container with ID starting with 03fd8797a776b4ea2d54a487cede8214e72aa938341dbe08186c24fd89318987 not found: ID does not exist" containerID="03fd8797a776b4ea2d54a487cede8214e72aa938341dbe08186c24fd89318987" Nov 28 15:02:34 crc kubenswrapper[4857]: I1128 15:02:34.772069 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"03fd8797a776b4ea2d54a487cede8214e72aa938341dbe08186c24fd89318987"} err="failed to get container status \"03fd8797a776b4ea2d54a487cede8214e72aa938341dbe08186c24fd89318987\": rpc error: code = NotFound desc = could not find container \"03fd8797a776b4ea2d54a487cede8214e72aa938341dbe08186c24fd89318987\": container with ID starting with 03fd8797a776b4ea2d54a487cede8214e72aa938341dbe08186c24fd89318987 not found: ID does not exist" Nov 28 15:02:34 crc kubenswrapper[4857]: I1128 15:02:34.772097 4857 scope.go:117] "RemoveContainer" containerID="37ebf6ab47b3420c84734de45e1446b9dd1a5049df094b2630d04dd2e116f520" Nov 28 15:02:34 crc kubenswrapper[4857]: E1128 15:02:34.772336 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"37ebf6ab47b3420c84734de45e1446b9dd1a5049df094b2630d04dd2e116f520\": container with ID starting with 37ebf6ab47b3420c84734de45e1446b9dd1a5049df094b2630d04dd2e116f520 not found: ID does not exist" containerID="37ebf6ab47b3420c84734de45e1446b9dd1a5049df094b2630d04dd2e116f520" Nov 28 15:02:34 crc kubenswrapper[4857]: I1128 15:02:34.772352 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"37ebf6ab47b3420c84734de45e1446b9dd1a5049df094b2630d04dd2e116f520"} err="failed to get container status \"37ebf6ab47b3420c84734de45e1446b9dd1a5049df094b2630d04dd2e116f520\": rpc error: code = NotFound desc = could not find container \"37ebf6ab47b3420c84734de45e1446b9dd1a5049df094b2630d04dd2e116f520\": container with ID starting with 37ebf6ab47b3420c84734de45e1446b9dd1a5049df094b2630d04dd2e116f520 not found: ID does not exist" Nov 28 15:02:34 crc kubenswrapper[4857]: I1128 15:02:34.852648 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/67ca415f-ed01-4831-a0b7-43f1277ba04b-dns-svc\") pod \"67ca415f-ed01-4831-a0b7-43f1277ba04b\" (UID: \"67ca415f-ed01-4831-a0b7-43f1277ba04b\") " Nov 28 15:02:34 crc kubenswrapper[4857]: I1128 15:02:34.852806 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l2gqm\" (UniqueName: \"kubernetes.io/projected/67ca415f-ed01-4831-a0b7-43f1277ba04b-kube-api-access-l2gqm\") pod \"67ca415f-ed01-4831-a0b7-43f1277ba04b\" (UID: \"67ca415f-ed01-4831-a0b7-43f1277ba04b\") " Nov 28 15:02:34 crc kubenswrapper[4857]: I1128 15:02:34.852982 4857 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67ca415f-ed01-4831-a0b7-43f1277ba04b-config\") pod \"67ca415f-ed01-4831-a0b7-43f1277ba04b\" (UID: \"67ca415f-ed01-4831-a0b7-43f1277ba04b\") " Nov 28 15:02:34 crc kubenswrapper[4857]: I1128 15:02:34.853044 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/67ca415f-ed01-4831-a0b7-43f1277ba04b-ovsdbserver-nb\") pod \"67ca415f-ed01-4831-a0b7-43f1277ba04b\" (UID: \"67ca415f-ed01-4831-a0b7-43f1277ba04b\") " Nov 28 15:02:34 crc kubenswrapper[4857]: I1128 15:02:34.853104 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/67ca415f-ed01-4831-a0b7-43f1277ba04b-ovsdbserver-sb\") pod \"67ca415f-ed01-4831-a0b7-43f1277ba04b\" (UID: \"67ca415f-ed01-4831-a0b7-43f1277ba04b\") " Nov 28 15:02:34 crc kubenswrapper[4857]: I1128 15:02:34.861044 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/67ca415f-ed01-4831-a0b7-43f1277ba04b-kube-api-access-l2gqm" (OuterVolumeSpecName: "kube-api-access-l2gqm") pod "67ca415f-ed01-4831-a0b7-43f1277ba04b" (UID: "67ca415f-ed01-4831-a0b7-43f1277ba04b"). InnerVolumeSpecName "kube-api-access-l2gqm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:02:34 crc kubenswrapper[4857]: I1128 15:02:34.901097 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/67ca415f-ed01-4831-a0b7-43f1277ba04b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "67ca415f-ed01-4831-a0b7-43f1277ba04b" (UID: "67ca415f-ed01-4831-a0b7-43f1277ba04b"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:02:34 crc kubenswrapper[4857]: I1128 15:02:34.913779 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/67ca415f-ed01-4831-a0b7-43f1277ba04b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "67ca415f-ed01-4831-a0b7-43f1277ba04b" (UID: "67ca415f-ed01-4831-a0b7-43f1277ba04b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:02:34 crc kubenswrapper[4857]: I1128 15:02:34.913860 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/67ca415f-ed01-4831-a0b7-43f1277ba04b-config" (OuterVolumeSpecName: "config") pod "67ca415f-ed01-4831-a0b7-43f1277ba04b" (UID: "67ca415f-ed01-4831-a0b7-43f1277ba04b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:02:34 crc kubenswrapper[4857]: I1128 15:02:34.920536 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/67ca415f-ed01-4831-a0b7-43f1277ba04b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "67ca415f-ed01-4831-a0b7-43f1277ba04b" (UID: "67ca415f-ed01-4831-a0b7-43f1277ba04b"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:02:34 crc kubenswrapper[4857]: I1128 15:02:34.959051 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67ca415f-ed01-4831-a0b7-43f1277ba04b-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:02:34 crc kubenswrapper[4857]: I1128 15:02:34.959291 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/67ca415f-ed01-4831-a0b7-43f1277ba04b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 15:02:34 crc kubenswrapper[4857]: I1128 15:02:34.959354 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/67ca415f-ed01-4831-a0b7-43f1277ba04b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 15:02:34 crc kubenswrapper[4857]: I1128 15:02:34.959440 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/67ca415f-ed01-4831-a0b7-43f1277ba04b-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 15:02:34 crc kubenswrapper[4857]: I1128 15:02:34.959498 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l2gqm\" (UniqueName: \"kubernetes.io/projected/67ca415f-ed01-4831-a0b7-43f1277ba04b-kube-api-access-l2gqm\") on node \"crc\" DevicePath \"\"" Nov 28 15:02:35 crc kubenswrapper[4857]: I1128 15:02:35.066541 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6675bbbc7-9gf5q"] Nov 28 15:02:35 crc kubenswrapper[4857]: I1128 15:02:35.073368 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6675bbbc7-9gf5q"] Nov 28 15:02:36 crc kubenswrapper[4857]: I1128 15:02:36.242936 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="67ca415f-ed01-4831-a0b7-43f1277ba04b" path="/var/lib/kubelet/pods/67ca415f-ed01-4831-a0b7-43f1277ba04b/volumes" Nov 28 15:02:41 crc kubenswrapper[4857]: I1128 15:02:41.308567 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:02:41 crc kubenswrapper[4857]: I1128 15:02:41.309572 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:02:54 crc kubenswrapper[4857]: I1128 15:02:54.195624 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-86b5dbcd7c-xktg6" Nov 28 15:03:02 crc kubenswrapper[4857]: I1128 15:03:02.027196 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-jp2pg"] Nov 28 15:03:02 crc kubenswrapper[4857]: E1128 15:03:02.028503 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67ca415f-ed01-4831-a0b7-43f1277ba04b" containerName="init" Nov 28 15:03:02 crc kubenswrapper[4857]: I1128 15:03:02.028562 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="67ca415f-ed01-4831-a0b7-43f1277ba04b" containerName="init" Nov 28 15:03:02 crc kubenswrapper[4857]: E1128 15:03:02.028582 4857 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="67ca415f-ed01-4831-a0b7-43f1277ba04b" containerName="dnsmasq-dns" Nov 28 15:03:02 crc kubenswrapper[4857]: I1128 15:03:02.028588 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="67ca415f-ed01-4831-a0b7-43f1277ba04b" containerName="dnsmasq-dns" Nov 28 15:03:02 crc kubenswrapper[4857]: I1128 15:03:02.028893 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="67ca415f-ed01-4831-a0b7-43f1277ba04b" containerName="dnsmasq-dns" Nov 28 15:03:02 crc kubenswrapper[4857]: I1128 15:03:02.029610 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-jp2pg" Nov 28 15:03:02 crc kubenswrapper[4857]: I1128 15:03:02.053054 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-jp2pg"] Nov 28 15:03:02 crc kubenswrapper[4857]: I1128 15:03:02.118089 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-a08e-account-create-update-27n5p"] Nov 28 15:03:02 crc kubenswrapper[4857]: I1128 15:03:02.119447 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-a08e-account-create-update-27n5p" Nov 28 15:03:02 crc kubenswrapper[4857]: I1128 15:03:02.121634 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Nov 28 15:03:02 crc kubenswrapper[4857]: I1128 15:03:02.126706 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-a08e-account-create-update-27n5p"] Nov 28 15:03:02 crc kubenswrapper[4857]: I1128 15:03:02.186696 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6c98cbc3-a049-4544-b3c0-48a74ec08df4-operator-scripts\") pod \"glance-db-create-jp2pg\" (UID: \"6c98cbc3-a049-4544-b3c0-48a74ec08df4\") " pod="openstack/glance-db-create-jp2pg" Nov 28 15:03:02 crc kubenswrapper[4857]: I1128 15:03:02.186885 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4qdxf\" (UniqueName: \"kubernetes.io/projected/6c98cbc3-a049-4544-b3c0-48a74ec08df4-kube-api-access-4qdxf\") pod \"glance-db-create-jp2pg\" (UID: \"6c98cbc3-a049-4544-b3c0-48a74ec08df4\") " pod="openstack/glance-db-create-jp2pg" Nov 28 15:03:02 crc kubenswrapper[4857]: I1128 15:03:02.187008 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fl49m\" (UniqueName: \"kubernetes.io/projected/cdb8944a-abc2-4787-949b-fea63d4eba70-kube-api-access-fl49m\") pod \"glance-a08e-account-create-update-27n5p\" (UID: \"cdb8944a-abc2-4787-949b-fea63d4eba70\") " pod="openstack/glance-a08e-account-create-update-27n5p" Nov 28 15:03:02 crc kubenswrapper[4857]: I1128 15:03:02.187339 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cdb8944a-abc2-4787-949b-fea63d4eba70-operator-scripts\") pod \"glance-a08e-account-create-update-27n5p\" (UID: \"cdb8944a-abc2-4787-949b-fea63d4eba70\") " pod="openstack/glance-a08e-account-create-update-27n5p" Nov 28 15:03:02 crc kubenswrapper[4857]: I1128 15:03:02.289390 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6c98cbc3-a049-4544-b3c0-48a74ec08df4-operator-scripts\") pod \"glance-db-create-jp2pg\" (UID: \"6c98cbc3-a049-4544-b3c0-48a74ec08df4\") " 
pod="openstack/glance-db-create-jp2pg" Nov 28 15:03:02 crc kubenswrapper[4857]: I1128 15:03:02.289524 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4qdxf\" (UniqueName: \"kubernetes.io/projected/6c98cbc3-a049-4544-b3c0-48a74ec08df4-kube-api-access-4qdxf\") pod \"glance-db-create-jp2pg\" (UID: \"6c98cbc3-a049-4544-b3c0-48a74ec08df4\") " pod="openstack/glance-db-create-jp2pg" Nov 28 15:03:02 crc kubenswrapper[4857]: I1128 15:03:02.289568 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fl49m\" (UniqueName: \"kubernetes.io/projected/cdb8944a-abc2-4787-949b-fea63d4eba70-kube-api-access-fl49m\") pod \"glance-a08e-account-create-update-27n5p\" (UID: \"cdb8944a-abc2-4787-949b-fea63d4eba70\") " pod="openstack/glance-a08e-account-create-update-27n5p" Nov 28 15:03:02 crc kubenswrapper[4857]: I1128 15:03:02.290169 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cdb8944a-abc2-4787-949b-fea63d4eba70-operator-scripts\") pod \"glance-a08e-account-create-update-27n5p\" (UID: \"cdb8944a-abc2-4787-949b-fea63d4eba70\") " pod="openstack/glance-a08e-account-create-update-27n5p" Nov 28 15:03:02 crc kubenswrapper[4857]: I1128 15:03:02.290189 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6c98cbc3-a049-4544-b3c0-48a74ec08df4-operator-scripts\") pod \"glance-db-create-jp2pg\" (UID: \"6c98cbc3-a049-4544-b3c0-48a74ec08df4\") " pod="openstack/glance-db-create-jp2pg" Nov 28 15:03:02 crc kubenswrapper[4857]: I1128 15:03:02.290746 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cdb8944a-abc2-4787-949b-fea63d4eba70-operator-scripts\") pod \"glance-a08e-account-create-update-27n5p\" (UID: \"cdb8944a-abc2-4787-949b-fea63d4eba70\") " pod="openstack/glance-a08e-account-create-update-27n5p" Nov 28 15:03:02 crc kubenswrapper[4857]: I1128 15:03:02.309796 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4qdxf\" (UniqueName: \"kubernetes.io/projected/6c98cbc3-a049-4544-b3c0-48a74ec08df4-kube-api-access-4qdxf\") pod \"glance-db-create-jp2pg\" (UID: \"6c98cbc3-a049-4544-b3c0-48a74ec08df4\") " pod="openstack/glance-db-create-jp2pg" Nov 28 15:03:02 crc kubenswrapper[4857]: I1128 15:03:02.309796 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fl49m\" (UniqueName: \"kubernetes.io/projected/cdb8944a-abc2-4787-949b-fea63d4eba70-kube-api-access-fl49m\") pod \"glance-a08e-account-create-update-27n5p\" (UID: \"cdb8944a-abc2-4787-949b-fea63d4eba70\") " pod="openstack/glance-a08e-account-create-update-27n5p" Nov 28 15:03:02 crc kubenswrapper[4857]: I1128 15:03:02.349364 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-jp2pg" Nov 28 15:03:02 crc kubenswrapper[4857]: I1128 15:03:02.435108 4857 util.go:30] "No sandbox for pod can be found. 
Nov 28 15:03:02 crc kubenswrapper[4857]: I1128 15:03:02.838801 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-jp2pg"]
Nov 28 15:03:03 crc kubenswrapper[4857]: W1128 15:03:03.048400 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcdb8944a_abc2_4787_949b_fea63d4eba70.slice/crio-6d644eae1e745d37a211d33fb16cca3848ab7d823bcc0a3a454fccb582358bbe WatchSource:0}: Error finding container 6d644eae1e745d37a211d33fb16cca3848ab7d823bcc0a3a454fccb582358bbe: Status 404 returned error can't find the container with id 6d644eae1e745d37a211d33fb16cca3848ab7d823bcc0a3a454fccb582358bbe
Nov 28 15:03:03 crc kubenswrapper[4857]: I1128 15:03:03.048869 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-a08e-account-create-update-27n5p"]
Nov 28 15:03:03 crc kubenswrapper[4857]: I1128 15:03:03.063339 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-a08e-account-create-update-27n5p" event={"ID":"cdb8944a-abc2-4787-949b-fea63d4eba70","Type":"ContainerStarted","Data":"6d644eae1e745d37a211d33fb16cca3848ab7d823bcc0a3a454fccb582358bbe"}
Nov 28 15:03:03 crc kubenswrapper[4857]: I1128 15:03:03.065610 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-jp2pg" event={"ID":"6c98cbc3-a049-4544-b3c0-48a74ec08df4","Type":"ContainerStarted","Data":"e037644e828981be12dc6bec2a5c9f6c6cc372d7127f22cff367c28cd052b1c8"}
Nov 28 15:03:03 crc kubenswrapper[4857]: I1128 15:03:03.065673 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-jp2pg" event={"ID":"6c98cbc3-a049-4544-b3c0-48a74ec08df4","Type":"ContainerStarted","Data":"0d9f7eb7981b0494324fcce013cfb8a64bd2f09a07a4216201f0722aa5262cf1"}
Nov 28 15:03:03 crc kubenswrapper[4857]: I1128 15:03:03.090490 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-create-jp2pg" podStartSLOduration=1.0904600150000001 podStartE2EDuration="1.090460015s" podCreationTimestamp="2025-11-28 15:03:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:03:03.078348181 +0000 UTC m=+5633.202289618" watchObservedRunningTime="2025-11-28 15:03:03.090460015 +0000 UTC m=+5633.214401462"
Nov 28 15:03:04 crc kubenswrapper[4857]: I1128 15:03:04.078840 4857 generic.go:334] "Generic (PLEG): container finished" podID="6c98cbc3-a049-4544-b3c0-48a74ec08df4" containerID="e037644e828981be12dc6bec2a5c9f6c6cc372d7127f22cff367c28cd052b1c8" exitCode=0
Nov 28 15:03:04 crc kubenswrapper[4857]: I1128 15:03:04.079017 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-jp2pg" event={"ID":"6c98cbc3-a049-4544-b3c0-48a74ec08df4","Type":"ContainerDied","Data":"e037644e828981be12dc6bec2a5c9f6c6cc372d7127f22cff367c28cd052b1c8"}
Nov 28 15:03:04 crc kubenswrapper[4857]: I1128 15:03:04.081064 4857 generic.go:334] "Generic (PLEG): container finished" podID="cdb8944a-abc2-4787-949b-fea63d4eba70" containerID="d80414fdb19849f61b5dec754fb96f10a2b88338202789c3582b8678a86da348" exitCode=0
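
The pod_startup_latency_tracker.go:104 entry above reports podStartSLOduration=1.090460015s for glance-db-create-jp2pg. With no image pulls recorded (both pulling timestamps are the zero time), that value is just watchObservedRunningTime minus podCreationTimestamp, which the following check reproduces from the logged timestamps:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Timestamps copied from the "Observed pod startup duration" entry
	// for openstack/glance-db-create-jp2pg. Go's time.Parse accepts a
	// fractional-seconds field even when the layout omits it.
	const layout = "2006-01-02 15:04:05 -0700 MST"
	created, _ := time.Parse(layout, "2025-11-28 15:03:02 +0000 UTC")
	observed, _ := time.Parse(layout, "2025-11-28 15:03:03.090460015 +0000 UTC")

	// With zero image-pull time, the SLO duration reduces to
	// observed-minus-created: 1.090460015s, matching the log.
	fmt.Printf("podStartSLOduration=%.9fs\n", observed.Sub(created).Seconds())
}
```
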
event={"ID":"cdb8944a-abc2-4787-949b-fea63d4eba70","Type":"ContainerDied","Data":"d80414fdb19849f61b5dec754fb96f10a2b88338202789c3582b8678a86da348"} Nov 28 15:03:05 crc kubenswrapper[4857]: I1128 15:03:05.575169 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-jp2pg" Nov 28 15:03:05 crc kubenswrapper[4857]: I1128 15:03:05.579654 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-a08e-account-create-update-27n5p" Nov 28 15:03:05 crc kubenswrapper[4857]: I1128 15:03:05.677127 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4qdxf\" (UniqueName: \"kubernetes.io/projected/6c98cbc3-a049-4544-b3c0-48a74ec08df4-kube-api-access-4qdxf\") pod \"6c98cbc3-a049-4544-b3c0-48a74ec08df4\" (UID: \"6c98cbc3-a049-4544-b3c0-48a74ec08df4\") " Nov 28 15:03:05 crc kubenswrapper[4857]: I1128 15:03:05.677218 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6c98cbc3-a049-4544-b3c0-48a74ec08df4-operator-scripts\") pod \"6c98cbc3-a049-4544-b3c0-48a74ec08df4\" (UID: \"6c98cbc3-a049-4544-b3c0-48a74ec08df4\") " Nov 28 15:03:05 crc kubenswrapper[4857]: I1128 15:03:05.677272 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cdb8944a-abc2-4787-949b-fea63d4eba70-operator-scripts\") pod \"cdb8944a-abc2-4787-949b-fea63d4eba70\" (UID: \"cdb8944a-abc2-4787-949b-fea63d4eba70\") " Nov 28 15:03:05 crc kubenswrapper[4857]: I1128 15:03:05.677305 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fl49m\" (UniqueName: \"kubernetes.io/projected/cdb8944a-abc2-4787-949b-fea63d4eba70-kube-api-access-fl49m\") pod \"cdb8944a-abc2-4787-949b-fea63d4eba70\" (UID: \"cdb8944a-abc2-4787-949b-fea63d4eba70\") " Nov 28 15:03:05 crc kubenswrapper[4857]: I1128 15:03:05.678339 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cdb8944a-abc2-4787-949b-fea63d4eba70-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "cdb8944a-abc2-4787-949b-fea63d4eba70" (UID: "cdb8944a-abc2-4787-949b-fea63d4eba70"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:03:05 crc kubenswrapper[4857]: I1128 15:03:05.678350 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c98cbc3-a049-4544-b3c0-48a74ec08df4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6c98cbc3-a049-4544-b3c0-48a74ec08df4" (UID: "6c98cbc3-a049-4544-b3c0-48a74ec08df4"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:03:05 crc kubenswrapper[4857]: I1128 15:03:05.678752 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6c98cbc3-a049-4544-b3c0-48a74ec08df4-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:03:05 crc kubenswrapper[4857]: I1128 15:03:05.678766 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cdb8944a-abc2-4787-949b-fea63d4eba70-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:03:05 crc kubenswrapper[4857]: I1128 15:03:05.684349 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c98cbc3-a049-4544-b3c0-48a74ec08df4-kube-api-access-4qdxf" (OuterVolumeSpecName: "kube-api-access-4qdxf") pod "6c98cbc3-a049-4544-b3c0-48a74ec08df4" (UID: "6c98cbc3-a049-4544-b3c0-48a74ec08df4"). InnerVolumeSpecName "kube-api-access-4qdxf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:03:05 crc kubenswrapper[4857]: I1128 15:03:05.684541 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cdb8944a-abc2-4787-949b-fea63d4eba70-kube-api-access-fl49m" (OuterVolumeSpecName: "kube-api-access-fl49m") pod "cdb8944a-abc2-4787-949b-fea63d4eba70" (UID: "cdb8944a-abc2-4787-949b-fea63d4eba70"). InnerVolumeSpecName "kube-api-access-fl49m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:03:05 crc kubenswrapper[4857]: I1128 15:03:05.780833 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fl49m\" (UniqueName: \"kubernetes.io/projected/cdb8944a-abc2-4787-949b-fea63d4eba70-kube-api-access-fl49m\") on node \"crc\" DevicePath \"\"" Nov 28 15:03:05 crc kubenswrapper[4857]: I1128 15:03:05.780869 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4qdxf\" (UniqueName: \"kubernetes.io/projected/6c98cbc3-a049-4544-b3c0-48a74ec08df4-kube-api-access-4qdxf\") on node \"crc\" DevicePath \"\"" Nov 28 15:03:06 crc kubenswrapper[4857]: I1128 15:03:06.103708 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-a08e-account-create-update-27n5p" event={"ID":"cdb8944a-abc2-4787-949b-fea63d4eba70","Type":"ContainerDied","Data":"6d644eae1e745d37a211d33fb16cca3848ab7d823bcc0a3a454fccb582358bbe"} Nov 28 15:03:06 crc kubenswrapper[4857]: I1128 15:03:06.103768 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-a08e-account-create-update-27n5p" Nov 28 15:03:06 crc kubenswrapper[4857]: I1128 15:03:06.103785 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6d644eae1e745d37a211d33fb16cca3848ab7d823bcc0a3a454fccb582358bbe" Nov 28 15:03:06 crc kubenswrapper[4857]: I1128 15:03:06.106470 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-jp2pg" event={"ID":"6c98cbc3-a049-4544-b3c0-48a74ec08df4","Type":"ContainerDied","Data":"0d9f7eb7981b0494324fcce013cfb8a64bd2f09a07a4216201f0722aa5262cf1"} Nov 28 15:03:06 crc kubenswrapper[4857]: I1128 15:03:06.106520 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0d9f7eb7981b0494324fcce013cfb8a64bd2f09a07a4216201f0722aa5262cf1" Nov 28 15:03:06 crc kubenswrapper[4857]: I1128 15:03:06.106555 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-jp2pg" Nov 28 15:03:07 crc kubenswrapper[4857]: I1128 15:03:07.308566 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-m2gp4"] Nov 28 15:03:07 crc kubenswrapper[4857]: E1128 15:03:07.309419 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c98cbc3-a049-4544-b3c0-48a74ec08df4" containerName="mariadb-database-create" Nov 28 15:03:07 crc kubenswrapper[4857]: I1128 15:03:07.309437 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c98cbc3-a049-4544-b3c0-48a74ec08df4" containerName="mariadb-database-create" Nov 28 15:03:07 crc kubenswrapper[4857]: E1128 15:03:07.309465 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cdb8944a-abc2-4787-949b-fea63d4eba70" containerName="mariadb-account-create-update" Nov 28 15:03:07 crc kubenswrapper[4857]: I1128 15:03:07.309474 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="cdb8944a-abc2-4787-949b-fea63d4eba70" containerName="mariadb-account-create-update" Nov 28 15:03:07 crc kubenswrapper[4857]: I1128 15:03:07.309680 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="cdb8944a-abc2-4787-949b-fea63d4eba70" containerName="mariadb-account-create-update" Nov 28 15:03:07 crc kubenswrapper[4857]: I1128 15:03:07.309707 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c98cbc3-a049-4544-b3c0-48a74ec08df4" containerName="mariadb-database-create" Nov 28 15:03:07 crc kubenswrapper[4857]: I1128 15:03:07.310507 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-m2gp4" Nov 28 15:03:07 crc kubenswrapper[4857]: I1128 15:03:07.317127 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 28 15:03:07 crc kubenswrapper[4857]: I1128 15:03:07.317574 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-4992l" Nov 28 15:03:07 crc kubenswrapper[4857]: I1128 15:03:07.322285 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-m2gp4"] Nov 28 15:03:07 crc kubenswrapper[4857]: I1128 15:03:07.415057 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea639f28-3b3c-4789-ad0c-4ae92923cb07-config-data\") pod \"glance-db-sync-m2gp4\" (UID: \"ea639f28-3b3c-4789-ad0c-4ae92923cb07\") " pod="openstack/glance-db-sync-m2gp4" Nov 28 15:03:07 crc kubenswrapper[4857]: I1128 15:03:07.415213 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ea639f28-3b3c-4789-ad0c-4ae92923cb07-db-sync-config-data\") pod \"glance-db-sync-m2gp4\" (UID: \"ea639f28-3b3c-4789-ad0c-4ae92923cb07\") " pod="openstack/glance-db-sync-m2gp4" Nov 28 15:03:07 crc kubenswrapper[4857]: I1128 15:03:07.415374 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8kc4m\" (UniqueName: \"kubernetes.io/projected/ea639f28-3b3c-4789-ad0c-4ae92923cb07-kube-api-access-8kc4m\") pod \"glance-db-sync-m2gp4\" (UID: \"ea639f28-3b3c-4789-ad0c-4ae92923cb07\") " pod="openstack/glance-db-sync-m2gp4" Nov 28 15:03:07 crc kubenswrapper[4857]: I1128 15:03:07.415458 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/ea639f28-3b3c-4789-ad0c-4ae92923cb07-combined-ca-bundle\") pod \"glance-db-sync-m2gp4\" (UID: \"ea639f28-3b3c-4789-ad0c-4ae92923cb07\") " pod="openstack/glance-db-sync-m2gp4" Nov 28 15:03:07 crc kubenswrapper[4857]: I1128 15:03:07.517556 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea639f28-3b3c-4789-ad0c-4ae92923cb07-config-data\") pod \"glance-db-sync-m2gp4\" (UID: \"ea639f28-3b3c-4789-ad0c-4ae92923cb07\") " pod="openstack/glance-db-sync-m2gp4" Nov 28 15:03:07 crc kubenswrapper[4857]: I1128 15:03:07.517628 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ea639f28-3b3c-4789-ad0c-4ae92923cb07-db-sync-config-data\") pod \"glance-db-sync-m2gp4\" (UID: \"ea639f28-3b3c-4789-ad0c-4ae92923cb07\") " pod="openstack/glance-db-sync-m2gp4" Nov 28 15:03:07 crc kubenswrapper[4857]: I1128 15:03:07.517727 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8kc4m\" (UniqueName: \"kubernetes.io/projected/ea639f28-3b3c-4789-ad0c-4ae92923cb07-kube-api-access-8kc4m\") pod \"glance-db-sync-m2gp4\" (UID: \"ea639f28-3b3c-4789-ad0c-4ae92923cb07\") " pod="openstack/glance-db-sync-m2gp4" Nov 28 15:03:07 crc kubenswrapper[4857]: I1128 15:03:07.517785 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea639f28-3b3c-4789-ad0c-4ae92923cb07-combined-ca-bundle\") pod \"glance-db-sync-m2gp4\" (UID: \"ea639f28-3b3c-4789-ad0c-4ae92923cb07\") " pod="openstack/glance-db-sync-m2gp4" Nov 28 15:03:07 crc kubenswrapper[4857]: I1128 15:03:07.525766 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea639f28-3b3c-4789-ad0c-4ae92923cb07-config-data\") pod \"glance-db-sync-m2gp4\" (UID: \"ea639f28-3b3c-4789-ad0c-4ae92923cb07\") " pod="openstack/glance-db-sync-m2gp4" Nov 28 15:03:07 crc kubenswrapper[4857]: I1128 15:03:07.526680 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea639f28-3b3c-4789-ad0c-4ae92923cb07-combined-ca-bundle\") pod \"glance-db-sync-m2gp4\" (UID: \"ea639f28-3b3c-4789-ad0c-4ae92923cb07\") " pod="openstack/glance-db-sync-m2gp4" Nov 28 15:03:07 crc kubenswrapper[4857]: I1128 15:03:07.527626 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ea639f28-3b3c-4789-ad0c-4ae92923cb07-db-sync-config-data\") pod \"glance-db-sync-m2gp4\" (UID: \"ea639f28-3b3c-4789-ad0c-4ae92923cb07\") " pod="openstack/glance-db-sync-m2gp4" Nov 28 15:03:07 crc kubenswrapper[4857]: I1128 15:03:07.546807 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8kc4m\" (UniqueName: \"kubernetes.io/projected/ea639f28-3b3c-4789-ad0c-4ae92923cb07-kube-api-access-8kc4m\") pod \"glance-db-sync-m2gp4\" (UID: \"ea639f28-3b3c-4789-ad0c-4ae92923cb07\") " pod="openstack/glance-db-sync-m2gp4" Nov 28 15:03:07 crc kubenswrapper[4857]: I1128 15:03:07.632509 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-m2gp4"
Nov 28 15:03:08 crc kubenswrapper[4857]: I1128 15:03:08.001009 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-m2gp4"]
Nov 28 15:03:08 crc kubenswrapper[4857]: W1128 15:03:08.014175 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podea639f28_3b3c_4789_ad0c_4ae92923cb07.slice/crio-6e14a7ce3e8ea3d5144955ff83dae2f3593e492aeb35dbef5c7c8237c011e181 WatchSource:0}: Error finding container 6e14a7ce3e8ea3d5144955ff83dae2f3593e492aeb35dbef5c7c8237c011e181: Status 404 returned error can't find the container with id 6e14a7ce3e8ea3d5144955ff83dae2f3593e492aeb35dbef5c7c8237c011e181
Nov 28 15:03:08 crc kubenswrapper[4857]: I1128 15:03:08.129698 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-m2gp4" event={"ID":"ea639f28-3b3c-4789-ad0c-4ae92923cb07","Type":"ContainerStarted","Data":"6e14a7ce3e8ea3d5144955ff83dae2f3593e492aeb35dbef5c7c8237c011e181"}
Nov 28 15:03:09 crc kubenswrapper[4857]: I1128 15:03:09.140934 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-m2gp4" event={"ID":"ea639f28-3b3c-4789-ad0c-4ae92923cb07","Type":"ContainerStarted","Data":"1d0804c4663772502e2852805fbb2fb371c04769987f092f6024c50e6b59e167"}
Nov 28 15:03:09 crc kubenswrapper[4857]: I1128 15:03:09.160890 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-m2gp4" podStartSLOduration=2.160870479 podStartE2EDuration="2.160870479s" podCreationTimestamp="2025-11-28 15:03:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:03:09.154648343 +0000 UTC m=+5639.278589790" watchObservedRunningTime="2025-11-28 15:03:09.160870479 +0000 UTC m=+5639.284811916"
Nov 28 15:03:11 crc kubenswrapper[4857]: I1128 15:03:11.309372 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 15:03:11 crc kubenswrapper[4857]: I1128 15:03:11.309767 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 15:03:11 crc kubenswrapper[4857]: I1128 15:03:11.309834 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dshsf"
Nov 28 15:03:11 crc kubenswrapper[4857]: I1128 15:03:11.310848 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e74738a0b8d5bffa43f61ec8bf86cdc9bce08ade2ead6c33503f5aba9862d3f0"} pod="openshift-machine-config-operator/machine-config-daemon-dshsf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
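
The probe entries above show the kubelet's HTTP liveness check against machine-config-daemon failing repeatedly with "connection refused" on 127.0.0.1:8798 (15:02:41 and again at 15:03:11), after which the container is marked for restart. What the HTTP probe reduces to, as a sketch: the URL comes from the log, while the one-second timeout (the kubelet's default timeoutSeconds) and the 200-399 success window are assumptions based on documented probe semantics.

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// probe performs the same check the kubelet ran above: an HTTP GET
// against the container's health endpoint. Any transport error (here
// "connection refused") or a status outside 200-399 counts as failure.
func probe(url string) string {
	client := &http.Client{Timeout: 1 * time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return fmt.Sprintf("failure: Get %q: %v", url, err)
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 200 && resp.StatusCode < 400 {
		return "success"
	}
	return fmt.Sprintf("failure: status %d", resp.StatusCode)
}

func main() {
	fmt.Println(probe("http://127.0.0.1:8798/health"))
}
```
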
podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" containerID="cri-o://e74738a0b8d5bffa43f61ec8bf86cdc9bce08ade2ead6c33503f5aba9862d3f0" gracePeriod=600 Nov 28 15:03:12 crc kubenswrapper[4857]: I1128 15:03:12.171566 4857 generic.go:334] "Generic (PLEG): container finished" podID="ea639f28-3b3c-4789-ad0c-4ae92923cb07" containerID="1d0804c4663772502e2852805fbb2fb371c04769987f092f6024c50e6b59e167" exitCode=0 Nov 28 15:03:12 crc kubenswrapper[4857]: I1128 15:03:12.171679 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-m2gp4" event={"ID":"ea639f28-3b3c-4789-ad0c-4ae92923cb07","Type":"ContainerDied","Data":"1d0804c4663772502e2852805fbb2fb371c04769987f092f6024c50e6b59e167"} Nov 28 15:03:12 crc kubenswrapper[4857]: I1128 15:03:12.175766 4857 generic.go:334] "Generic (PLEG): container finished" podID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerID="e74738a0b8d5bffa43f61ec8bf86cdc9bce08ade2ead6c33503f5aba9862d3f0" exitCode=0 Nov 28 15:03:12 crc kubenswrapper[4857]: I1128 15:03:12.175806 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerDied","Data":"e74738a0b8d5bffa43f61ec8bf86cdc9bce08ade2ead6c33503f5aba9862d3f0"} Nov 28 15:03:12 crc kubenswrapper[4857]: I1128 15:03:12.175841 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerStarted","Data":"241be049ff6ed3f89c9bfd340177c2b5b57d6cca2ed8fa8917c7f312eb8c7ce1"} Nov 28 15:03:12 crc kubenswrapper[4857]: I1128 15:03:12.175861 4857 scope.go:117] "RemoveContainer" containerID="3e9d36f7893d9f1cc6753c2afab65986137c59e8ac6eb66a5f0a1735633d260e" Nov 28 15:03:13 crc kubenswrapper[4857]: I1128 15:03:13.664442 4857 util.go:48] "No ready sandbox for pod can be found. 
Nov 28 15:03:13 crc kubenswrapper[4857]: I1128 15:03:13.664442 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-m2gp4"
Nov 28 15:03:13 crc kubenswrapper[4857]: I1128 15:03:13.852587 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea639f28-3b3c-4789-ad0c-4ae92923cb07-combined-ca-bundle\") pod \"ea639f28-3b3c-4789-ad0c-4ae92923cb07\" (UID: \"ea639f28-3b3c-4789-ad0c-4ae92923cb07\") "
Nov 28 15:03:13 crc kubenswrapper[4857]: I1128 15:03:13.853451 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8kc4m\" (UniqueName: \"kubernetes.io/projected/ea639f28-3b3c-4789-ad0c-4ae92923cb07-kube-api-access-8kc4m\") pod \"ea639f28-3b3c-4789-ad0c-4ae92923cb07\" (UID: \"ea639f28-3b3c-4789-ad0c-4ae92923cb07\") "
Nov 28 15:03:13 crc kubenswrapper[4857]: I1128 15:03:13.853484 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ea639f28-3b3c-4789-ad0c-4ae92923cb07-db-sync-config-data\") pod \"ea639f28-3b3c-4789-ad0c-4ae92923cb07\" (UID: \"ea639f28-3b3c-4789-ad0c-4ae92923cb07\") "
Nov 28 15:03:13 crc kubenswrapper[4857]: I1128 15:03:13.854249 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea639f28-3b3c-4789-ad0c-4ae92923cb07-config-data\") pod \"ea639f28-3b3c-4789-ad0c-4ae92923cb07\" (UID: \"ea639f28-3b3c-4789-ad0c-4ae92923cb07\") "
Nov 28 15:03:13 crc kubenswrapper[4857]: I1128 15:03:13.872717 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ea639f28-3b3c-4789-ad0c-4ae92923cb07-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "ea639f28-3b3c-4789-ad0c-4ae92923cb07" (UID: "ea639f28-3b3c-4789-ad0c-4ae92923cb07"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:03:13 crc kubenswrapper[4857]: I1128 15:03:13.875123 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ea639f28-3b3c-4789-ad0c-4ae92923cb07-kube-api-access-8kc4m" (OuterVolumeSpecName: "kube-api-access-8kc4m") pod "ea639f28-3b3c-4789-ad0c-4ae92923cb07" (UID: "ea639f28-3b3c-4789-ad0c-4ae92923cb07"). InnerVolumeSpecName "kube-api-access-8kc4m". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:03:13 crc kubenswrapper[4857]: I1128 15:03:13.913259 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ea639f28-3b3c-4789-ad0c-4ae92923cb07-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ea639f28-3b3c-4789-ad0c-4ae92923cb07" (UID: "ea639f28-3b3c-4789-ad0c-4ae92923cb07"). InnerVolumeSpecName "combined-ca-bundle".
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:03:13 crc kubenswrapper[4857]: I1128 15:03:13.956131 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8kc4m\" (UniqueName: \"kubernetes.io/projected/ea639f28-3b3c-4789-ad0c-4ae92923cb07-kube-api-access-8kc4m\") on node \"crc\" DevicePath \"\"" Nov 28 15:03:13 crc kubenswrapper[4857]: I1128 15:03:13.956169 4857 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ea639f28-3b3c-4789-ad0c-4ae92923cb07-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:03:13 crc kubenswrapper[4857]: I1128 15:03:13.956179 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea639f28-3b3c-4789-ad0c-4ae92923cb07-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:03:13 crc kubenswrapper[4857]: I1128 15:03:13.989123 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ea639f28-3b3c-4789-ad0c-4ae92923cb07-config-data" (OuterVolumeSpecName: "config-data") pod "ea639f28-3b3c-4789-ad0c-4ae92923cb07" (UID: "ea639f28-3b3c-4789-ad0c-4ae92923cb07"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.058323 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea639f28-3b3c-4789-ad0c-4ae92923cb07-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.220284 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-m2gp4" event={"ID":"ea639f28-3b3c-4789-ad0c-4ae92923cb07","Type":"ContainerDied","Data":"6e14a7ce3e8ea3d5144955ff83dae2f3593e492aeb35dbef5c7c8237c011e181"} Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.220348 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6e14a7ce3e8ea3d5144955ff83dae2f3593e492aeb35dbef5c7c8237c011e181" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.220365 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-m2gp4" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.542684 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:03:14 crc kubenswrapper[4857]: E1128 15:03:14.543080 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea639f28-3b3c-4789-ad0c-4ae92923cb07" containerName="glance-db-sync" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.543098 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea639f28-3b3c-4789-ad0c-4ae92923cb07" containerName="glance-db-sync" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.543256 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea639f28-3b3c-4789-ad0c-4ae92923cb07" containerName="glance-db-sync" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.544152 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.547492 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceph-conf-files" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.547558 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.547591 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.547645 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-4992l" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.565101 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.671031 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89398f9f-95c2-4266-b9e7-75721c227ccf-config-data\") pod \"glance-default-external-api-0\" (UID: \"89398f9f-95c2-4266-b9e7-75721c227ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.672210 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gsmnp\" (UniqueName: \"kubernetes.io/projected/89398f9f-95c2-4266-b9e7-75721c227ccf-kube-api-access-gsmnp\") pod \"glance-default-external-api-0\" (UID: \"89398f9f-95c2-4266-b9e7-75721c227ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.672376 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/89398f9f-95c2-4266-b9e7-75721c227ccf-logs\") pod \"glance-default-external-api-0\" (UID: \"89398f9f-95c2-4266-b9e7-75721c227ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.672472 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/89398f9f-95c2-4266-b9e7-75721c227ccf-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"89398f9f-95c2-4266-b9e7-75721c227ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.672553 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89398f9f-95c2-4266-b9e7-75721c227ccf-scripts\") pod \"glance-default-external-api-0\" (UID: \"89398f9f-95c2-4266-b9e7-75721c227ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.672640 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89398f9f-95c2-4266-b9e7-75721c227ccf-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"89398f9f-95c2-4266-b9e7-75721c227ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.672743 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: 
\"kubernetes.io/projected/89398f9f-95c2-4266-b9e7-75721c227ccf-ceph\") pod \"glance-default-external-api-0\" (UID: \"89398f9f-95c2-4266-b9e7-75721c227ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.696394 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-786fb9969c-gmprd"] Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.698085 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-786fb9969c-gmprd" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.716865 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-786fb9969c-gmprd"] Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.771187 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.772851 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.774154 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ef8e7922-cc0b-47b6-a51f-cebe690c264d-ovsdbserver-nb\") pod \"dnsmasq-dns-786fb9969c-gmprd\" (UID: \"ef8e7922-cc0b-47b6-a51f-cebe690c264d\") " pod="openstack/dnsmasq-dns-786fb9969c-gmprd" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.774193 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef8e7922-cc0b-47b6-a51f-cebe690c264d-config\") pod \"dnsmasq-dns-786fb9969c-gmprd\" (UID: \"ef8e7922-cc0b-47b6-a51f-cebe690c264d\") " pod="openstack/dnsmasq-dns-786fb9969c-gmprd" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.774226 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/89398f9f-95c2-4266-b9e7-75721c227ccf-logs\") pod \"glance-default-external-api-0\" (UID: \"89398f9f-95c2-4266-b9e7-75721c227ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.774253 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/89398f9f-95c2-4266-b9e7-75721c227ccf-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"89398f9f-95c2-4266-b9e7-75721c227ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.774271 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ef8e7922-cc0b-47b6-a51f-cebe690c264d-dns-svc\") pod \"dnsmasq-dns-786fb9969c-gmprd\" (UID: \"ef8e7922-cc0b-47b6-a51f-cebe690c264d\") " pod="openstack/dnsmasq-dns-786fb9969c-gmprd" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.774292 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ef8e7922-cc0b-47b6-a51f-cebe690c264d-ovsdbserver-sb\") pod \"dnsmasq-dns-786fb9969c-gmprd\" (UID: \"ef8e7922-cc0b-47b6-a51f-cebe690c264d\") " pod="openstack/dnsmasq-dns-786fb9969c-gmprd" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.774308 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89398f9f-95c2-4266-b9e7-75721c227ccf-scripts\") pod \"glance-default-external-api-0\" (UID: \"89398f9f-95c2-4266-b9e7-75721c227ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.774329 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89398f9f-95c2-4266-b9e7-75721c227ccf-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"89398f9f-95c2-4266-b9e7-75721c227ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.774364 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/89398f9f-95c2-4266-b9e7-75721c227ccf-ceph\") pod \"glance-default-external-api-0\" (UID: \"89398f9f-95c2-4266-b9e7-75721c227ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.774387 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89398f9f-95c2-4266-b9e7-75721c227ccf-config-data\") pod \"glance-default-external-api-0\" (UID: \"89398f9f-95c2-4266-b9e7-75721c227ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.774439 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gsmnp\" (UniqueName: \"kubernetes.io/projected/89398f9f-95c2-4266-b9e7-75721c227ccf-kube-api-access-gsmnp\") pod \"glance-default-external-api-0\" (UID: \"89398f9f-95c2-4266-b9e7-75721c227ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.774477 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lrkdm\" (UniqueName: \"kubernetes.io/projected/ef8e7922-cc0b-47b6-a51f-cebe690c264d-kube-api-access-lrkdm\") pod \"dnsmasq-dns-786fb9969c-gmprd\" (UID: \"ef8e7922-cc0b-47b6-a51f-cebe690c264d\") " pod="openstack/dnsmasq-dns-786fb9969c-gmprd" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.775022 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/89398f9f-95c2-4266-b9e7-75721c227ccf-logs\") pod \"glance-default-external-api-0\" (UID: \"89398f9f-95c2-4266-b9e7-75721c227ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.775260 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/89398f9f-95c2-4266-b9e7-75721c227ccf-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"89398f9f-95c2-4266-b9e7-75721c227ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.782929 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89398f9f-95c2-4266-b9e7-75721c227ccf-scripts\") pod \"glance-default-external-api-0\" (UID: \"89398f9f-95c2-4266-b9e7-75721c227ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.783306 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 
15:03:14.784444 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/89398f9f-95c2-4266-b9e7-75721c227ccf-ceph\") pod \"glance-default-external-api-0\" (UID: \"89398f9f-95c2-4266-b9e7-75721c227ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.785259 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89398f9f-95c2-4266-b9e7-75721c227ccf-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"89398f9f-95c2-4266-b9e7-75721c227ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.790126 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.828246 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89398f9f-95c2-4266-b9e7-75721c227ccf-config-data\") pod \"glance-default-external-api-0\" (UID: \"89398f9f-95c2-4266-b9e7-75721c227ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.837899 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gsmnp\" (UniqueName: \"kubernetes.io/projected/89398f9f-95c2-4266-b9e7-75721c227ccf-kube-api-access-gsmnp\") pod \"glance-default-external-api-0\" (UID: \"89398f9f-95c2-4266-b9e7-75721c227ccf\") " pod="openstack/glance-default-external-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.862663 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.880490 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef8e7922-cc0b-47b6-a51f-cebe690c264d-config\") pod \"dnsmasq-dns-786fb9969c-gmprd\" (UID: \"ef8e7922-cc0b-47b6-a51f-cebe690c264d\") " pod="openstack/dnsmasq-dns-786fb9969c-gmprd" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.880571 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59244dbe-c5bc-42a4-b309-136c5373655d-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"59244dbe-c5bc-42a4-b309-136c5373655d\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.880599 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ef8e7922-cc0b-47b6-a51f-cebe690c264d-dns-svc\") pod \"dnsmasq-dns-786fb9969c-gmprd\" (UID: \"ef8e7922-cc0b-47b6-a51f-cebe690c264d\") " pod="openstack/dnsmasq-dns-786fb9969c-gmprd" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.880619 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ef8e7922-cc0b-47b6-a51f-cebe690c264d-ovsdbserver-sb\") pod \"dnsmasq-dns-786fb9969c-gmprd\" (UID: \"ef8e7922-cc0b-47b6-a51f-cebe690c264d\") " pod="openstack/dnsmasq-dns-786fb9969c-gmprd" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.880637 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/59244dbe-c5bc-42a4-b309-136c5373655d-scripts\") pod \"glance-default-internal-api-0\" (UID: \"59244dbe-c5bc-42a4-b309-136c5373655d\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.880667 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/59244dbe-c5bc-42a4-b309-136c5373655d-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"59244dbe-c5bc-42a4-b309-136c5373655d\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.880697 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/59244dbe-c5bc-42a4-b309-136c5373655d-ceph\") pod \"glance-default-internal-api-0\" (UID: \"59244dbe-c5bc-42a4-b309-136c5373655d\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.880728 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59244dbe-c5bc-42a4-b309-136c5373655d-config-data\") pod \"glance-default-internal-api-0\" (UID: \"59244dbe-c5bc-42a4-b309-136c5373655d\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.880747 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/59244dbe-c5bc-42a4-b309-136c5373655d-logs\") pod \"glance-default-internal-api-0\" (UID: \"59244dbe-c5bc-42a4-b309-136c5373655d\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.880765 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-92hj9\" (UniqueName: \"kubernetes.io/projected/59244dbe-c5bc-42a4-b309-136c5373655d-kube-api-access-92hj9\") pod \"glance-default-internal-api-0\" (UID: \"59244dbe-c5bc-42a4-b309-136c5373655d\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.880840 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lrkdm\" (UniqueName: \"kubernetes.io/projected/ef8e7922-cc0b-47b6-a51f-cebe690c264d-kube-api-access-lrkdm\") pod \"dnsmasq-dns-786fb9969c-gmprd\" (UID: \"ef8e7922-cc0b-47b6-a51f-cebe690c264d\") " pod="openstack/dnsmasq-dns-786fb9969c-gmprd" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.880862 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ef8e7922-cc0b-47b6-a51f-cebe690c264d-ovsdbserver-nb\") pod \"dnsmasq-dns-786fb9969c-gmprd\" (UID: \"ef8e7922-cc0b-47b6-a51f-cebe690c264d\") " pod="openstack/dnsmasq-dns-786fb9969c-gmprd" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.881726 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ef8e7922-cc0b-47b6-a51f-cebe690c264d-ovsdbserver-nb\") pod \"dnsmasq-dns-786fb9969c-gmprd\" (UID: \"ef8e7922-cc0b-47b6-a51f-cebe690c264d\") " pod="openstack/dnsmasq-dns-786fb9969c-gmprd" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.881809 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/ef8e7922-cc0b-47b6-a51f-cebe690c264d-config\") pod \"dnsmasq-dns-786fb9969c-gmprd\" (UID: \"ef8e7922-cc0b-47b6-a51f-cebe690c264d\") " pod="openstack/dnsmasq-dns-786fb9969c-gmprd" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.882390 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ef8e7922-cc0b-47b6-a51f-cebe690c264d-dns-svc\") pod \"dnsmasq-dns-786fb9969c-gmprd\" (UID: \"ef8e7922-cc0b-47b6-a51f-cebe690c264d\") " pod="openstack/dnsmasq-dns-786fb9969c-gmprd" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.882619 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ef8e7922-cc0b-47b6-a51f-cebe690c264d-ovsdbserver-sb\") pod \"dnsmasq-dns-786fb9969c-gmprd\" (UID: \"ef8e7922-cc0b-47b6-a51f-cebe690c264d\") " pod="openstack/dnsmasq-dns-786fb9969c-gmprd" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.903067 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lrkdm\" (UniqueName: \"kubernetes.io/projected/ef8e7922-cc0b-47b6-a51f-cebe690c264d-kube-api-access-lrkdm\") pod \"dnsmasq-dns-786fb9969c-gmprd\" (UID: \"ef8e7922-cc0b-47b6-a51f-cebe690c264d\") " pod="openstack/dnsmasq-dns-786fb9969c-gmprd" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.982760 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59244dbe-c5bc-42a4-b309-136c5373655d-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"59244dbe-c5bc-42a4-b309-136c5373655d\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.982810 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/59244dbe-c5bc-42a4-b309-136c5373655d-scripts\") pod \"glance-default-internal-api-0\" (UID: \"59244dbe-c5bc-42a4-b309-136c5373655d\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.982840 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/59244dbe-c5bc-42a4-b309-136c5373655d-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"59244dbe-c5bc-42a4-b309-136c5373655d\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.982871 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/59244dbe-c5bc-42a4-b309-136c5373655d-ceph\") pod \"glance-default-internal-api-0\" (UID: \"59244dbe-c5bc-42a4-b309-136c5373655d\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.982903 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59244dbe-c5bc-42a4-b309-136c5373655d-config-data\") pod \"glance-default-internal-api-0\" (UID: \"59244dbe-c5bc-42a4-b309-136c5373655d\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.982919 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/59244dbe-c5bc-42a4-b309-136c5373655d-logs\") pod \"glance-default-internal-api-0\" (UID: 
\"59244dbe-c5bc-42a4-b309-136c5373655d\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.982937 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-92hj9\" (UniqueName: \"kubernetes.io/projected/59244dbe-c5bc-42a4-b309-136c5373655d-kube-api-access-92hj9\") pod \"glance-default-internal-api-0\" (UID: \"59244dbe-c5bc-42a4-b309-136c5373655d\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.985579 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/59244dbe-c5bc-42a4-b309-136c5373655d-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"59244dbe-c5bc-42a4-b309-136c5373655d\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.985812 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/59244dbe-c5bc-42a4-b309-136c5373655d-logs\") pod \"glance-default-internal-api-0\" (UID: \"59244dbe-c5bc-42a4-b309-136c5373655d\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.988563 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/59244dbe-c5bc-42a4-b309-136c5373655d-ceph\") pod \"glance-default-internal-api-0\" (UID: \"59244dbe-c5bc-42a4-b309-136c5373655d\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.989600 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59244dbe-c5bc-42a4-b309-136c5373655d-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"59244dbe-c5bc-42a4-b309-136c5373655d\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.990289 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59244dbe-c5bc-42a4-b309-136c5373655d-config-data\") pod \"glance-default-internal-api-0\" (UID: \"59244dbe-c5bc-42a4-b309-136c5373655d\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:03:14 crc kubenswrapper[4857]: I1128 15:03:14.992583 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/59244dbe-c5bc-42a4-b309-136c5373655d-scripts\") pod \"glance-default-internal-api-0\" (UID: \"59244dbe-c5bc-42a4-b309-136c5373655d\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:03:15 crc kubenswrapper[4857]: I1128 15:03:15.005748 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-92hj9\" (UniqueName: \"kubernetes.io/projected/59244dbe-c5bc-42a4-b309-136c5373655d-kube-api-access-92hj9\") pod \"glance-default-internal-api-0\" (UID: \"59244dbe-c5bc-42a4-b309-136c5373655d\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:03:15 crc kubenswrapper[4857]: I1128 15:03:15.028267 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-786fb9969c-gmprd" Nov 28 15:03:15 crc kubenswrapper[4857]: I1128 15:03:15.221617 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 28 15:03:15 crc kubenswrapper[4857]: I1128 15:03:15.460787 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 28 15:03:15 crc kubenswrapper[4857]: W1128 15:03:15.463541 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod89398f9f_95c2_4266_b9e7_75721c227ccf.slice/crio-7f35f407b33164e7a5116eea64a8b9dd0f3b13156d3dfd5bf03442c6c914126c WatchSource:0}: Error finding container 7f35f407b33164e7a5116eea64a8b9dd0f3b13156d3dfd5bf03442c6c914126c: Status 404 returned error can't find the container with id 7f35f407b33164e7a5116eea64a8b9dd0f3b13156d3dfd5bf03442c6c914126c
Nov 28 15:03:15 crc kubenswrapper[4857]: I1128 15:03:15.531019 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-786fb9969c-gmprd"]
Nov 28 15:03:15 crc kubenswrapper[4857]: I1128 15:03:15.787171 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 28 15:03:15 crc kubenswrapper[4857]: I1128 15:03:15.878859 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 28 15:03:15 crc kubenswrapper[4857]: W1128 15:03:15.948347 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod59244dbe_c5bc_42a4_b309_136c5373655d.slice/crio-8574040e00270526f690bd0c0c956344b00fe6ff392dfeb6d690b33d7fd32f6b WatchSource:0}: Error finding container 8574040e00270526f690bd0c0c956344b00fe6ff392dfeb6d690b33d7fd32f6b: Status 404 returned error can't find the container with id 8574040e00270526f690bd0c0c956344b00fe6ff392dfeb6d690b33d7fd32f6b
Nov 28 15:03:16 crc kubenswrapper[4857]: I1128 15:03:16.258416 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"59244dbe-c5bc-42a4-b309-136c5373655d","Type":"ContainerStarted","Data":"8574040e00270526f690bd0c0c956344b00fe6ff392dfeb6d690b33d7fd32f6b"}
Nov 28 15:03:16 crc kubenswrapper[4857]: I1128 15:03:16.260933 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"89398f9f-95c2-4266-b9e7-75721c227ccf","Type":"ContainerStarted","Data":"ebdde4ec0a85c164e3cc088f4cc1db168f64979d79f3fc13510c4026de571513"}
Nov 28 15:03:16 crc kubenswrapper[4857]: I1128 15:03:16.260988 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"89398f9f-95c2-4266-b9e7-75721c227ccf","Type":"ContainerStarted","Data":"7f35f407b33164e7a5116eea64a8b9dd0f3b13156d3dfd5bf03442c6c914126c"}
Nov 28 15:03:16 crc kubenswrapper[4857]: I1128 15:03:16.262630 4857 generic.go:334] "Generic (PLEG): container finished" podID="ef8e7922-cc0b-47b6-a51f-cebe690c264d" containerID="e3639b1779810b9f09d8e2761ef459bfb05301e405f26b3ae36230c5390c326e" exitCode=0
Nov 28 15:03:16 crc kubenswrapper[4857]: I1128 15:03:16.262667 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-786fb9969c-gmprd" event={"ID":"ef8e7922-cc0b-47b6-a51f-cebe690c264d","Type":"ContainerDied","Data":"e3639b1779810b9f09d8e2761ef459bfb05301e405f26b3ae36230c5390c326e"}
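
The generic.go:334 "container finished" and kubelet.go:2453 "SyncLoop (PLEG)" entries come from the pod lifecycle event generator: it relists container states, diffs them against the previous snapshot, and feeds Started/Died events into the sync loop; here the dnsmasq init container's clean exit (e3639b17..., exitCode=0) is what lets the main dnsmasq container start next. A sketch of that relist-and-diff, with state names, event strings, and the shortened container IDs all illustrative:

```go
package main

import "fmt"

type state string

const (
	running state = "running"
	exited  state = "exited"
)

// relist diffs the current container states against the previous
// snapshot and emits the lifecycle events the sync loop consumes,
// mirroring the "Generic (PLEG)" entries above.
func relist(prev, cur map[string]state) []string {
	var events []string
	for id, s := range cur {
		switch {
		case prev[id] != running && s == running:
			events = append(events, "ContainerStarted "+id)
		case prev[id] == running && s == exited:
			events = append(events, "ContainerDied "+id)
		}
	}
	return events
}

func main() {
	prev := map[string]state{"e3639b17": running}                     // dnsmasq init container
	cur := map[string]state{"e3639b17": exited, "a9977b5a": running}  // init done, dnsmasq up
	for _, e := range relist(prev, cur) {
		fmt.Println(e)
	}
}
```
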
event={"ID":"ef8e7922-cc0b-47b6-a51f-cebe690c264d","Type":"ContainerStarted","Data":"e785f0d3ab2a0f3ef7aeaeca8ec7716733ea2976926ee005b5b686491ab05d92"} Nov 28 15:03:17 crc kubenswrapper[4857]: I1128 15:03:17.275203 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"59244dbe-c5bc-42a4-b309-136c5373655d","Type":"ContainerStarted","Data":"01a8d4700b57918b48faddae8d889cb6a7170c0d7598bed69d63c86530bedce6"} Nov 28 15:03:17 crc kubenswrapper[4857]: I1128 15:03:17.276245 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"59244dbe-c5bc-42a4-b309-136c5373655d","Type":"ContainerStarted","Data":"aa4df347ee9a90e7db9e9fd27440687f82e436a60f5cdf1e12e847b03f232dc5"} Nov 28 15:03:17 crc kubenswrapper[4857]: I1128 15:03:17.281883 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="89398f9f-95c2-4266-b9e7-75721c227ccf" containerName="glance-log" containerID="cri-o://ebdde4ec0a85c164e3cc088f4cc1db168f64979d79f3fc13510c4026de571513" gracePeriod=30 Nov 28 15:03:17 crc kubenswrapper[4857]: I1128 15:03:17.282157 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"89398f9f-95c2-4266-b9e7-75721c227ccf","Type":"ContainerStarted","Data":"ddc92d5f5d9f00022b083bc19accef5c95dc33ac84c4ab6c79cf63b314f669c7"} Nov 28 15:03:17 crc kubenswrapper[4857]: I1128 15:03:17.282239 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="89398f9f-95c2-4266-b9e7-75721c227ccf" containerName="glance-httpd" containerID="cri-o://ddc92d5f5d9f00022b083bc19accef5c95dc33ac84c4ab6c79cf63b314f669c7" gracePeriod=30 Nov 28 15:03:17 crc kubenswrapper[4857]: I1128 15:03:17.284592 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-786fb9969c-gmprd" event={"ID":"ef8e7922-cc0b-47b6-a51f-cebe690c264d","Type":"ContainerStarted","Data":"a9977b5aff16d66941afedf66511b2934faf30314b268b2ac5b8de5ea96b8f8e"} Nov 28 15:03:17 crc kubenswrapper[4857]: I1128 15:03:17.285574 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-786fb9969c-gmprd" Nov 28 15:03:17 crc kubenswrapper[4857]: I1128 15:03:17.303010 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.30298621 podStartE2EDuration="3.30298621s" podCreationTimestamp="2025-11-28 15:03:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:03:17.295736557 +0000 UTC m=+5647.419678024" watchObservedRunningTime="2025-11-28 15:03:17.30298621 +0000 UTC m=+5647.426927647" Nov 28 15:03:17 crc kubenswrapper[4857]: I1128 15:03:17.328233 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-786fb9969c-gmprd" podStartSLOduration=3.328213884 podStartE2EDuration="3.328213884s" podCreationTimestamp="2025-11-28 15:03:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:03:17.324414383 +0000 UTC m=+5647.448355820" watchObservedRunningTime="2025-11-28 15:03:17.328213884 +0000 UTC m=+5647.452155311" Nov 28 15:03:17 crc kubenswrapper[4857]: I1128 15:03:17.347742 4857 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.347714625 podStartE2EDuration="3.347714625s" podCreationTimestamp="2025-11-28 15:03:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:03:17.345229139 +0000 UTC m=+5647.469170576" watchObservedRunningTime="2025-11-28 15:03:17.347714625 +0000 UTC m=+5647.471656062" Nov 28 15:03:17 crc kubenswrapper[4857]: I1128 15:03:17.976891 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.067829 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89398f9f-95c2-4266-b9e7-75721c227ccf-combined-ca-bundle\") pod \"89398f9f-95c2-4266-b9e7-75721c227ccf\" (UID: \"89398f9f-95c2-4266-b9e7-75721c227ccf\") " Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.067891 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gsmnp\" (UniqueName: \"kubernetes.io/projected/89398f9f-95c2-4266-b9e7-75721c227ccf-kube-api-access-gsmnp\") pod \"89398f9f-95c2-4266-b9e7-75721c227ccf\" (UID: \"89398f9f-95c2-4266-b9e7-75721c227ccf\") " Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.067964 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89398f9f-95c2-4266-b9e7-75721c227ccf-scripts\") pod \"89398f9f-95c2-4266-b9e7-75721c227ccf\" (UID: \"89398f9f-95c2-4266-b9e7-75721c227ccf\") " Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.068026 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/89398f9f-95c2-4266-b9e7-75721c227ccf-httpd-run\") pod \"89398f9f-95c2-4266-b9e7-75721c227ccf\" (UID: \"89398f9f-95c2-4266-b9e7-75721c227ccf\") " Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.068058 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/89398f9f-95c2-4266-b9e7-75721c227ccf-ceph\") pod \"89398f9f-95c2-4266-b9e7-75721c227ccf\" (UID: \"89398f9f-95c2-4266-b9e7-75721c227ccf\") " Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.068119 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89398f9f-95c2-4266-b9e7-75721c227ccf-config-data\") pod \"89398f9f-95c2-4266-b9e7-75721c227ccf\" (UID: \"89398f9f-95c2-4266-b9e7-75721c227ccf\") " Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.068189 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/89398f9f-95c2-4266-b9e7-75721c227ccf-logs\") pod \"89398f9f-95c2-4266-b9e7-75721c227ccf\" (UID: \"89398f9f-95c2-4266-b9e7-75721c227ccf\") " Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.068593 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/89398f9f-95c2-4266-b9e7-75721c227ccf-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "89398f9f-95c2-4266-b9e7-75721c227ccf" (UID: "89398f9f-95c2-4266-b9e7-75721c227ccf"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.068620 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/89398f9f-95c2-4266-b9e7-75721c227ccf-logs" (OuterVolumeSpecName: "logs") pod "89398f9f-95c2-4266-b9e7-75721c227ccf" (UID: "89398f9f-95c2-4266-b9e7-75721c227ccf"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.069071 4857 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/89398f9f-95c2-4266-b9e7-75721c227ccf-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.069099 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/89398f9f-95c2-4266-b9e7-75721c227ccf-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.076660 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89398f9f-95c2-4266-b9e7-75721c227ccf-scripts" (OuterVolumeSpecName: "scripts") pod "89398f9f-95c2-4266-b9e7-75721c227ccf" (UID: "89398f9f-95c2-4266-b9e7-75721c227ccf"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.077847 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/89398f9f-95c2-4266-b9e7-75721c227ccf-ceph" (OuterVolumeSpecName: "ceph") pod "89398f9f-95c2-4266-b9e7-75721c227ccf" (UID: "89398f9f-95c2-4266-b9e7-75721c227ccf"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.080510 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/89398f9f-95c2-4266-b9e7-75721c227ccf-kube-api-access-gsmnp" (OuterVolumeSpecName: "kube-api-access-gsmnp") pod "89398f9f-95c2-4266-b9e7-75721c227ccf" (UID: "89398f9f-95c2-4266-b9e7-75721c227ccf"). InnerVolumeSpecName "kube-api-access-gsmnp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.106162 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89398f9f-95c2-4266-b9e7-75721c227ccf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "89398f9f-95c2-4266-b9e7-75721c227ccf" (UID: "89398f9f-95c2-4266-b9e7-75721c227ccf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.128183 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89398f9f-95c2-4266-b9e7-75721c227ccf-config-data" (OuterVolumeSpecName: "config-data") pod "89398f9f-95c2-4266-b9e7-75721c227ccf" (UID: "89398f9f-95c2-4266-b9e7-75721c227ccf"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.170612 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/89398f9f-95c2-4266-b9e7-75721c227ccf-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.170651 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/89398f9f-95c2-4266-b9e7-75721c227ccf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.170665 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gsmnp\" (UniqueName: \"kubernetes.io/projected/89398f9f-95c2-4266-b9e7-75721c227ccf-kube-api-access-gsmnp\") on node \"crc\" DevicePath \"\"" Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.170675 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/89398f9f-95c2-4266-b9e7-75721c227ccf-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.170684 4857 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/89398f9f-95c2-4266-b9e7-75721c227ccf-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.296347 4857 generic.go:334] "Generic (PLEG): container finished" podID="89398f9f-95c2-4266-b9e7-75721c227ccf" containerID="ddc92d5f5d9f00022b083bc19accef5c95dc33ac84c4ab6c79cf63b314f669c7" exitCode=0 Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.296385 4857 generic.go:334] "Generic (PLEG): container finished" podID="89398f9f-95c2-4266-b9e7-75721c227ccf" containerID="ebdde4ec0a85c164e3cc088f4cc1db168f64979d79f3fc13510c4026de571513" exitCode=143 Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.296469 4857 util.go:48] "No ready sandbox for pod can be found. 
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.296517 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"89398f9f-95c2-4266-b9e7-75721c227ccf","Type":"ContainerDied","Data":"ddc92d5f5d9f00022b083bc19accef5c95dc33ac84c4ab6c79cf63b314f669c7"}
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.296558 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"89398f9f-95c2-4266-b9e7-75721c227ccf","Type":"ContainerDied","Data":"ebdde4ec0a85c164e3cc088f4cc1db168f64979d79f3fc13510c4026de571513"}
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.296575 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"89398f9f-95c2-4266-b9e7-75721c227ccf","Type":"ContainerDied","Data":"7f35f407b33164e7a5116eea64a8b9dd0f3b13156d3dfd5bf03442c6c914126c"}
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.296594 4857 scope.go:117] "RemoveContainer" containerID="ddc92d5f5d9f00022b083bc19accef5c95dc33ac84c4ab6c79cf63b314f669c7"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.333386 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.337380 4857 scope.go:117] "RemoveContainer" containerID="ebdde4ec0a85c164e3cc088f4cc1db168f64979d79f3fc13510c4026de571513"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.342041 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.366753 4857 scope.go:117] "RemoveContainer" containerID="ddc92d5f5d9f00022b083bc19accef5c95dc33ac84c4ab6c79cf63b314f669c7"
Nov 28 15:03:18 crc kubenswrapper[4857]: E1128 15:03:18.367286 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ddc92d5f5d9f00022b083bc19accef5c95dc33ac84c4ab6c79cf63b314f669c7\": container with ID starting with ddc92d5f5d9f00022b083bc19accef5c95dc33ac84c4ab6c79cf63b314f669c7 not found: ID does not exist" containerID="ddc92d5f5d9f00022b083bc19accef5c95dc33ac84c4ab6c79cf63b314f669c7"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.367347 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ddc92d5f5d9f00022b083bc19accef5c95dc33ac84c4ab6c79cf63b314f669c7"} err="failed to get container status \"ddc92d5f5d9f00022b083bc19accef5c95dc33ac84c4ab6c79cf63b314f669c7\": rpc error: code = NotFound desc = could not find container \"ddc92d5f5d9f00022b083bc19accef5c95dc33ac84c4ab6c79cf63b314f669c7\": container with ID starting with ddc92d5f5d9f00022b083bc19accef5c95dc33ac84c4ab6c79cf63b314f669c7 not found: ID does not exist"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.367387 4857 scope.go:117] "RemoveContainer" containerID="ebdde4ec0a85c164e3cc088f4cc1db168f64979d79f3fc13510c4026de571513"
Nov 28 15:03:18 crc kubenswrapper[4857]: E1128 15:03:18.367744 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ebdde4ec0a85c164e3cc088f4cc1db168f64979d79f3fc13510c4026de571513\": container with ID starting with ebdde4ec0a85c164e3cc088f4cc1db168f64979d79f3fc13510c4026de571513 not found: ID does not exist" containerID="ebdde4ec0a85c164e3cc088f4cc1db168f64979d79f3fc13510c4026de571513"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.367785 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ebdde4ec0a85c164e3cc088f4cc1db168f64979d79f3fc13510c4026de571513"} err="failed to get container status \"ebdde4ec0a85c164e3cc088f4cc1db168f64979d79f3fc13510c4026de571513\": rpc error: code = NotFound desc = could not find container \"ebdde4ec0a85c164e3cc088f4cc1db168f64979d79f3fc13510c4026de571513\": container with ID starting with ebdde4ec0a85c164e3cc088f4cc1db168f64979d79f3fc13510c4026de571513 not found: ID does not exist"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.367818 4857 scope.go:117] "RemoveContainer" containerID="ddc92d5f5d9f00022b083bc19accef5c95dc33ac84c4ab6c79cf63b314f669c7"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.368252 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ddc92d5f5d9f00022b083bc19accef5c95dc33ac84c4ab6c79cf63b314f669c7"} err="failed to get container status \"ddc92d5f5d9f00022b083bc19accef5c95dc33ac84c4ab6c79cf63b314f669c7\": rpc error: code = NotFound desc = could not find container \"ddc92d5f5d9f00022b083bc19accef5c95dc33ac84c4ab6c79cf63b314f669c7\": container with ID starting with ddc92d5f5d9f00022b083bc19accef5c95dc33ac84c4ab6c79cf63b314f669c7 not found: ID does not exist"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.368283 4857 scope.go:117] "RemoveContainer" containerID="ebdde4ec0a85c164e3cc088f4cc1db168f64979d79f3fc13510c4026de571513"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.368631 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ebdde4ec0a85c164e3cc088f4cc1db168f64979d79f3fc13510c4026de571513"} err="failed to get container status \"ebdde4ec0a85c164e3cc088f4cc1db168f64979d79f3fc13510c4026de571513\": rpc error: code = NotFound desc = could not find container \"ebdde4ec0a85c164e3cc088f4cc1db168f64979d79f3fc13510c4026de571513\": container with ID starting with ebdde4ec0a85c164e3cc088f4cc1db168f64979d79f3fc13510c4026de571513 not found: ID does not exist"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.373700 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 28 15:03:18 crc kubenswrapper[4857]: E1128 15:03:18.374106 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89398f9f-95c2-4266-b9e7-75721c227ccf" containerName="glance-log"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.374126 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="89398f9f-95c2-4266-b9e7-75721c227ccf" containerName="glance-log"
Nov 28 15:03:18 crc kubenswrapper[4857]: E1128 15:03:18.374142 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89398f9f-95c2-4266-b9e7-75721c227ccf" containerName="glance-httpd"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.374149 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="89398f9f-95c2-4266-b9e7-75721c227ccf" containerName="glance-httpd"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.374691 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="89398f9f-95c2-4266-b9e7-75721c227ccf" containerName="glance-httpd"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.374719 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="89398f9f-95c2-4266-b9e7-75721c227ccf" containerName="glance-log"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.375667 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.378857 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.398291 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.412275 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.477581 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2ml7v\" (UniqueName: \"kubernetes.io/projected/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-kube-api-access-2ml7v\") pod \"glance-default-external-api-0\" (UID: \"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7\") " pod="openstack/glance-default-external-api-0"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.477765 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-ceph\") pod \"glance-default-external-api-0\" (UID: \"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7\") " pod="openstack/glance-default-external-api-0"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.477831 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7\") " pod="openstack/glance-default-external-api-0"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.478053 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-scripts\") pod \"glance-default-external-api-0\" (UID: \"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7\") " pod="openstack/glance-default-external-api-0"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.478116 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-logs\") pod \"glance-default-external-api-0\" (UID: \"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7\") " pod="openstack/glance-default-external-api-0"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.478248 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7\") " pod="openstack/glance-default-external-api-0"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.478323 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-config-data\") pod \"glance-default-external-api-0\" (UID: \"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7\") " pod="openstack/glance-default-external-api-0"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.580047 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2ml7v\" (UniqueName: \"kubernetes.io/projected/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-kube-api-access-2ml7v\") pod \"glance-default-external-api-0\" (UID: \"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7\") " pod="openstack/glance-default-external-api-0"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.580138 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-ceph\") pod \"glance-default-external-api-0\" (UID: \"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7\") " pod="openstack/glance-default-external-api-0"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.580178 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7\") " pod="openstack/glance-default-external-api-0"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.580241 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-scripts\") pod \"glance-default-external-api-0\" (UID: \"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7\") " pod="openstack/glance-default-external-api-0"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.580266 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-logs\") pod \"glance-default-external-api-0\" (UID: \"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7\") " pod="openstack/glance-default-external-api-0"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.580306 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7\") " pod="openstack/glance-default-external-api-0"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.580334 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-config-data\") pod \"glance-default-external-api-0\" (UID: \"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7\") " pod="openstack/glance-default-external-api-0"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.581030 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-logs\") pod \"glance-default-external-api-0\" (UID: \"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7\") " pod="openstack/glance-default-external-api-0"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.581091 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7\") " pod="openstack/glance-default-external-api-0"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.585979 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-scripts\") pod \"glance-default-external-api-0\" (UID: \"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7\") " pod="openstack/glance-default-external-api-0"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.586523 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-ceph\") pod \"glance-default-external-api-0\" (UID: \"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7\") " pod="openstack/glance-default-external-api-0"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.587138 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7\") " pod="openstack/glance-default-external-api-0"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.592221 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-config-data\") pod \"glance-default-external-api-0\" (UID: \"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7\") " pod="openstack/glance-default-external-api-0"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.598863 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2ml7v\" (UniqueName: \"kubernetes.io/projected/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-kube-api-access-2ml7v\") pod \"glance-default-external-api-0\" (UID: \"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7\") " pod="openstack/glance-default-external-api-0"
Nov 28 15:03:18 crc kubenswrapper[4857]: I1128 15:03:18.694442 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 28 15:03:19 crc kubenswrapper[4857]: I1128 15:03:19.300679 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 28 15:03:19 crc kubenswrapper[4857]: W1128 15:03:19.304107 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcf2fe1ae_4f37_46dc_9d52_61b1df5f5fc7.slice/crio-c55680c5ca1b315726fb28e2b61e1047676c2bde171c1a58fa7704e0fe5ec42a WatchSource:0}: Error finding container c55680c5ca1b315726fb28e2b61e1047676c2bde171c1a58fa7704e0fe5ec42a: Status 404 returned error can't find the container with id c55680c5ca1b315726fb28e2b61e1047676c2bde171c1a58fa7704e0fe5ec42a
Nov 28 15:03:19 crc kubenswrapper[4857]: I1128 15:03:19.320119 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="59244dbe-c5bc-42a4-b309-136c5373655d" containerName="glance-log" containerID="cri-o://aa4df347ee9a90e7db9e9fd27440687f82e436a60f5cdf1e12e847b03f232dc5" gracePeriod=30
Nov 28 15:03:19 crc kubenswrapper[4857]: I1128 15:03:19.320310 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="59244dbe-c5bc-42a4-b309-136c5373655d" containerName="glance-httpd" containerID="cri-o://01a8d4700b57918b48faddae8d889cb6a7170c0d7598bed69d63c86530bedce6" gracePeriod=30
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.002527 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.113434 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/59244dbe-c5bc-42a4-b309-136c5373655d-logs\") pod \"59244dbe-c5bc-42a4-b309-136c5373655d\" (UID: \"59244dbe-c5bc-42a4-b309-136c5373655d\") "
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.113892 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59244dbe-c5bc-42a4-b309-136c5373655d-combined-ca-bundle\") pod \"59244dbe-c5bc-42a4-b309-136c5373655d\" (UID: \"59244dbe-c5bc-42a4-b309-136c5373655d\") "
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.113825 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/59244dbe-c5bc-42a4-b309-136c5373655d-logs" (OuterVolumeSpecName: "logs") pod "59244dbe-c5bc-42a4-b309-136c5373655d" (UID: "59244dbe-c5bc-42a4-b309-136c5373655d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.114307 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-92hj9\" (UniqueName: \"kubernetes.io/projected/59244dbe-c5bc-42a4-b309-136c5373655d-kube-api-access-92hj9\") pod \"59244dbe-c5bc-42a4-b309-136c5373655d\" (UID: \"59244dbe-c5bc-42a4-b309-136c5373655d\") "
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.114359 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59244dbe-c5bc-42a4-b309-136c5373655d-config-data\") pod \"59244dbe-c5bc-42a4-b309-136c5373655d\" (UID: \"59244dbe-c5bc-42a4-b309-136c5373655d\") "
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.114461 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/59244dbe-c5bc-42a4-b309-136c5373655d-ceph\") pod \"59244dbe-c5bc-42a4-b309-136c5373655d\" (UID: \"59244dbe-c5bc-42a4-b309-136c5373655d\") "
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.114486 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/59244dbe-c5bc-42a4-b309-136c5373655d-httpd-run\") pod \"59244dbe-c5bc-42a4-b309-136c5373655d\" (UID: \"59244dbe-c5bc-42a4-b309-136c5373655d\") "
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.114519 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/59244dbe-c5bc-42a4-b309-136c5373655d-scripts\") pod \"59244dbe-c5bc-42a4-b309-136c5373655d\" (UID: \"59244dbe-c5bc-42a4-b309-136c5373655d\") "
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.115024 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/59244dbe-c5bc-42a4-b309-136c5373655d-logs\") on node \"crc\" DevicePath \"\""
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.119283 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59244dbe-c5bc-42a4-b309-136c5373655d-scripts" (OuterVolumeSpecName: "scripts") pod "59244dbe-c5bc-42a4-b309-136c5373655d" (UID: "59244dbe-c5bc-42a4-b309-136c5373655d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.119316 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/59244dbe-c5bc-42a4-b309-136c5373655d-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "59244dbe-c5bc-42a4-b309-136c5373655d" (UID: "59244dbe-c5bc-42a4-b309-136c5373655d"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.120324 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59244dbe-c5bc-42a4-b309-136c5373655d-kube-api-access-92hj9" (OuterVolumeSpecName: "kube-api-access-92hj9") pod "59244dbe-c5bc-42a4-b309-136c5373655d" (UID: "59244dbe-c5bc-42a4-b309-136c5373655d"). InnerVolumeSpecName "kube-api-access-92hj9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.121137 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59244dbe-c5bc-42a4-b309-136c5373655d-ceph" (OuterVolumeSpecName: "ceph") pod "59244dbe-c5bc-42a4-b309-136c5373655d" (UID: "59244dbe-c5bc-42a4-b309-136c5373655d"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.154787 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59244dbe-c5bc-42a4-b309-136c5373655d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "59244dbe-c5bc-42a4-b309-136c5373655d" (UID: "59244dbe-c5bc-42a4-b309-136c5373655d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.178332 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59244dbe-c5bc-42a4-b309-136c5373655d-config-data" (OuterVolumeSpecName: "config-data") pod "59244dbe-c5bc-42a4-b309-136c5373655d" (UID: "59244dbe-c5bc-42a4-b309-136c5373655d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.216121 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/59244dbe-c5bc-42a4-b309-136c5373655d-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.216162 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-92hj9\" (UniqueName: \"kubernetes.io/projected/59244dbe-c5bc-42a4-b309-136c5373655d-kube-api-access-92hj9\") on node \"crc\" DevicePath \"\""
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.216180 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/59244dbe-c5bc-42a4-b309-136c5373655d-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.216191 4857 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/59244dbe-c5bc-42a4-b309-136c5373655d-ceph\") on node \"crc\" DevicePath \"\""
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.216204 4857 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/59244dbe-c5bc-42a4-b309-136c5373655d-httpd-run\") on node \"crc\" DevicePath \"\""
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.216214 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/59244dbe-c5bc-42a4-b309-136c5373655d-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.276808 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="89398f9f-95c2-4266-b9e7-75721c227ccf" path="/var/lib/kubelet/pods/89398f9f-95c2-4266-b9e7-75721c227ccf/volumes"
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.332437 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7","Type":"ContainerStarted","Data":"16c1e790752c7280a64ad482220e23888921db72437b11a041d8fb9cfad8945c"}
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.332491 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7","Type":"ContainerStarted","Data":"c55680c5ca1b315726fb28e2b61e1047676c2bde171c1a58fa7704e0fe5ec42a"}
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.334201 4857 generic.go:334] "Generic (PLEG): container finished" podID="59244dbe-c5bc-42a4-b309-136c5373655d" containerID="01a8d4700b57918b48faddae8d889cb6a7170c0d7598bed69d63c86530bedce6" exitCode=0
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.334230 4857 generic.go:334] "Generic (PLEG): container finished" podID="59244dbe-c5bc-42a4-b309-136c5373655d" containerID="aa4df347ee9a90e7db9e9fd27440687f82e436a60f5cdf1e12e847b03f232dc5" exitCode=143
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.334247 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"59244dbe-c5bc-42a4-b309-136c5373655d","Type":"ContainerDied","Data":"01a8d4700b57918b48faddae8d889cb6a7170c0d7598bed69d63c86530bedce6"}
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.334266 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"59244dbe-c5bc-42a4-b309-136c5373655d","Type":"ContainerDied","Data":"aa4df347ee9a90e7db9e9fd27440687f82e436a60f5cdf1e12e847b03f232dc5"}
event={"ID":"59244dbe-c5bc-42a4-b309-136c5373655d","Type":"ContainerDied","Data":"aa4df347ee9a90e7db9e9fd27440687f82e436a60f5cdf1e12e847b03f232dc5"} Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.334278 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"59244dbe-c5bc-42a4-b309-136c5373655d","Type":"ContainerDied","Data":"8574040e00270526f690bd0c0c956344b00fe6ff392dfeb6d690b33d7fd32f6b"} Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.334296 4857 scope.go:117] "RemoveContainer" containerID="01a8d4700b57918b48faddae8d889cb6a7170c0d7598bed69d63c86530bedce6" Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.334444 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.363653 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.375317 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.382517 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:03:20 crc kubenswrapper[4857]: E1128 15:03:20.382874 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59244dbe-c5bc-42a4-b309-136c5373655d" containerName="glance-log" Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.382891 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="59244dbe-c5bc-42a4-b309-136c5373655d" containerName="glance-log" Nov 28 15:03:20 crc kubenswrapper[4857]: E1128 15:03:20.382922 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59244dbe-c5bc-42a4-b309-136c5373655d" containerName="glance-httpd" Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.382930 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="59244dbe-c5bc-42a4-b309-136c5373655d" containerName="glance-httpd" Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.383142 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="59244dbe-c5bc-42a4-b309-136c5373655d" containerName="glance-log" Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.383246 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="59244dbe-c5bc-42a4-b309-136c5373655d" containerName="glance-httpd" Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.385634 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.386481 4857 scope.go:117] "RemoveContainer" containerID="aa4df347ee9a90e7db9e9fd27440687f82e436a60f5cdf1e12e847b03f232dc5" Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.392243 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.403243 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.425713 4857 scope.go:117] "RemoveContainer" containerID="01a8d4700b57918b48faddae8d889cb6a7170c0d7598bed69d63c86530bedce6" Nov 28 15:03:20 crc kubenswrapper[4857]: E1128 15:03:20.430481 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"01a8d4700b57918b48faddae8d889cb6a7170c0d7598bed69d63c86530bedce6\": container with ID starting with 01a8d4700b57918b48faddae8d889cb6a7170c0d7598bed69d63c86530bedce6 not found: ID does not exist" containerID="01a8d4700b57918b48faddae8d889cb6a7170c0d7598bed69d63c86530bedce6" Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.430546 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01a8d4700b57918b48faddae8d889cb6a7170c0d7598bed69d63c86530bedce6"} err="failed to get container status \"01a8d4700b57918b48faddae8d889cb6a7170c0d7598bed69d63c86530bedce6\": rpc error: code = NotFound desc = could not find container \"01a8d4700b57918b48faddae8d889cb6a7170c0d7598bed69d63c86530bedce6\": container with ID starting with 01a8d4700b57918b48faddae8d889cb6a7170c0d7598bed69d63c86530bedce6 not found: ID does not exist" Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.430585 4857 scope.go:117] "RemoveContainer" containerID="aa4df347ee9a90e7db9e9fd27440687f82e436a60f5cdf1e12e847b03f232dc5" Nov 28 15:03:20 crc kubenswrapper[4857]: E1128 15:03:20.436662 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aa4df347ee9a90e7db9e9fd27440687f82e436a60f5cdf1e12e847b03f232dc5\": container with ID starting with aa4df347ee9a90e7db9e9fd27440687f82e436a60f5cdf1e12e847b03f232dc5 not found: ID does not exist" containerID="aa4df347ee9a90e7db9e9fd27440687f82e436a60f5cdf1e12e847b03f232dc5" Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.436691 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aa4df347ee9a90e7db9e9fd27440687f82e436a60f5cdf1e12e847b03f232dc5"} err="failed to get container status \"aa4df347ee9a90e7db9e9fd27440687f82e436a60f5cdf1e12e847b03f232dc5\": rpc error: code = NotFound desc = could not find container \"aa4df347ee9a90e7db9e9fd27440687f82e436a60f5cdf1e12e847b03f232dc5\": container with ID starting with aa4df347ee9a90e7db9e9fd27440687f82e436a60f5cdf1e12e847b03f232dc5 not found: ID does not exist" Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.436713 4857 scope.go:117] "RemoveContainer" containerID="01a8d4700b57918b48faddae8d889cb6a7170c0d7598bed69d63c86530bedce6" Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.437098 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01a8d4700b57918b48faddae8d889cb6a7170c0d7598bed69d63c86530bedce6"} err="failed to get container status 
\"01a8d4700b57918b48faddae8d889cb6a7170c0d7598bed69d63c86530bedce6\": rpc error: code = NotFound desc = could not find container \"01a8d4700b57918b48faddae8d889cb6a7170c0d7598bed69d63c86530bedce6\": container with ID starting with 01a8d4700b57918b48faddae8d889cb6a7170c0d7598bed69d63c86530bedce6 not found: ID does not exist" Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.437153 4857 scope.go:117] "RemoveContainer" containerID="aa4df347ee9a90e7db9e9fd27440687f82e436a60f5cdf1e12e847b03f232dc5" Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.437802 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aa4df347ee9a90e7db9e9fd27440687f82e436a60f5cdf1e12e847b03f232dc5"} err="failed to get container status \"aa4df347ee9a90e7db9e9fd27440687f82e436a60f5cdf1e12e847b03f232dc5\": rpc error: code = NotFound desc = could not find container \"aa4df347ee9a90e7db9e9fd27440687f82e436a60f5cdf1e12e847b03f232dc5\": container with ID starting with aa4df347ee9a90e7db9e9fd27440687f82e436a60f5cdf1e12e847b03f232dc5 not found: ID does not exist" Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.524386 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5dc2df72-1be4-4bad-8282-bb1b65226de0-logs\") pod \"glance-default-internal-api-0\" (UID: \"5dc2df72-1be4-4bad-8282-bb1b65226de0\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.524837 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5dc2df72-1be4-4bad-8282-bb1b65226de0-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"5dc2df72-1be4-4bad-8282-bb1b65226de0\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.525131 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s8jd5\" (UniqueName: \"kubernetes.io/projected/5dc2df72-1be4-4bad-8282-bb1b65226de0-kube-api-access-s8jd5\") pod \"glance-default-internal-api-0\" (UID: \"5dc2df72-1be4-4bad-8282-bb1b65226de0\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.525358 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5dc2df72-1be4-4bad-8282-bb1b65226de0-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"5dc2df72-1be4-4bad-8282-bb1b65226de0\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.525576 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/5dc2df72-1be4-4bad-8282-bb1b65226de0-ceph\") pod \"glance-default-internal-api-0\" (UID: \"5dc2df72-1be4-4bad-8282-bb1b65226de0\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.525777 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5dc2df72-1be4-4bad-8282-bb1b65226de0-scripts\") pod \"glance-default-internal-api-0\" (UID: \"5dc2df72-1be4-4bad-8282-bb1b65226de0\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:03:20 crc kubenswrapper[4857]: 
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.627916 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5dc2df72-1be4-4bad-8282-bb1b65226de0-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"5dc2df72-1be4-4bad-8282-bb1b65226de0\") " pod="openstack/glance-default-internal-api-0"
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.628050 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s8jd5\" (UniqueName: \"kubernetes.io/projected/5dc2df72-1be4-4bad-8282-bb1b65226de0-kube-api-access-s8jd5\") pod \"glance-default-internal-api-0\" (UID: \"5dc2df72-1be4-4bad-8282-bb1b65226de0\") " pod="openstack/glance-default-internal-api-0"
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.628106 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5dc2df72-1be4-4bad-8282-bb1b65226de0-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"5dc2df72-1be4-4bad-8282-bb1b65226de0\") " pod="openstack/glance-default-internal-api-0"
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.628135 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/5dc2df72-1be4-4bad-8282-bb1b65226de0-ceph\") pod \"glance-default-internal-api-0\" (UID: \"5dc2df72-1be4-4bad-8282-bb1b65226de0\") " pod="openstack/glance-default-internal-api-0"
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.628159 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5dc2df72-1be4-4bad-8282-bb1b65226de0-scripts\") pod \"glance-default-internal-api-0\" (UID: \"5dc2df72-1be4-4bad-8282-bb1b65226de0\") " pod="openstack/glance-default-internal-api-0"
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.628193 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5dc2df72-1be4-4bad-8282-bb1b65226de0-config-data\") pod \"glance-default-internal-api-0\" (UID: \"5dc2df72-1be4-4bad-8282-bb1b65226de0\") " pod="openstack/glance-default-internal-api-0"
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.628248 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5dc2df72-1be4-4bad-8282-bb1b65226de0-logs\") pod \"glance-default-internal-api-0\" (UID: \"5dc2df72-1be4-4bad-8282-bb1b65226de0\") " pod="openstack/glance-default-internal-api-0"
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.629119 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5dc2df72-1be4-4bad-8282-bb1b65226de0-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"5dc2df72-1be4-4bad-8282-bb1b65226de0\") " pod="openstack/glance-default-internal-api-0"
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.629151 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5dc2df72-1be4-4bad-8282-bb1b65226de0-logs\") pod \"glance-default-internal-api-0\" (UID: \"5dc2df72-1be4-4bad-8282-bb1b65226de0\") " pod="openstack/glance-default-internal-api-0"
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.634442 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/5dc2df72-1be4-4bad-8282-bb1b65226de0-ceph\") pod \"glance-default-internal-api-0\" (UID: \"5dc2df72-1be4-4bad-8282-bb1b65226de0\") " pod="openstack/glance-default-internal-api-0"
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.634689 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5dc2df72-1be4-4bad-8282-bb1b65226de0-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"5dc2df72-1be4-4bad-8282-bb1b65226de0\") " pod="openstack/glance-default-internal-api-0"
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.634533 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5dc2df72-1be4-4bad-8282-bb1b65226de0-config-data\") pod \"glance-default-internal-api-0\" (UID: \"5dc2df72-1be4-4bad-8282-bb1b65226de0\") " pod="openstack/glance-default-internal-api-0"
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.635760 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5dc2df72-1be4-4bad-8282-bb1b65226de0-scripts\") pod \"glance-default-internal-api-0\" (UID: \"5dc2df72-1be4-4bad-8282-bb1b65226de0\") " pod="openstack/glance-default-internal-api-0"
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.642720 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s8jd5\" (UniqueName: \"kubernetes.io/projected/5dc2df72-1be4-4bad-8282-bb1b65226de0-kube-api-access-s8jd5\") pod \"glance-default-internal-api-0\" (UID: \"5dc2df72-1be4-4bad-8282-bb1b65226de0\") " pod="openstack/glance-default-internal-api-0"
Nov 28 15:03:20 crc kubenswrapper[4857]: I1128 15:03:20.704280 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 28 15:03:21 crc kubenswrapper[4857]: I1128 15:03:21.347342 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7","Type":"ContainerStarted","Data":"5d6fe1a0c601b448ca204da2df776fd586604ead34e0b8cdb4f9e2c73083753a"}
Nov 28 15:03:21 crc kubenswrapper[4857]: I1128 15:03:21.394546 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.394519628 podStartE2EDuration="3.394519628s" podCreationTimestamp="2025-11-28 15:03:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:03:21.384651825 +0000 UTC m=+5651.508593292" watchObservedRunningTime="2025-11-28 15:03:21.394519628 +0000 UTC m=+5651.518461075"
Nov 28 15:03:21 crc kubenswrapper[4857]: W1128 15:03:21.467677 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5dc2df72_1be4_4bad_8282_bb1b65226de0.slice/crio-d62232f9652290721f668239db343d348023961c72df0a723e06f9819c3875c9 WatchSource:0}: Error finding container d62232f9652290721f668239db343d348023961c72df0a723e06f9819c3875c9: Status 404 returned error can't find the container with id d62232f9652290721f668239db343d348023961c72df0a723e06f9819c3875c9
Nov 28 15:03:21 crc kubenswrapper[4857]: I1128 15:03:21.468147 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 28 15:03:22 crc kubenswrapper[4857]: I1128 15:03:22.246847 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="59244dbe-c5bc-42a4-b309-136c5373655d" path="/var/lib/kubelet/pods/59244dbe-c5bc-42a4-b309-136c5373655d/volumes"
Nov 28 15:03:22 crc kubenswrapper[4857]: I1128 15:03:22.371439 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5dc2df72-1be4-4bad-8282-bb1b65226de0","Type":"ContainerStarted","Data":"e2e67b60862be42ab9c310508d71414a992de3cb1c03272009bbe6432e6cccd1"}
Nov 28 15:03:22 crc kubenswrapper[4857]: I1128 15:03:22.371485 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5dc2df72-1be4-4bad-8282-bb1b65226de0","Type":"ContainerStarted","Data":"d62232f9652290721f668239db343d348023961c72df0a723e06f9819c3875c9"}
Nov 28 15:03:23 crc kubenswrapper[4857]: I1128 15:03:23.388877 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5dc2df72-1be4-4bad-8282-bb1b65226de0","Type":"ContainerStarted","Data":"fd53eaaab460ddc9dbffd5a200e30daab396566fb89befa6db79d514fad93ab7"}
Nov 28 15:03:23 crc kubenswrapper[4857]: I1128 15:03:23.439086 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.439055868 podStartE2EDuration="3.439055868s" podCreationTimestamp="2025-11-28 15:03:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:03:23.426718509 +0000 UTC m=+5653.550659946" watchObservedRunningTime="2025-11-28 15:03:23.439055868 +0000 UTC m=+5653.562997325"
Nov 28 15:03:25 crc kubenswrapper[4857]: I1128 15:03:25.030299 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-786fb9969c-gmprd"
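Entries like the ones above all share the klog header format (severity letter plus MMDD, wall-clock time, pid, source file:line, then the message), which makes the log straightforward to post-process. A minimal parser sketch, assuming only the format visible in these lines:

```python
import re

# klog header: I1128 15:03:20.276808 <pid> <file>:<line>] <message>
KLOG = re.compile(
    r'(?P<sev>[IWE])(?P<mmdd>\d{4}) (?P<time>\d{2}:\d{2}:\d{2}\.\d+)\s+\d+ '
    r'(?P<src>[\w./-]+:\d+)\] (?P<msg>.*)')

line = ('I1128 15:03:22.246847 4857 kubelet_volumes.go:163] '
        '"Cleaned up orphaned pod volumes dir" podUID="59244dbe-c5bc-42a4-b309-136c5373655d"')
m = KLOG.match(line)
print(m.group("sev"), m.group("time"), m.group("src"))  # I 15:03:22.246847 kubelet_volumes.go:163
```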
status="ready" pod="openstack/dnsmasq-dns-786fb9969c-gmprd" Nov 28 15:03:25 crc kubenswrapper[4857]: I1128 15:03:25.101448 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bdb9c7ddc-d6lqf"] Nov 28 15:03:25 crc kubenswrapper[4857]: I1128 15:03:25.101797 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6bdb9c7ddc-d6lqf" podUID="3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b" containerName="dnsmasq-dns" containerID="cri-o://f559284e524f99cf9527e4cbdfcbd3901150dffd09fafb10b4a5c0317c6619f9" gracePeriod=10 Nov 28 15:03:25 crc kubenswrapper[4857]: I1128 15:03:25.447784 4857 generic.go:334] "Generic (PLEG): container finished" podID="3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b" containerID="f559284e524f99cf9527e4cbdfcbd3901150dffd09fafb10b4a5c0317c6619f9" exitCode=0 Nov 28 15:03:25 crc kubenswrapper[4857]: I1128 15:03:25.448208 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bdb9c7ddc-d6lqf" event={"ID":"3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b","Type":"ContainerDied","Data":"f559284e524f99cf9527e4cbdfcbd3901150dffd09fafb10b4a5c0317c6619f9"} Nov 28 15:03:25 crc kubenswrapper[4857]: I1128 15:03:25.667785 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bdb9c7ddc-d6lqf" Nov 28 15:03:25 crc kubenswrapper[4857]: I1128 15:03:25.768579 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b-dns-svc\") pod \"3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b\" (UID: \"3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b\") " Nov 28 15:03:25 crc kubenswrapper[4857]: I1128 15:03:25.768920 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b-ovsdbserver-nb\") pod \"3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b\" (UID: \"3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b\") " Nov 28 15:03:25 crc kubenswrapper[4857]: I1128 15:03:25.768981 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b-ovsdbserver-sb\") pod \"3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b\" (UID: \"3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b\") " Nov 28 15:03:25 crc kubenswrapper[4857]: I1128 15:03:25.769038 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lfnrt\" (UniqueName: \"kubernetes.io/projected/3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b-kube-api-access-lfnrt\") pod \"3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b\" (UID: \"3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b\") " Nov 28 15:03:25 crc kubenswrapper[4857]: I1128 15:03:25.769065 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b-config\") pod \"3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b\" (UID: \"3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b\") " Nov 28 15:03:25 crc kubenswrapper[4857]: I1128 15:03:25.776394 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b-kube-api-access-lfnrt" (OuterVolumeSpecName: "kube-api-access-lfnrt") pod "3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b" (UID: "3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b"). InnerVolumeSpecName "kube-api-access-lfnrt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:03:25 crc kubenswrapper[4857]: I1128 15:03:25.830084 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b" (UID: "3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:03:25 crc kubenswrapper[4857]: I1128 15:03:25.830975 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b" (UID: "3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:03:25 crc kubenswrapper[4857]: I1128 15:03:25.834898 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b-config" (OuterVolumeSpecName: "config") pod "3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b" (UID: "3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:03:25 crc kubenswrapper[4857]: I1128 15:03:25.838932 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b" (UID: "3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:03:25 crc kubenswrapper[4857]: I1128 15:03:25.871137 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 15:03:25 crc kubenswrapper[4857]: I1128 15:03:25.871179 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 15:03:25 crc kubenswrapper[4857]: I1128 15:03:25.871190 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lfnrt\" (UniqueName: \"kubernetes.io/projected/3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b-kube-api-access-lfnrt\") on node \"crc\" DevicePath \"\"" Nov 28 15:03:25 crc kubenswrapper[4857]: I1128 15:03:25.871206 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:03:25 crc kubenswrapper[4857]: I1128 15:03:25.871216 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 15:03:26 crc kubenswrapper[4857]: I1128 15:03:26.460111 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bdb9c7ddc-d6lqf" event={"ID":"3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b","Type":"ContainerDied","Data":"695b51f3eed82e0d1ce90e65dbc9ef71ca87f1389e6dc68ef5595e86c02fb9a0"} Nov 28 15:03:26 crc kubenswrapper[4857]: I1128 15:03:26.460185 4857 scope.go:117] "RemoveContainer" 
containerID="f559284e524f99cf9527e4cbdfcbd3901150dffd09fafb10b4a5c0317c6619f9" Nov 28 15:03:26 crc kubenswrapper[4857]: I1128 15:03:26.460228 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bdb9c7ddc-d6lqf" Nov 28 15:03:26 crc kubenswrapper[4857]: I1128 15:03:26.489246 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bdb9c7ddc-d6lqf"] Nov 28 15:03:26 crc kubenswrapper[4857]: I1128 15:03:26.493441 4857 scope.go:117] "RemoveContainer" containerID="a043b1a851b41d6405fd09d0850dcea8cd256714fce6d4c5ea53155dd4bf5759" Nov 28 15:03:26 crc kubenswrapper[4857]: I1128 15:03:26.497545 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6bdb9c7ddc-d6lqf"] Nov 28 15:03:28 crc kubenswrapper[4857]: I1128 15:03:28.251545 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b" path="/var/lib/kubelet/pods/3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b/volumes" Nov 28 15:03:28 crc kubenswrapper[4857]: I1128 15:03:28.695336 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 28 15:03:28 crc kubenswrapper[4857]: I1128 15:03:28.695410 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 28 15:03:28 crc kubenswrapper[4857]: I1128 15:03:28.733303 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 28 15:03:28 crc kubenswrapper[4857]: I1128 15:03:28.752346 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 28 15:03:29 crc kubenswrapper[4857]: I1128 15:03:29.499825 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 28 15:03:29 crc kubenswrapper[4857]: I1128 15:03:29.500842 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 28 15:03:30 crc kubenswrapper[4857]: I1128 15:03:30.704793 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 28 15:03:30 crc kubenswrapper[4857]: I1128 15:03:30.706819 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 28 15:03:30 crc kubenswrapper[4857]: I1128 15:03:30.757927 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 28 15:03:30 crc kubenswrapper[4857]: I1128 15:03:30.780848 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 28 15:03:31 crc kubenswrapper[4857]: I1128 15:03:31.405466 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 28 15:03:31 crc kubenswrapper[4857]: I1128 15:03:31.412381 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 28 15:03:31 crc kubenswrapper[4857]: I1128 15:03:31.524662 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 28 15:03:31 crc kubenswrapper[4857]: I1128 15:03:31.524745 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/glance-default-internal-api-0" Nov 28 15:03:33 crc kubenswrapper[4857]: I1128 15:03:33.653374 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 28 15:03:33 crc kubenswrapper[4857]: I1128 15:03:33.653886 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 28 15:03:41 crc kubenswrapper[4857]: I1128 15:03:41.853887 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-j7v7h"] Nov 28 15:03:41 crc kubenswrapper[4857]: E1128 15:03:41.855397 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b" containerName="init" Nov 28 15:03:41 crc kubenswrapper[4857]: I1128 15:03:41.855419 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b" containerName="init" Nov 28 15:03:41 crc kubenswrapper[4857]: E1128 15:03:41.855455 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b" containerName="dnsmasq-dns" Nov 28 15:03:41 crc kubenswrapper[4857]: I1128 15:03:41.855464 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b" containerName="dnsmasq-dns" Nov 28 15:03:41 crc kubenswrapper[4857]: I1128 15:03:41.855714 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c0fdc13-c2e2-4c8c-a5ec-734b7211ef2b" containerName="dnsmasq-dns" Nov 28 15:03:41 crc kubenswrapper[4857]: I1128 15:03:41.856601 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-j7v7h" Nov 28 15:03:41 crc kubenswrapper[4857]: I1128 15:03:41.871183 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-j7v7h"] Nov 28 15:03:41 crc kubenswrapper[4857]: I1128 15:03:41.916484 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w558d\" (UniqueName: \"kubernetes.io/projected/cc6a025d-a031-4f1d-99fa-1aa711ec4cde-kube-api-access-w558d\") pod \"placement-db-create-j7v7h\" (UID: \"cc6a025d-a031-4f1d-99fa-1aa711ec4cde\") " pod="openstack/placement-db-create-j7v7h" Nov 28 15:03:41 crc kubenswrapper[4857]: I1128 15:03:41.916557 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc6a025d-a031-4f1d-99fa-1aa711ec4cde-operator-scripts\") pod \"placement-db-create-j7v7h\" (UID: \"cc6a025d-a031-4f1d-99fa-1aa711ec4cde\") " pod="openstack/placement-db-create-j7v7h" Nov 28 15:03:41 crc kubenswrapper[4857]: I1128 15:03:41.975040 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-2f33-account-create-update-rbdtg"] Nov 28 15:03:41 crc kubenswrapper[4857]: I1128 15:03:41.976868 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-2f33-account-create-update-rbdtg" Nov 28 15:03:41 crc kubenswrapper[4857]: I1128 15:03:41.986642 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 28 15:03:42 crc kubenswrapper[4857]: I1128 15:03:42.015251 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-2f33-account-create-update-rbdtg"] Nov 28 15:03:42 crc kubenswrapper[4857]: I1128 15:03:42.032965 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w558d\" (UniqueName: \"kubernetes.io/projected/cc6a025d-a031-4f1d-99fa-1aa711ec4cde-kube-api-access-w558d\") pod \"placement-db-create-j7v7h\" (UID: \"cc6a025d-a031-4f1d-99fa-1aa711ec4cde\") " pod="openstack/placement-db-create-j7v7h" Nov 28 15:03:42 crc kubenswrapper[4857]: I1128 15:03:42.033026 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc6a025d-a031-4f1d-99fa-1aa711ec4cde-operator-scripts\") pod \"placement-db-create-j7v7h\" (UID: \"cc6a025d-a031-4f1d-99fa-1aa711ec4cde\") " pod="openstack/placement-db-create-j7v7h" Nov 28 15:03:42 crc kubenswrapper[4857]: I1128 15:03:42.033092 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1c1b914f-821e-49ac-854c-3bd5df04ae08-operator-scripts\") pod \"placement-2f33-account-create-update-rbdtg\" (UID: \"1c1b914f-821e-49ac-854c-3bd5df04ae08\") " pod="openstack/placement-2f33-account-create-update-rbdtg" Nov 28 15:03:42 crc kubenswrapper[4857]: I1128 15:03:42.033124 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bhhf9\" (UniqueName: \"kubernetes.io/projected/1c1b914f-821e-49ac-854c-3bd5df04ae08-kube-api-access-bhhf9\") pod \"placement-2f33-account-create-update-rbdtg\" (UID: \"1c1b914f-821e-49ac-854c-3bd5df04ae08\") " pod="openstack/placement-2f33-account-create-update-rbdtg" Nov 28 15:03:42 crc kubenswrapper[4857]: I1128 15:03:42.034304 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc6a025d-a031-4f1d-99fa-1aa711ec4cde-operator-scripts\") pod \"placement-db-create-j7v7h\" (UID: \"cc6a025d-a031-4f1d-99fa-1aa711ec4cde\") " pod="openstack/placement-db-create-j7v7h" Nov 28 15:03:42 crc kubenswrapper[4857]: I1128 15:03:42.080882 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w558d\" (UniqueName: \"kubernetes.io/projected/cc6a025d-a031-4f1d-99fa-1aa711ec4cde-kube-api-access-w558d\") pod \"placement-db-create-j7v7h\" (UID: \"cc6a025d-a031-4f1d-99fa-1aa711ec4cde\") " pod="openstack/placement-db-create-j7v7h" Nov 28 15:03:42 crc kubenswrapper[4857]: I1128 15:03:42.134725 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1c1b914f-821e-49ac-854c-3bd5df04ae08-operator-scripts\") pod \"placement-2f33-account-create-update-rbdtg\" (UID: \"1c1b914f-821e-49ac-854c-3bd5df04ae08\") " pod="openstack/placement-2f33-account-create-update-rbdtg" Nov 28 15:03:42 crc kubenswrapper[4857]: I1128 15:03:42.134820 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bhhf9\" (UniqueName: 
\"kubernetes.io/projected/1c1b914f-821e-49ac-854c-3bd5df04ae08-kube-api-access-bhhf9\") pod \"placement-2f33-account-create-update-rbdtg\" (UID: \"1c1b914f-821e-49ac-854c-3bd5df04ae08\") " pod="openstack/placement-2f33-account-create-update-rbdtg" Nov 28 15:03:42 crc kubenswrapper[4857]: I1128 15:03:42.135665 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1c1b914f-821e-49ac-854c-3bd5df04ae08-operator-scripts\") pod \"placement-2f33-account-create-update-rbdtg\" (UID: \"1c1b914f-821e-49ac-854c-3bd5df04ae08\") " pod="openstack/placement-2f33-account-create-update-rbdtg" Nov 28 15:03:42 crc kubenswrapper[4857]: I1128 15:03:42.154492 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bhhf9\" (UniqueName: \"kubernetes.io/projected/1c1b914f-821e-49ac-854c-3bd5df04ae08-kube-api-access-bhhf9\") pod \"placement-2f33-account-create-update-rbdtg\" (UID: \"1c1b914f-821e-49ac-854c-3bd5df04ae08\") " pod="openstack/placement-2f33-account-create-update-rbdtg" Nov 28 15:03:42 crc kubenswrapper[4857]: I1128 15:03:42.177515 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-j7v7h" Nov 28 15:03:42 crc kubenswrapper[4857]: I1128 15:03:42.336007 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-2f33-account-create-update-rbdtg" Nov 28 15:03:42 crc kubenswrapper[4857]: I1128 15:03:42.648138 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-j7v7h"] Nov 28 15:03:42 crc kubenswrapper[4857]: I1128 15:03:42.798671 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-2f33-account-create-update-rbdtg"] Nov 28 15:03:42 crc kubenswrapper[4857]: W1128 15:03:42.810866 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1c1b914f_821e_49ac_854c_3bd5df04ae08.slice/crio-2188693575e4f96a67ee12869f8313a48c81377c69e1e8ae0d5af065cf87cca9 WatchSource:0}: Error finding container 2188693575e4f96a67ee12869f8313a48c81377c69e1e8ae0d5af065cf87cca9: Status 404 returned error can't find the container with id 2188693575e4f96a67ee12869f8313a48c81377c69e1e8ae0d5af065cf87cca9 Nov 28 15:03:43 crc kubenswrapper[4857]: I1128 15:03:43.660480 4857 generic.go:334] "Generic (PLEG): container finished" podID="1c1b914f-821e-49ac-854c-3bd5df04ae08" containerID="dbe0531944fad816df0057021f927d051f121d14277fec85793b4feecb4dde3f" exitCode=0 Nov 28 15:03:43 crc kubenswrapper[4857]: I1128 15:03:43.660605 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-2f33-account-create-update-rbdtg" event={"ID":"1c1b914f-821e-49ac-854c-3bd5df04ae08","Type":"ContainerDied","Data":"dbe0531944fad816df0057021f927d051f121d14277fec85793b4feecb4dde3f"} Nov 28 15:03:43 crc kubenswrapper[4857]: I1128 15:03:43.661381 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-2f33-account-create-update-rbdtg" event={"ID":"1c1b914f-821e-49ac-854c-3bd5df04ae08","Type":"ContainerStarted","Data":"2188693575e4f96a67ee12869f8313a48c81377c69e1e8ae0d5af065cf87cca9"} Nov 28 15:03:43 crc kubenswrapper[4857]: I1128 15:03:43.669370 4857 generic.go:334] "Generic (PLEG): container finished" podID="cc6a025d-a031-4f1d-99fa-1aa711ec4cde" containerID="0a20cefbcbc36698209bd7c7348b6cb95cddf39ca0c2cf3ee24dcc1398f3ec01" exitCode=0 Nov 28 15:03:43 crc kubenswrapper[4857]: 
I1128 15:03:43.669458 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-j7v7h" event={"ID":"cc6a025d-a031-4f1d-99fa-1aa711ec4cde","Type":"ContainerDied","Data":"0a20cefbcbc36698209bd7c7348b6cb95cddf39ca0c2cf3ee24dcc1398f3ec01"} Nov 28 15:03:43 crc kubenswrapper[4857]: I1128 15:03:43.669505 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-j7v7h" event={"ID":"cc6a025d-a031-4f1d-99fa-1aa711ec4cde","Type":"ContainerStarted","Data":"df38479c53ffe530df98c5b8685958e223317cb414de67d3310090cf40a22b67"} Nov 28 15:03:45 crc kubenswrapper[4857]: I1128 15:03:45.132469 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-j7v7h" Nov 28 15:03:45 crc kubenswrapper[4857]: I1128 15:03:45.138016 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-2f33-account-create-update-rbdtg" Nov 28 15:03:45 crc kubenswrapper[4857]: I1128 15:03:45.208633 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1c1b914f-821e-49ac-854c-3bd5df04ae08-operator-scripts\") pod \"1c1b914f-821e-49ac-854c-3bd5df04ae08\" (UID: \"1c1b914f-821e-49ac-854c-3bd5df04ae08\") " Nov 28 15:03:45 crc kubenswrapper[4857]: I1128 15:03:45.208710 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bhhf9\" (UniqueName: \"kubernetes.io/projected/1c1b914f-821e-49ac-854c-3bd5df04ae08-kube-api-access-bhhf9\") pod \"1c1b914f-821e-49ac-854c-3bd5df04ae08\" (UID: \"1c1b914f-821e-49ac-854c-3bd5df04ae08\") " Nov 28 15:03:45 crc kubenswrapper[4857]: I1128 15:03:45.208799 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc6a025d-a031-4f1d-99fa-1aa711ec4cde-operator-scripts\") pod \"cc6a025d-a031-4f1d-99fa-1aa711ec4cde\" (UID: \"cc6a025d-a031-4f1d-99fa-1aa711ec4cde\") " Nov 28 15:03:45 crc kubenswrapper[4857]: I1128 15:03:45.208873 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w558d\" (UniqueName: \"kubernetes.io/projected/cc6a025d-a031-4f1d-99fa-1aa711ec4cde-kube-api-access-w558d\") pod \"cc6a025d-a031-4f1d-99fa-1aa711ec4cde\" (UID: \"cc6a025d-a031-4f1d-99fa-1aa711ec4cde\") " Nov 28 15:03:45 crc kubenswrapper[4857]: I1128 15:03:45.210070 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc6a025d-a031-4f1d-99fa-1aa711ec4cde-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "cc6a025d-a031-4f1d-99fa-1aa711ec4cde" (UID: "cc6a025d-a031-4f1d-99fa-1aa711ec4cde"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:03:45 crc kubenswrapper[4857]: I1128 15:03:45.210246 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c1b914f-821e-49ac-854c-3bd5df04ae08-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1c1b914f-821e-49ac-854c-3bd5df04ae08" (UID: "1c1b914f-821e-49ac-854c-3bd5df04ae08"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:03:45 crc kubenswrapper[4857]: I1128 15:03:45.216828 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c1b914f-821e-49ac-854c-3bd5df04ae08-kube-api-access-bhhf9" (OuterVolumeSpecName: "kube-api-access-bhhf9") pod "1c1b914f-821e-49ac-854c-3bd5df04ae08" (UID: "1c1b914f-821e-49ac-854c-3bd5df04ae08"). InnerVolumeSpecName "kube-api-access-bhhf9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:03:45 crc kubenswrapper[4857]: I1128 15:03:45.217318 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc6a025d-a031-4f1d-99fa-1aa711ec4cde-kube-api-access-w558d" (OuterVolumeSpecName: "kube-api-access-w558d") pod "cc6a025d-a031-4f1d-99fa-1aa711ec4cde" (UID: "cc6a025d-a031-4f1d-99fa-1aa711ec4cde"). InnerVolumeSpecName "kube-api-access-w558d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:03:45 crc kubenswrapper[4857]: I1128 15:03:45.312352 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cc6a025d-a031-4f1d-99fa-1aa711ec4cde-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:03:45 crc kubenswrapper[4857]: I1128 15:03:45.312399 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w558d\" (UniqueName: \"kubernetes.io/projected/cc6a025d-a031-4f1d-99fa-1aa711ec4cde-kube-api-access-w558d\") on node \"crc\" DevicePath \"\"" Nov 28 15:03:45 crc kubenswrapper[4857]: I1128 15:03:45.312414 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1c1b914f-821e-49ac-854c-3bd5df04ae08-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:03:45 crc kubenswrapper[4857]: I1128 15:03:45.312427 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bhhf9\" (UniqueName: \"kubernetes.io/projected/1c1b914f-821e-49ac-854c-3bd5df04ae08-kube-api-access-bhhf9\") on node \"crc\" DevicePath \"\"" Nov 28 15:03:45 crc kubenswrapper[4857]: I1128 15:03:45.696834 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-2f33-account-create-update-rbdtg" Nov 28 15:03:45 crc kubenswrapper[4857]: I1128 15:03:45.696833 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-2f33-account-create-update-rbdtg" event={"ID":"1c1b914f-821e-49ac-854c-3bd5df04ae08","Type":"ContainerDied","Data":"2188693575e4f96a67ee12869f8313a48c81377c69e1e8ae0d5af065cf87cca9"} Nov 28 15:03:45 crc kubenswrapper[4857]: I1128 15:03:45.697018 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2188693575e4f96a67ee12869f8313a48c81377c69e1e8ae0d5af065cf87cca9" Nov 28 15:03:45 crc kubenswrapper[4857]: I1128 15:03:45.699579 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-j7v7h" event={"ID":"cc6a025d-a031-4f1d-99fa-1aa711ec4cde","Type":"ContainerDied","Data":"df38479c53ffe530df98c5b8685958e223317cb414de67d3310090cf40a22b67"} Nov 28 15:03:45 crc kubenswrapper[4857]: I1128 15:03:45.699627 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="df38479c53ffe530df98c5b8685958e223317cb414de67d3310090cf40a22b67" Nov 28 15:03:45 crc kubenswrapper[4857]: I1128 15:03:45.699755 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-j7v7h" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.312382 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-f58c67bc9-qv6mh"] Nov 28 15:03:47 crc kubenswrapper[4857]: E1128 15:03:47.313573 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c1b914f-821e-49ac-854c-3bd5df04ae08" containerName="mariadb-account-create-update" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.313591 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c1b914f-821e-49ac-854c-3bd5df04ae08" containerName="mariadb-account-create-update" Nov 28 15:03:47 crc kubenswrapper[4857]: E1128 15:03:47.313607 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc6a025d-a031-4f1d-99fa-1aa711ec4cde" containerName="mariadb-database-create" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.313614 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc6a025d-a031-4f1d-99fa-1aa711ec4cde" containerName="mariadb-database-create" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.313850 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c1b914f-821e-49ac-854c-3bd5df04ae08" containerName="mariadb-account-create-update" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.313872 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc6a025d-a031-4f1d-99fa-1aa711ec4cde" containerName="mariadb-database-create" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.315013 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f58c67bc9-qv6mh" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.337500 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f58c67bc9-qv6mh"] Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.359619 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-d92fp"] Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.361054 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-d92fp" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.363159 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/387fe7ff-da63-4769-a0f8-312523c30249-ovsdbserver-nb\") pod \"dnsmasq-dns-f58c67bc9-qv6mh\" (UID: \"387fe7ff-da63-4769-a0f8-312523c30249\") " pod="openstack/dnsmasq-dns-f58c67bc9-qv6mh" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.363197 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/387fe7ff-da63-4769-a0f8-312523c30249-ovsdbserver-sb\") pod \"dnsmasq-dns-f58c67bc9-qv6mh\" (UID: \"387fe7ff-da63-4769-a0f8-312523c30249\") " pod="openstack/dnsmasq-dns-f58c67bc9-qv6mh" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.363256 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m8mdz\" (UniqueName: \"kubernetes.io/projected/387fe7ff-da63-4769-a0f8-312523c30249-kube-api-access-m8mdz\") pod \"dnsmasq-dns-f58c67bc9-qv6mh\" (UID: \"387fe7ff-da63-4769-a0f8-312523c30249\") " pod="openstack/dnsmasq-dns-f58c67bc9-qv6mh" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.363286 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/387fe7ff-da63-4769-a0f8-312523c30249-dns-svc\") pod \"dnsmasq-dns-f58c67bc9-qv6mh\" (UID: \"387fe7ff-da63-4769-a0f8-312523c30249\") " pod="openstack/dnsmasq-dns-f58c67bc9-qv6mh" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.363332 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/387fe7ff-da63-4769-a0f8-312523c30249-config\") pod \"dnsmasq-dns-f58c67bc9-qv6mh\" (UID: \"387fe7ff-da63-4769-a0f8-312523c30249\") " pod="openstack/dnsmasq-dns-f58c67bc9-qv6mh" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.363486 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.363713 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-gxvvb" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.364293 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.373839 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-d92fp"] Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.465197 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e9f8162a-0373-42d9-92ac-1089becb850e-logs\") pod \"placement-db-sync-d92fp\" (UID: \"e9f8162a-0373-42d9-92ac-1089becb850e\") " pod="openstack/placement-db-sync-d92fp" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.465291 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m8mdz\" (UniqueName: \"kubernetes.io/projected/387fe7ff-da63-4769-a0f8-312523c30249-kube-api-access-m8mdz\") pod \"dnsmasq-dns-f58c67bc9-qv6mh\" (UID: \"387fe7ff-da63-4769-a0f8-312523c30249\") " pod="openstack/dnsmasq-dns-f58c67bc9-qv6mh" Nov 28 15:03:47 crc 
kubenswrapper[4857]: I1128 15:03:47.465525 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/387fe7ff-da63-4769-a0f8-312523c30249-dns-svc\") pod \"dnsmasq-dns-f58c67bc9-qv6mh\" (UID: \"387fe7ff-da63-4769-a0f8-312523c30249\") " pod="openstack/dnsmasq-dns-f58c67bc9-qv6mh" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.465653 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9f8162a-0373-42d9-92ac-1089becb850e-scripts\") pod \"placement-db-sync-d92fp\" (UID: \"e9f8162a-0373-42d9-92ac-1089becb850e\") " pod="openstack/placement-db-sync-d92fp" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.465875 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/387fe7ff-da63-4769-a0f8-312523c30249-config\") pod \"dnsmasq-dns-f58c67bc9-qv6mh\" (UID: \"387fe7ff-da63-4769-a0f8-312523c30249\") " pod="openstack/dnsmasq-dns-f58c67bc9-qv6mh" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.466102 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9f8162a-0373-42d9-92ac-1089becb850e-combined-ca-bundle\") pod \"placement-db-sync-d92fp\" (UID: \"e9f8162a-0373-42d9-92ac-1089becb850e\") " pod="openstack/placement-db-sync-d92fp" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.466372 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9f8162a-0373-42d9-92ac-1089becb850e-config-data\") pod \"placement-db-sync-d92fp\" (UID: \"e9f8162a-0373-42d9-92ac-1089becb850e\") " pod="openstack/placement-db-sync-d92fp" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.466417 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/387fe7ff-da63-4769-a0f8-312523c30249-dns-svc\") pod \"dnsmasq-dns-f58c67bc9-qv6mh\" (UID: \"387fe7ff-da63-4769-a0f8-312523c30249\") " pod="openstack/dnsmasq-dns-f58c67bc9-qv6mh" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.466496 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7tgt9\" (UniqueName: \"kubernetes.io/projected/e9f8162a-0373-42d9-92ac-1089becb850e-kube-api-access-7tgt9\") pod \"placement-db-sync-d92fp\" (UID: \"e9f8162a-0373-42d9-92ac-1089becb850e\") " pod="openstack/placement-db-sync-d92fp" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.466565 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/387fe7ff-da63-4769-a0f8-312523c30249-ovsdbserver-nb\") pod \"dnsmasq-dns-f58c67bc9-qv6mh\" (UID: \"387fe7ff-da63-4769-a0f8-312523c30249\") " pod="openstack/dnsmasq-dns-f58c67bc9-qv6mh" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.466627 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/387fe7ff-da63-4769-a0f8-312523c30249-ovsdbserver-sb\") pod \"dnsmasq-dns-f58c67bc9-qv6mh\" (UID: \"387fe7ff-da63-4769-a0f8-312523c30249\") " pod="openstack/dnsmasq-dns-f58c67bc9-qv6mh" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.467056 4857 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/387fe7ff-da63-4769-a0f8-312523c30249-config\") pod \"dnsmasq-dns-f58c67bc9-qv6mh\" (UID: \"387fe7ff-da63-4769-a0f8-312523c30249\") " pod="openstack/dnsmasq-dns-f58c67bc9-qv6mh" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.467369 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/387fe7ff-da63-4769-a0f8-312523c30249-ovsdbserver-sb\") pod \"dnsmasq-dns-f58c67bc9-qv6mh\" (UID: \"387fe7ff-da63-4769-a0f8-312523c30249\") " pod="openstack/dnsmasq-dns-f58c67bc9-qv6mh" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.467751 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/387fe7ff-da63-4769-a0f8-312523c30249-ovsdbserver-nb\") pod \"dnsmasq-dns-f58c67bc9-qv6mh\" (UID: \"387fe7ff-da63-4769-a0f8-312523c30249\") " pod="openstack/dnsmasq-dns-f58c67bc9-qv6mh" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.488077 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m8mdz\" (UniqueName: \"kubernetes.io/projected/387fe7ff-da63-4769-a0f8-312523c30249-kube-api-access-m8mdz\") pod \"dnsmasq-dns-f58c67bc9-qv6mh\" (UID: \"387fe7ff-da63-4769-a0f8-312523c30249\") " pod="openstack/dnsmasq-dns-f58c67bc9-qv6mh" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.568773 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e9f8162a-0373-42d9-92ac-1089becb850e-logs\") pod \"placement-db-sync-d92fp\" (UID: \"e9f8162a-0373-42d9-92ac-1089becb850e\") " pod="openstack/placement-db-sync-d92fp" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.569201 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9f8162a-0373-42d9-92ac-1089becb850e-scripts\") pod \"placement-db-sync-d92fp\" (UID: \"e9f8162a-0373-42d9-92ac-1089becb850e\") " pod="openstack/placement-db-sync-d92fp" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.569293 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9f8162a-0373-42d9-92ac-1089becb850e-combined-ca-bundle\") pod \"placement-db-sync-d92fp\" (UID: \"e9f8162a-0373-42d9-92ac-1089becb850e\") " pod="openstack/placement-db-sync-d92fp" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.569323 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9f8162a-0373-42d9-92ac-1089becb850e-config-data\") pod \"placement-db-sync-d92fp\" (UID: \"e9f8162a-0373-42d9-92ac-1089becb850e\") " pod="openstack/placement-db-sync-d92fp" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.569350 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7tgt9\" (UniqueName: \"kubernetes.io/projected/e9f8162a-0373-42d9-92ac-1089becb850e-kube-api-access-7tgt9\") pod \"placement-db-sync-d92fp\" (UID: \"e9f8162a-0373-42d9-92ac-1089becb850e\") " pod="openstack/placement-db-sync-d92fp" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.569447 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e9f8162a-0373-42d9-92ac-1089becb850e-logs\") pod \"placement-db-sync-d92fp\" (UID: 
\"e9f8162a-0373-42d9-92ac-1089becb850e\") " pod="openstack/placement-db-sync-d92fp" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.573420 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9f8162a-0373-42d9-92ac-1089becb850e-scripts\") pod \"placement-db-sync-d92fp\" (UID: \"e9f8162a-0373-42d9-92ac-1089becb850e\") " pod="openstack/placement-db-sync-d92fp" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.573501 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9f8162a-0373-42d9-92ac-1089becb850e-config-data\") pod \"placement-db-sync-d92fp\" (UID: \"e9f8162a-0373-42d9-92ac-1089becb850e\") " pod="openstack/placement-db-sync-d92fp" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.581518 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9f8162a-0373-42d9-92ac-1089becb850e-combined-ca-bundle\") pod \"placement-db-sync-d92fp\" (UID: \"e9f8162a-0373-42d9-92ac-1089becb850e\") " pod="openstack/placement-db-sync-d92fp" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.589391 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7tgt9\" (UniqueName: \"kubernetes.io/projected/e9f8162a-0373-42d9-92ac-1089becb850e-kube-api-access-7tgt9\") pod \"placement-db-sync-d92fp\" (UID: \"e9f8162a-0373-42d9-92ac-1089becb850e\") " pod="openstack/placement-db-sync-d92fp" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.656019 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f58c67bc9-qv6mh" Nov 28 15:03:47 crc kubenswrapper[4857]: I1128 15:03:47.692683 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-d92fp" Nov 28 15:03:48 crc kubenswrapper[4857]: I1128 15:03:48.191495 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f58c67bc9-qv6mh"] Nov 28 15:03:48 crc kubenswrapper[4857]: W1128 15:03:48.196503 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod387fe7ff_da63_4769_a0f8_312523c30249.slice/crio-4b8f16805114b297d6e079bf15b69571c403696e7f24fb4bd1c6876718e1603e WatchSource:0}: Error finding container 4b8f16805114b297d6e079bf15b69571c403696e7f24fb4bd1c6876718e1603e: Status 404 returned error can't find the container with id 4b8f16805114b297d6e079bf15b69571c403696e7f24fb4bd1c6876718e1603e Nov 28 15:03:48 crc kubenswrapper[4857]: I1128 15:03:48.227133 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-d92fp"] Nov 28 15:03:48 crc kubenswrapper[4857]: I1128 15:03:48.743732 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-d92fp" event={"ID":"e9f8162a-0373-42d9-92ac-1089becb850e","Type":"ContainerStarted","Data":"eaaa6179ec6222b2ddb8c376a23f8fb4d957fc7b0fbfbf1e068804bbaf8b9cdb"} Nov 28 15:03:48 crc kubenswrapper[4857]: I1128 15:03:48.744173 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-d92fp" event={"ID":"e9f8162a-0373-42d9-92ac-1089becb850e","Type":"ContainerStarted","Data":"217ed9f45a46f236ad47b175d539b4f3e62b29ba146ff229e6551f8c5e6654b9"} Nov 28 15:03:48 crc kubenswrapper[4857]: I1128 15:03:48.746459 4857 generic.go:334] "Generic (PLEG): container finished" podID="387fe7ff-da63-4769-a0f8-312523c30249" containerID="50b70fbd926bb9809512bc6fb97951d1151b10a03f2e013d45c951bc811d2e5b" exitCode=0 Nov 28 15:03:48 crc kubenswrapper[4857]: I1128 15:03:48.746507 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f58c67bc9-qv6mh" event={"ID":"387fe7ff-da63-4769-a0f8-312523c30249","Type":"ContainerDied","Data":"50b70fbd926bb9809512bc6fb97951d1151b10a03f2e013d45c951bc811d2e5b"} Nov 28 15:03:48 crc kubenswrapper[4857]: I1128 15:03:48.746534 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f58c67bc9-qv6mh" event={"ID":"387fe7ff-da63-4769-a0f8-312523c30249","Type":"ContainerStarted","Data":"4b8f16805114b297d6e079bf15b69571c403696e7f24fb4bd1c6876718e1603e"} Nov 28 15:03:48 crc kubenswrapper[4857]: I1128 15:03:48.779121 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-d92fp" podStartSLOduration=1.7791008270000002 podStartE2EDuration="1.779100827s" podCreationTimestamp="2025-11-28 15:03:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:03:48.771589607 +0000 UTC m=+5678.895531054" watchObservedRunningTime="2025-11-28 15:03:48.779100827 +0000 UTC m=+5678.903042264" Nov 28 15:03:49 crc kubenswrapper[4857]: I1128 15:03:49.762204 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f58c67bc9-qv6mh" event={"ID":"387fe7ff-da63-4769-a0f8-312523c30249","Type":"ContainerStarted","Data":"1c32d4cff9c2c1ef0466922ac39d1338cf25b645aedc91be88dd3bddf5c6c288"} Nov 28 15:03:49 crc kubenswrapper[4857]: I1128 15:03:49.762713 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-f58c67bc9-qv6mh" Nov 28 15:03:49 crc kubenswrapper[4857]: I1128 15:03:49.804375 
4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-f58c67bc9-qv6mh" podStartSLOduration=2.804334882 podStartE2EDuration="2.804334882s" podCreationTimestamp="2025-11-28 15:03:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:03:49.799849793 +0000 UTC m=+5679.923791260" watchObservedRunningTime="2025-11-28 15:03:49.804334882 +0000 UTC m=+5679.928276360" Nov 28 15:03:50 crc kubenswrapper[4857]: I1128 15:03:50.780690 4857 generic.go:334] "Generic (PLEG): container finished" podID="e9f8162a-0373-42d9-92ac-1089becb850e" containerID="eaaa6179ec6222b2ddb8c376a23f8fb4d957fc7b0fbfbf1e068804bbaf8b9cdb" exitCode=0 Nov 28 15:03:50 crc kubenswrapper[4857]: I1128 15:03:50.780791 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-d92fp" event={"ID":"e9f8162a-0373-42d9-92ac-1089becb850e","Type":"ContainerDied","Data":"eaaa6179ec6222b2ddb8c376a23f8fb4d957fc7b0fbfbf1e068804bbaf8b9cdb"} Nov 28 15:03:52 crc kubenswrapper[4857]: I1128 15:03:52.224665 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-d92fp" Nov 28 15:03:52 crc kubenswrapper[4857]: I1128 15:03:52.277782 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7tgt9\" (UniqueName: \"kubernetes.io/projected/e9f8162a-0373-42d9-92ac-1089becb850e-kube-api-access-7tgt9\") pod \"e9f8162a-0373-42d9-92ac-1089becb850e\" (UID: \"e9f8162a-0373-42d9-92ac-1089becb850e\") " Nov 28 15:03:52 crc kubenswrapper[4857]: I1128 15:03:52.277859 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9f8162a-0373-42d9-92ac-1089becb850e-combined-ca-bundle\") pod \"e9f8162a-0373-42d9-92ac-1089becb850e\" (UID: \"e9f8162a-0373-42d9-92ac-1089becb850e\") " Nov 28 15:03:52 crc kubenswrapper[4857]: I1128 15:03:52.278066 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e9f8162a-0373-42d9-92ac-1089becb850e-logs\") pod \"e9f8162a-0373-42d9-92ac-1089becb850e\" (UID: \"e9f8162a-0373-42d9-92ac-1089becb850e\") " Nov 28 15:03:52 crc kubenswrapper[4857]: I1128 15:03:52.278209 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9f8162a-0373-42d9-92ac-1089becb850e-config-data\") pod \"e9f8162a-0373-42d9-92ac-1089becb850e\" (UID: \"e9f8162a-0373-42d9-92ac-1089becb850e\") " Nov 28 15:03:52 crc kubenswrapper[4857]: I1128 15:03:52.278242 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9f8162a-0373-42d9-92ac-1089becb850e-scripts\") pod \"e9f8162a-0373-42d9-92ac-1089becb850e\" (UID: \"e9f8162a-0373-42d9-92ac-1089becb850e\") " Nov 28 15:03:52 crc kubenswrapper[4857]: I1128 15:03:52.285273 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e9f8162a-0373-42d9-92ac-1089becb850e-logs" (OuterVolumeSpecName: "logs") pod "e9f8162a-0373-42d9-92ac-1089becb850e" (UID: "e9f8162a-0373-42d9-92ac-1089becb850e"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:03:52 crc kubenswrapper[4857]: I1128 15:03:52.298351 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e9f8162a-0373-42d9-92ac-1089becb850e-kube-api-access-7tgt9" (OuterVolumeSpecName: "kube-api-access-7tgt9") pod "e9f8162a-0373-42d9-92ac-1089becb850e" (UID: "e9f8162a-0373-42d9-92ac-1089becb850e"). InnerVolumeSpecName "kube-api-access-7tgt9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:03:52 crc kubenswrapper[4857]: I1128 15:03:52.298452 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9f8162a-0373-42d9-92ac-1089becb850e-scripts" (OuterVolumeSpecName: "scripts") pod "e9f8162a-0373-42d9-92ac-1089becb850e" (UID: "e9f8162a-0373-42d9-92ac-1089becb850e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:03:52 crc kubenswrapper[4857]: I1128 15:03:52.319797 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9f8162a-0373-42d9-92ac-1089becb850e-config-data" (OuterVolumeSpecName: "config-data") pod "e9f8162a-0373-42d9-92ac-1089becb850e" (UID: "e9f8162a-0373-42d9-92ac-1089becb850e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:03:52 crc kubenswrapper[4857]: I1128 15:03:52.324828 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9f8162a-0373-42d9-92ac-1089becb850e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e9f8162a-0373-42d9-92ac-1089becb850e" (UID: "e9f8162a-0373-42d9-92ac-1089becb850e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:03:52 crc kubenswrapper[4857]: I1128 15:03:52.381111 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e9f8162a-0373-42d9-92ac-1089becb850e-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:03:52 crc kubenswrapper[4857]: I1128 15:03:52.381158 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e9f8162a-0373-42d9-92ac-1089becb850e-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:03:52 crc kubenswrapper[4857]: I1128 15:03:52.381176 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e9f8162a-0373-42d9-92ac-1089becb850e-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:03:52 crc kubenswrapper[4857]: I1128 15:03:52.381190 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7tgt9\" (UniqueName: \"kubernetes.io/projected/e9f8162a-0373-42d9-92ac-1089becb850e-kube-api-access-7tgt9\") on node \"crc\" DevicePath \"\"" Nov 28 15:03:52 crc kubenswrapper[4857]: I1128 15:03:52.381206 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e9f8162a-0373-42d9-92ac-1089becb850e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:03:52 crc kubenswrapper[4857]: I1128 15:03:52.805607 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-d92fp" event={"ID":"e9f8162a-0373-42d9-92ac-1089becb850e","Type":"ContainerDied","Data":"217ed9f45a46f236ad47b175d539b4f3e62b29ba146ff229e6551f8c5e6654b9"} Nov 28 15:03:52 crc kubenswrapper[4857]: I1128 15:03:52.805658 4857 pod_container_deletor.go:80] "Container not found in 
pod's containers" containerID="217ed9f45a46f236ad47b175d539b4f3e62b29ba146ff229e6551f8c5e6654b9" Nov 28 15:03:52 crc kubenswrapper[4857]: I1128 15:03:52.805742 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-d92fp" Nov 28 15:03:52 crc kubenswrapper[4857]: I1128 15:03:52.892777 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-7c8bb4b4b8-6cp59"] Nov 28 15:03:52 crc kubenswrapper[4857]: E1128 15:03:52.893218 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9f8162a-0373-42d9-92ac-1089becb850e" containerName="placement-db-sync" Nov 28 15:03:52 crc kubenswrapper[4857]: I1128 15:03:52.893240 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9f8162a-0373-42d9-92ac-1089becb850e" containerName="placement-db-sync" Nov 28 15:03:52 crc kubenswrapper[4857]: I1128 15:03:52.893430 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9f8162a-0373-42d9-92ac-1089becb850e" containerName="placement-db-sync" Nov 28 15:03:52 crc kubenswrapper[4857]: I1128 15:03:52.894506 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-7c8bb4b4b8-6cp59" Nov 28 15:03:52 crc kubenswrapper[4857]: I1128 15:03:52.899381 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 28 15:03:52 crc kubenswrapper[4857]: I1128 15:03:52.903787 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-gxvvb" Nov 28 15:03:52 crc kubenswrapper[4857]: I1128 15:03:52.908203 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 28 15:03:52 crc kubenswrapper[4857]: I1128 15:03:52.919242 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-7c8bb4b4b8-6cp59"] Nov 28 15:03:52 crc kubenswrapper[4857]: I1128 15:03:52.995405 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r5qsr\" (UniqueName: \"kubernetes.io/projected/31f48ac3-8aca-4d6e-887f-d75982095216-kube-api-access-r5qsr\") pod \"placement-7c8bb4b4b8-6cp59\" (UID: \"31f48ac3-8aca-4d6e-887f-d75982095216\") " pod="openstack/placement-7c8bb4b4b8-6cp59" Nov 28 15:03:52 crc kubenswrapper[4857]: I1128 15:03:52.995856 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31f48ac3-8aca-4d6e-887f-d75982095216-config-data\") pod \"placement-7c8bb4b4b8-6cp59\" (UID: \"31f48ac3-8aca-4d6e-887f-d75982095216\") " pod="openstack/placement-7c8bb4b4b8-6cp59" Nov 28 15:03:52 crc kubenswrapper[4857]: I1128 15:03:52.995909 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/31f48ac3-8aca-4d6e-887f-d75982095216-logs\") pod \"placement-7c8bb4b4b8-6cp59\" (UID: \"31f48ac3-8aca-4d6e-887f-d75982095216\") " pod="openstack/placement-7c8bb4b4b8-6cp59" Nov 28 15:03:52 crc kubenswrapper[4857]: I1128 15:03:52.996006 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/31f48ac3-8aca-4d6e-887f-d75982095216-scripts\") pod \"placement-7c8bb4b4b8-6cp59\" (UID: \"31f48ac3-8aca-4d6e-887f-d75982095216\") " pod="openstack/placement-7c8bb4b4b8-6cp59" Nov 28 15:03:52 crc kubenswrapper[4857]: I1128 15:03:52.996063 4857 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31f48ac3-8aca-4d6e-887f-d75982095216-combined-ca-bundle\") pod \"placement-7c8bb4b4b8-6cp59\" (UID: \"31f48ac3-8aca-4d6e-887f-d75982095216\") " pod="openstack/placement-7c8bb4b4b8-6cp59" Nov 28 15:03:53 crc kubenswrapper[4857]: I1128 15:03:53.097646 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r5qsr\" (UniqueName: \"kubernetes.io/projected/31f48ac3-8aca-4d6e-887f-d75982095216-kube-api-access-r5qsr\") pod \"placement-7c8bb4b4b8-6cp59\" (UID: \"31f48ac3-8aca-4d6e-887f-d75982095216\") " pod="openstack/placement-7c8bb4b4b8-6cp59" Nov 28 15:03:53 crc kubenswrapper[4857]: I1128 15:03:53.098250 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31f48ac3-8aca-4d6e-887f-d75982095216-config-data\") pod \"placement-7c8bb4b4b8-6cp59\" (UID: \"31f48ac3-8aca-4d6e-887f-d75982095216\") " pod="openstack/placement-7c8bb4b4b8-6cp59" Nov 28 15:03:53 crc kubenswrapper[4857]: I1128 15:03:53.099246 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/31f48ac3-8aca-4d6e-887f-d75982095216-logs\") pod \"placement-7c8bb4b4b8-6cp59\" (UID: \"31f48ac3-8aca-4d6e-887f-d75982095216\") " pod="openstack/placement-7c8bb4b4b8-6cp59" Nov 28 15:03:53 crc kubenswrapper[4857]: I1128 15:03:53.099690 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/31f48ac3-8aca-4d6e-887f-d75982095216-scripts\") pod \"placement-7c8bb4b4b8-6cp59\" (UID: \"31f48ac3-8aca-4d6e-887f-d75982095216\") " pod="openstack/placement-7c8bb4b4b8-6cp59" Nov 28 15:03:53 crc kubenswrapper[4857]: I1128 15:03:53.099752 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31f48ac3-8aca-4d6e-887f-d75982095216-combined-ca-bundle\") pod \"placement-7c8bb4b4b8-6cp59\" (UID: \"31f48ac3-8aca-4d6e-887f-d75982095216\") " pod="openstack/placement-7c8bb4b4b8-6cp59" Nov 28 15:03:53 crc kubenswrapper[4857]: I1128 15:03:53.099874 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/31f48ac3-8aca-4d6e-887f-d75982095216-logs\") pod \"placement-7c8bb4b4b8-6cp59\" (UID: \"31f48ac3-8aca-4d6e-887f-d75982095216\") " pod="openstack/placement-7c8bb4b4b8-6cp59" Nov 28 15:03:53 crc kubenswrapper[4857]: I1128 15:03:53.103717 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/31f48ac3-8aca-4d6e-887f-d75982095216-scripts\") pod \"placement-7c8bb4b4b8-6cp59\" (UID: \"31f48ac3-8aca-4d6e-887f-d75982095216\") " pod="openstack/placement-7c8bb4b4b8-6cp59" Nov 28 15:03:53 crc kubenswrapper[4857]: I1128 15:03:53.103773 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/31f48ac3-8aca-4d6e-887f-d75982095216-config-data\") pod \"placement-7c8bb4b4b8-6cp59\" (UID: \"31f48ac3-8aca-4d6e-887f-d75982095216\") " pod="openstack/placement-7c8bb4b4b8-6cp59" Nov 28 15:03:53 crc kubenswrapper[4857]: I1128 15:03:53.103820 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/31f48ac3-8aca-4d6e-887f-d75982095216-combined-ca-bundle\") pod \"placement-7c8bb4b4b8-6cp59\" (UID: \"31f48ac3-8aca-4d6e-887f-d75982095216\") " pod="openstack/placement-7c8bb4b4b8-6cp59" Nov 28 15:03:53 crc kubenswrapper[4857]: I1128 15:03:53.114325 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r5qsr\" (UniqueName: \"kubernetes.io/projected/31f48ac3-8aca-4d6e-887f-d75982095216-kube-api-access-r5qsr\") pod \"placement-7c8bb4b4b8-6cp59\" (UID: \"31f48ac3-8aca-4d6e-887f-d75982095216\") " pod="openstack/placement-7c8bb4b4b8-6cp59" Nov 28 15:03:53 crc kubenswrapper[4857]: I1128 15:03:53.213188 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-7c8bb4b4b8-6cp59" Nov 28 15:03:53 crc kubenswrapper[4857]: I1128 15:03:53.682429 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-7c8bb4b4b8-6cp59"] Nov 28 15:03:53 crc kubenswrapper[4857]: W1128 15:03:53.683729 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod31f48ac3_8aca_4d6e_887f_d75982095216.slice/crio-68301da5143d97e8933126ac9ea81ad66655b0ee78d4fcad545c1cff61aef57d WatchSource:0}: Error finding container 68301da5143d97e8933126ac9ea81ad66655b0ee78d4fcad545c1cff61aef57d: Status 404 returned error can't find the container with id 68301da5143d97e8933126ac9ea81ad66655b0ee78d4fcad545c1cff61aef57d Nov 28 15:03:53 crc kubenswrapper[4857]: I1128 15:03:53.817935 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7c8bb4b4b8-6cp59" event={"ID":"31f48ac3-8aca-4d6e-887f-d75982095216","Type":"ContainerStarted","Data":"68301da5143d97e8933126ac9ea81ad66655b0ee78d4fcad545c1cff61aef57d"} Nov 28 15:03:54 crc kubenswrapper[4857]: I1128 15:03:54.838267 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7c8bb4b4b8-6cp59" event={"ID":"31f48ac3-8aca-4d6e-887f-d75982095216","Type":"ContainerStarted","Data":"0c246d40aa6b2ef8578583f8747e3d704b40862274970d8133b5bd59ce05c868"} Nov 28 15:03:54 crc kubenswrapper[4857]: I1128 15:03:54.838734 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-7c8bb4b4b8-6cp59" event={"ID":"31f48ac3-8aca-4d6e-887f-d75982095216","Type":"ContainerStarted","Data":"945c33fec38c7c6af297194ee0fd5c0380b42ac93b7e2e464be3c47fba037486"} Nov 28 15:03:54 crc kubenswrapper[4857]: I1128 15:03:54.838793 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-7c8bb4b4b8-6cp59" Nov 28 15:03:54 crc kubenswrapper[4857]: I1128 15:03:54.838819 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-7c8bb4b4b8-6cp59" Nov 28 15:03:57 crc kubenswrapper[4857]: I1128 15:03:57.658327 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-f58c67bc9-qv6mh" Nov 28 15:03:57 crc kubenswrapper[4857]: I1128 15:03:57.689606 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-7c8bb4b4b8-6cp59" podStartSLOduration=5.689585163 podStartE2EDuration="5.689585163s" podCreationTimestamp="2025-11-28 15:03:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:03:54.86627998 +0000 UTC m=+5684.990221437" watchObservedRunningTime="2025-11-28 15:03:57.689585163 +0000 UTC m=+5687.813526610" Nov 28 15:03:57 crc 
kubenswrapper[4857]: I1128 15:03:57.749390 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-786fb9969c-gmprd"] Nov 28 15:03:57 crc kubenswrapper[4857]: I1128 15:03:57.749939 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-786fb9969c-gmprd" podUID="ef8e7922-cc0b-47b6-a51f-cebe690c264d" containerName="dnsmasq-dns" containerID="cri-o://a9977b5aff16d66941afedf66511b2934faf30314b268b2ac5b8de5ea96b8f8e" gracePeriod=10 Nov 28 15:03:58 crc kubenswrapper[4857]: I1128 15:03:58.885570 4857 generic.go:334] "Generic (PLEG): container finished" podID="ef8e7922-cc0b-47b6-a51f-cebe690c264d" containerID="a9977b5aff16d66941afedf66511b2934faf30314b268b2ac5b8de5ea96b8f8e" exitCode=0 Nov 28 15:03:58 crc kubenswrapper[4857]: I1128 15:03:58.885693 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-786fb9969c-gmprd" event={"ID":"ef8e7922-cc0b-47b6-a51f-cebe690c264d","Type":"ContainerDied","Data":"a9977b5aff16d66941afedf66511b2934faf30314b268b2ac5b8de5ea96b8f8e"} Nov 28 15:03:58 crc kubenswrapper[4857]: I1128 15:03:58.886066 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-786fb9969c-gmprd" event={"ID":"ef8e7922-cc0b-47b6-a51f-cebe690c264d","Type":"ContainerDied","Data":"e785f0d3ab2a0f3ef7aeaeca8ec7716733ea2976926ee005b5b686491ab05d92"} Nov 28 15:03:58 crc kubenswrapper[4857]: I1128 15:03:58.886087 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e785f0d3ab2a0f3ef7aeaeca8ec7716733ea2976926ee005b5b686491ab05d92" Nov 28 15:03:58 crc kubenswrapper[4857]: I1128 15:03:58.911536 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-786fb9969c-gmprd" Nov 28 15:03:58 crc kubenswrapper[4857]: I1128 15:03:58.930839 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ef8e7922-cc0b-47b6-a51f-cebe690c264d-ovsdbserver-sb\") pod \"ef8e7922-cc0b-47b6-a51f-cebe690c264d\" (UID: \"ef8e7922-cc0b-47b6-a51f-cebe690c264d\") " Nov 28 15:03:58 crc kubenswrapper[4857]: I1128 15:03:58.931165 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef8e7922-cc0b-47b6-a51f-cebe690c264d-config\") pod \"ef8e7922-cc0b-47b6-a51f-cebe690c264d\" (UID: \"ef8e7922-cc0b-47b6-a51f-cebe690c264d\") " Nov 28 15:03:58 crc kubenswrapper[4857]: I1128 15:03:58.931282 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ef8e7922-cc0b-47b6-a51f-cebe690c264d-ovsdbserver-nb\") pod \"ef8e7922-cc0b-47b6-a51f-cebe690c264d\" (UID: \"ef8e7922-cc0b-47b6-a51f-cebe690c264d\") " Nov 28 15:03:58 crc kubenswrapper[4857]: I1128 15:03:58.931409 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lrkdm\" (UniqueName: \"kubernetes.io/projected/ef8e7922-cc0b-47b6-a51f-cebe690c264d-kube-api-access-lrkdm\") pod \"ef8e7922-cc0b-47b6-a51f-cebe690c264d\" (UID: \"ef8e7922-cc0b-47b6-a51f-cebe690c264d\") " Nov 28 15:03:58 crc kubenswrapper[4857]: I1128 15:03:58.931589 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ef8e7922-cc0b-47b6-a51f-cebe690c264d-dns-svc\") pod \"ef8e7922-cc0b-47b6-a51f-cebe690c264d\" (UID: \"ef8e7922-cc0b-47b6-a51f-cebe690c264d\") " Nov 28 
15:03:58 crc kubenswrapper[4857]: I1128 15:03:58.960596 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef8e7922-cc0b-47b6-a51f-cebe690c264d-kube-api-access-lrkdm" (OuterVolumeSpecName: "kube-api-access-lrkdm") pod "ef8e7922-cc0b-47b6-a51f-cebe690c264d" (UID: "ef8e7922-cc0b-47b6-a51f-cebe690c264d"). InnerVolumeSpecName "kube-api-access-lrkdm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:03:59 crc kubenswrapper[4857]: I1128 15:03:59.032697 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lrkdm\" (UniqueName: \"kubernetes.io/projected/ef8e7922-cc0b-47b6-a51f-cebe690c264d-kube-api-access-lrkdm\") on node \"crc\" DevicePath \"\"" Nov 28 15:03:59 crc kubenswrapper[4857]: I1128 15:03:59.035624 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef8e7922-cc0b-47b6-a51f-cebe690c264d-config" (OuterVolumeSpecName: "config") pod "ef8e7922-cc0b-47b6-a51f-cebe690c264d" (UID: "ef8e7922-cc0b-47b6-a51f-cebe690c264d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:03:59 crc kubenswrapper[4857]: I1128 15:03:59.042333 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef8e7922-cc0b-47b6-a51f-cebe690c264d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ef8e7922-cc0b-47b6-a51f-cebe690c264d" (UID: "ef8e7922-cc0b-47b6-a51f-cebe690c264d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:03:59 crc kubenswrapper[4857]: I1128 15:03:59.046258 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef8e7922-cc0b-47b6-a51f-cebe690c264d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ef8e7922-cc0b-47b6-a51f-cebe690c264d" (UID: "ef8e7922-cc0b-47b6-a51f-cebe690c264d"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:03:59 crc kubenswrapper[4857]: I1128 15:03:59.046911 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef8e7922-cc0b-47b6-a51f-cebe690c264d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ef8e7922-cc0b-47b6-a51f-cebe690c264d" (UID: "ef8e7922-cc0b-47b6-a51f-cebe690c264d"). InnerVolumeSpecName "ovsdbserver-sb". 
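[Editor's annotation, not part of the captured log] The deletion sequence above shows the kubelet killing the dnsmasq-dns container with gracePeriod=10 at 15:03:57.749939 and the PLEG reporting it finished with exitCode=0 at 15:03:58.885570, i.e. the container stopped in roughly 1.14 s, well inside its grace window. Below is a minimal, stdlib-only Go sketch of pairing the two klog timestamps to compute that interval; the parser and the assumed year 2025 (klog lines omit the year; it is taken from the podCreationTimestamp fields nearby) are illustrative additions, not the kubelet's own code.

package main

import (
	"fmt"
	"regexp"
	"time"
)

// klog prefixes look like "I1128 15:03:57.749939": severity, month+day,
// then a microsecond wall-clock time; the year is not logged.
var klogTS = regexp.MustCompile(`[IWE](\d{2})(\d{2}) (\d{2}:\d{2}:\d{2}\.\d{6})`)

func stamp(line string) time.Time {
	m := klogTS.FindStringSubmatch(line)
	if m == nil {
		panic("no klog timestamp in line")
	}
	// Year 2025 is an assumption taken from the surrounding log fields.
	t, err := time.Parse("2006 01 02 15:04:05.000000", "2025 "+m[1]+" "+m[2]+" "+m[3])
	if err != nil {
		panic(err)
	}
	return t
}

func main() {
	// Abbreviated copies of the two entries above.
	kill := `I1128 15:03:57.749939 4857 kuberuntime_container.go:808] "Killing container with a grace period" gracePeriod=10`
	died := `I1128 15:03:58.885570 4857 generic.go:334] "Generic (PLEG): container finished" exitCode=0`
	fmt.Println("stop latency:", stamp(died).Sub(stamp(kill))) // prints 1.135631s, vs. the 10s grace period
}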
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:03:59 crc kubenswrapper[4857]: I1128 15:03:59.135203 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ef8e7922-cc0b-47b6-a51f-cebe690c264d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 15:03:59 crc kubenswrapper[4857]: I1128 15:03:59.135249 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ef8e7922-cc0b-47b6-a51f-cebe690c264d-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 15:03:59 crc kubenswrapper[4857]: I1128 15:03:59.135263 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ef8e7922-cc0b-47b6-a51f-cebe690c264d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 15:03:59 crc kubenswrapper[4857]: I1128 15:03:59.135276 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ef8e7922-cc0b-47b6-a51f-cebe690c264d-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:03:59 crc kubenswrapper[4857]: I1128 15:03:59.896919 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-786fb9969c-gmprd" Nov 28 15:03:59 crc kubenswrapper[4857]: I1128 15:03:59.951508 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-786fb9969c-gmprd"] Nov 28 15:03:59 crc kubenswrapper[4857]: I1128 15:03:59.964161 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-786fb9969c-gmprd"] Nov 28 15:04:00 crc kubenswrapper[4857]: I1128 15:04:00.250200 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ef8e7922-cc0b-47b6-a51f-cebe690c264d" path="/var/lib/kubelet/pods/ef8e7922-cc0b-47b6-a51f-cebe690c264d/volumes" Nov 28 15:04:17 crc kubenswrapper[4857]: I1128 15:04:17.804109 4857 scope.go:117] "RemoveContainer" containerID="60a31fc3ffc6f287af1d2ccde25770c5621b71289a47cb6f2cd7b2545b0f7b48" Nov 28 15:04:17 crc kubenswrapper[4857]: I1128 15:04:17.832083 4857 scope.go:117] "RemoveContainer" containerID="d13faed0f994bd7af18e66e41526509932f300faef1a1b534b2af7a0d1075eb0" Nov 28 15:04:24 crc kubenswrapper[4857]: I1128 15:04:24.317400 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-7c8bb4b4b8-6cp59" Nov 28 15:04:24 crc kubenswrapper[4857]: I1128 15:04:24.356520 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-7c8bb4b4b8-6cp59" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.047700 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-6h6vh"] Nov 28 15:04:49 crc kubenswrapper[4857]: E1128 15:04:49.048768 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef8e7922-cc0b-47b6-a51f-cebe690c264d" containerName="init" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.048781 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef8e7922-cc0b-47b6-a51f-cebe690c264d" containerName="init" Nov 28 15:04:49 crc kubenswrapper[4857]: E1128 15:04:49.048796 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef8e7922-cc0b-47b6-a51f-cebe690c264d" containerName="dnsmasq-dns" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.048802 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef8e7922-cc0b-47b6-a51f-cebe690c264d" containerName="dnsmasq-dns" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 
15:04:49.048978 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef8e7922-cc0b-47b6-a51f-cebe690c264d" containerName="dnsmasq-dns" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.049559 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-6h6vh" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.069536 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-6h6vh"] Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.129978 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v56rw\" (UniqueName: \"kubernetes.io/projected/7140a79d-253c-4ac2-b9b2-4df19f81750d-kube-api-access-v56rw\") pod \"nova-api-db-create-6h6vh\" (UID: \"7140a79d-253c-4ac2-b9b2-4df19f81750d\") " pod="openstack/nova-api-db-create-6h6vh" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.130057 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7140a79d-253c-4ac2-b9b2-4df19f81750d-operator-scripts\") pod \"nova-api-db-create-6h6vh\" (UID: \"7140a79d-253c-4ac2-b9b2-4df19f81750d\") " pod="openstack/nova-api-db-create-6h6vh" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.132473 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-jpt89"] Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.133586 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-jpt89" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.143144 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-jpt89"] Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.232236 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v56rw\" (UniqueName: \"kubernetes.io/projected/7140a79d-253c-4ac2-b9b2-4df19f81750d-kube-api-access-v56rw\") pod \"nova-api-db-create-6h6vh\" (UID: \"7140a79d-253c-4ac2-b9b2-4df19f81750d\") " pod="openstack/nova-api-db-create-6h6vh" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.232561 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7140a79d-253c-4ac2-b9b2-4df19f81750d-operator-scripts\") pod \"nova-api-db-create-6h6vh\" (UID: \"7140a79d-253c-4ac2-b9b2-4df19f81750d\") " pod="openstack/nova-api-db-create-6h6vh" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.232665 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/357ad1be-4369-4bbd-89ac-57cc7eefcc78-operator-scripts\") pod \"nova-cell0-db-create-jpt89\" (UID: \"357ad1be-4369-4bbd-89ac-57cc7eefcc78\") " pod="openstack/nova-cell0-db-create-jpt89" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.232763 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n9mbm\" (UniqueName: \"kubernetes.io/projected/357ad1be-4369-4bbd-89ac-57cc7eefcc78-kube-api-access-n9mbm\") pod \"nova-cell0-db-create-jpt89\" (UID: \"357ad1be-4369-4bbd-89ac-57cc7eefcc78\") " pod="openstack/nova-cell0-db-create-jpt89" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.233722 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7140a79d-253c-4ac2-b9b2-4df19f81750d-operator-scripts\") pod \"nova-api-db-create-6h6vh\" (UID: \"7140a79d-253c-4ac2-b9b2-4df19f81750d\") " pod="openstack/nova-api-db-create-6h6vh" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.248069 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-r2q5q"] Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.249584 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-r2q5q" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.259808 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-903a-account-create-update-x8xm5"] Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.261730 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-903a-account-create-update-x8xm5" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.268146 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-r2q5q"] Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.268657 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.280583 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-903a-account-create-update-x8xm5"] Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.281286 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v56rw\" (UniqueName: \"kubernetes.io/projected/7140a79d-253c-4ac2-b9b2-4df19f81750d-kube-api-access-v56rw\") pod \"nova-api-db-create-6h6vh\" (UID: \"7140a79d-253c-4ac2-b9b2-4df19f81750d\") " pod="openstack/nova-api-db-create-6h6vh" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.336741 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4e90d47-02ed-49b5-8911-9d9aa2850884-operator-scripts\") pod \"nova-cell1-db-create-r2q5q\" (UID: \"a4e90d47-02ed-49b5-8911-9d9aa2850884\") " pod="openstack/nova-cell1-db-create-r2q5q" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.336922 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jbm6z\" (UniqueName: \"kubernetes.io/projected/a4e90d47-02ed-49b5-8911-9d9aa2850884-kube-api-access-jbm6z\") pod \"nova-cell1-db-create-r2q5q\" (UID: \"a4e90d47-02ed-49b5-8911-9d9aa2850884\") " pod="openstack/nova-cell1-db-create-r2q5q" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.336989 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/db5538ba-ffc9-488c-99f2-025ac358a4f6-operator-scripts\") pod \"nova-api-903a-account-create-update-x8xm5\" (UID: \"db5538ba-ffc9-488c-99f2-025ac358a4f6\") " pod="openstack/nova-api-903a-account-create-update-x8xm5" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.337182 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/357ad1be-4369-4bbd-89ac-57cc7eefcc78-operator-scripts\") pod \"nova-cell0-db-create-jpt89\" (UID: \"357ad1be-4369-4bbd-89ac-57cc7eefcc78\") " pod="openstack/nova-cell0-db-create-jpt89" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 
15:04:49.337236 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n9mbm\" (UniqueName: \"kubernetes.io/projected/357ad1be-4369-4bbd-89ac-57cc7eefcc78-kube-api-access-n9mbm\") pod \"nova-cell0-db-create-jpt89\" (UID: \"357ad1be-4369-4bbd-89ac-57cc7eefcc78\") " pod="openstack/nova-cell0-db-create-jpt89" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.337290 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dw8rv\" (UniqueName: \"kubernetes.io/projected/db5538ba-ffc9-488c-99f2-025ac358a4f6-kube-api-access-dw8rv\") pod \"nova-api-903a-account-create-update-x8xm5\" (UID: \"db5538ba-ffc9-488c-99f2-025ac358a4f6\") " pod="openstack/nova-api-903a-account-create-update-x8xm5" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.339727 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/357ad1be-4369-4bbd-89ac-57cc7eefcc78-operator-scripts\") pod \"nova-cell0-db-create-jpt89\" (UID: \"357ad1be-4369-4bbd-89ac-57cc7eefcc78\") " pod="openstack/nova-cell0-db-create-jpt89" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.359493 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n9mbm\" (UniqueName: \"kubernetes.io/projected/357ad1be-4369-4bbd-89ac-57cc7eefcc78-kube-api-access-n9mbm\") pod \"nova-cell0-db-create-jpt89\" (UID: \"357ad1be-4369-4bbd-89ac-57cc7eefcc78\") " pod="openstack/nova-cell0-db-create-jpt89" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.379401 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-6h6vh" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.438981 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dw8rv\" (UniqueName: \"kubernetes.io/projected/db5538ba-ffc9-488c-99f2-025ac358a4f6-kube-api-access-dw8rv\") pod \"nova-api-903a-account-create-update-x8xm5\" (UID: \"db5538ba-ffc9-488c-99f2-025ac358a4f6\") " pod="openstack/nova-api-903a-account-create-update-x8xm5" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.439085 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4e90d47-02ed-49b5-8911-9d9aa2850884-operator-scripts\") pod \"nova-cell1-db-create-r2q5q\" (UID: \"a4e90d47-02ed-49b5-8911-9d9aa2850884\") " pod="openstack/nova-cell1-db-create-r2q5q" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.439125 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jbm6z\" (UniqueName: \"kubernetes.io/projected/a4e90d47-02ed-49b5-8911-9d9aa2850884-kube-api-access-jbm6z\") pod \"nova-cell1-db-create-r2q5q\" (UID: \"a4e90d47-02ed-49b5-8911-9d9aa2850884\") " pod="openstack/nova-cell1-db-create-r2q5q" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.439148 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/db5538ba-ffc9-488c-99f2-025ac358a4f6-operator-scripts\") pod \"nova-api-903a-account-create-update-x8xm5\" (UID: \"db5538ba-ffc9-488c-99f2-025ac358a4f6\") " pod="openstack/nova-api-903a-account-create-update-x8xm5" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.440120 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" 
(UniqueName: \"kubernetes.io/configmap/db5538ba-ffc9-488c-99f2-025ac358a4f6-operator-scripts\") pod \"nova-api-903a-account-create-update-x8xm5\" (UID: \"db5538ba-ffc9-488c-99f2-025ac358a4f6\") " pod="openstack/nova-api-903a-account-create-update-x8xm5" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.441285 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4e90d47-02ed-49b5-8911-9d9aa2850884-operator-scripts\") pod \"nova-cell1-db-create-r2q5q\" (UID: \"a4e90d47-02ed-49b5-8911-9d9aa2850884\") " pod="openstack/nova-cell1-db-create-r2q5q" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.448354 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-jpt89" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.452517 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-f25a-account-create-update-hzd8b"] Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.453704 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-f25a-account-create-update-hzd8b" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.457684 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.459959 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jbm6z\" (UniqueName: \"kubernetes.io/projected/a4e90d47-02ed-49b5-8911-9d9aa2850884-kube-api-access-jbm6z\") pod \"nova-cell1-db-create-r2q5q\" (UID: \"a4e90d47-02ed-49b5-8911-9d9aa2850884\") " pod="openstack/nova-cell1-db-create-r2q5q" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.463135 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dw8rv\" (UniqueName: \"kubernetes.io/projected/db5538ba-ffc9-488c-99f2-025ac358a4f6-kube-api-access-dw8rv\") pod \"nova-api-903a-account-create-update-x8xm5\" (UID: \"db5538ba-ffc9-488c-99f2-025ac358a4f6\") " pod="openstack/nova-api-903a-account-create-update-x8xm5" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.470076 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-f25a-account-create-update-hzd8b"] Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.542723 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7cx9w\" (UniqueName: \"kubernetes.io/projected/705d6dbe-fab7-4f36-be33-c361ae31b8fb-kube-api-access-7cx9w\") pod \"nova-cell0-f25a-account-create-update-hzd8b\" (UID: \"705d6dbe-fab7-4f36-be33-c361ae31b8fb\") " pod="openstack/nova-cell0-f25a-account-create-update-hzd8b" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.543260 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/705d6dbe-fab7-4f36-be33-c361ae31b8fb-operator-scripts\") pod \"nova-cell0-f25a-account-create-update-hzd8b\" (UID: \"705d6dbe-fab7-4f36-be33-c361ae31b8fb\") " pod="openstack/nova-cell0-f25a-account-create-update-hzd8b" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.573851 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-r2q5q" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.646138 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-903a-account-create-update-x8xm5" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.647488 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7cx9w\" (UniqueName: \"kubernetes.io/projected/705d6dbe-fab7-4f36-be33-c361ae31b8fb-kube-api-access-7cx9w\") pod \"nova-cell0-f25a-account-create-update-hzd8b\" (UID: \"705d6dbe-fab7-4f36-be33-c361ae31b8fb\") " pod="openstack/nova-cell0-f25a-account-create-update-hzd8b" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.647517 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/705d6dbe-fab7-4f36-be33-c361ae31b8fb-operator-scripts\") pod \"nova-cell0-f25a-account-create-update-hzd8b\" (UID: \"705d6dbe-fab7-4f36-be33-c361ae31b8fb\") " pod="openstack/nova-cell0-f25a-account-create-update-hzd8b" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.648179 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/705d6dbe-fab7-4f36-be33-c361ae31b8fb-operator-scripts\") pod \"nova-cell0-f25a-account-create-update-hzd8b\" (UID: \"705d6dbe-fab7-4f36-be33-c361ae31b8fb\") " pod="openstack/nova-cell0-f25a-account-create-update-hzd8b" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.665767 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7cx9w\" (UniqueName: \"kubernetes.io/projected/705d6dbe-fab7-4f36-be33-c361ae31b8fb-kube-api-access-7cx9w\") pod \"nova-cell0-f25a-account-create-update-hzd8b\" (UID: \"705d6dbe-fab7-4f36-be33-c361ae31b8fb\") " pod="openstack/nova-cell0-f25a-account-create-update-hzd8b" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.669648 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-04d3-account-create-update-5cm48"] Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.671878 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-04d3-account-create-update-5cm48" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.678289 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.692697 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-04d3-account-create-update-5cm48"] Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.750128 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jmpd8\" (UniqueName: \"kubernetes.io/projected/19d127ed-bf35-493e-aeea-1103a89be2e7-kube-api-access-jmpd8\") pod \"nova-cell1-04d3-account-create-update-5cm48\" (UID: \"19d127ed-bf35-493e-aeea-1103a89be2e7\") " pod="openstack/nova-cell1-04d3-account-create-update-5cm48" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.750347 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/19d127ed-bf35-493e-aeea-1103a89be2e7-operator-scripts\") pod \"nova-cell1-04d3-account-create-update-5cm48\" (UID: \"19d127ed-bf35-493e-aeea-1103a89be2e7\") " pod="openstack/nova-cell1-04d3-account-create-update-5cm48" Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.850451 4857 util.go:30] "No sandbox for pod can be found. 
Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.852438 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jmpd8\" (UniqueName: \"kubernetes.io/projected/19d127ed-bf35-493e-aeea-1103a89be2e7-kube-api-access-jmpd8\") pod \"nova-cell1-04d3-account-create-update-5cm48\" (UID: \"19d127ed-bf35-493e-aeea-1103a89be2e7\") " pod="openstack/nova-cell1-04d3-account-create-update-5cm48"
Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.852580 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/19d127ed-bf35-493e-aeea-1103a89be2e7-operator-scripts\") pod \"nova-cell1-04d3-account-create-update-5cm48\" (UID: \"19d127ed-bf35-493e-aeea-1103a89be2e7\") " pod="openstack/nova-cell1-04d3-account-create-update-5cm48"
Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.853456 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/19d127ed-bf35-493e-aeea-1103a89be2e7-operator-scripts\") pod \"nova-cell1-04d3-account-create-update-5cm48\" (UID: \"19d127ed-bf35-493e-aeea-1103a89be2e7\") " pod="openstack/nova-cell1-04d3-account-create-update-5cm48"
Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.879312 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jmpd8\" (UniqueName: \"kubernetes.io/projected/19d127ed-bf35-493e-aeea-1103a89be2e7-kube-api-access-jmpd8\") pod \"nova-cell1-04d3-account-create-update-5cm48\" (UID: \"19d127ed-bf35-493e-aeea-1103a89be2e7\") " pod="openstack/nova-cell1-04d3-account-create-update-5cm48"
Nov 28 15:04:49 crc kubenswrapper[4857]: I1128 15:04:49.887155 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-6h6vh"]
Nov 28 15:04:49 crc kubenswrapper[4857]: W1128 15:04:49.892441 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7140a79d_253c_4ac2_b9b2_4df19f81750d.slice/crio-96f484709dbd837bcf82ae3b771a6f89f2129b2dc791c867ea88594ef151b22e WatchSource:0}: Error finding container 96f484709dbd837bcf82ae3b771a6f89f2129b2dc791c867ea88594ef151b22e: Status 404 returned error can't find the container with id 96f484709dbd837bcf82ae3b771a6f89f2129b2dc791c867ea88594ef151b22e
Nov 28 15:04:50 crc kubenswrapper[4857]: I1128 15:04:50.012861 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-jpt89"]
Nov 28 15:04:50 crc kubenswrapper[4857]: I1128 15:04:50.026699 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-04d3-account-create-update-5cm48"
Nov 28 15:04:50 crc kubenswrapper[4857]: I1128 15:04:50.111307 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-r2q5q"]
Nov 28 15:04:50 crc kubenswrapper[4857]: W1128 15:04:50.114639 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda4e90d47_02ed_49b5_8911_9d9aa2850884.slice/crio-35b96e5151a62c4c241987bb7b3f2eb406cad480012820b920bc4d9c5d1b2f47 WatchSource:0}: Error finding container 35b96e5151a62c4c241987bb7b3f2eb406cad480012820b920bc4d9c5d1b2f47: Status 404 returned error can't find the container with id 35b96e5151a62c4c241987bb7b3f2eb406cad480012820b920bc4d9c5d1b2f47
Nov 28 15:04:50 crc kubenswrapper[4857]: I1128 15:04:50.212588 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-903a-account-create-update-x8xm5"]
Nov 28 15:04:50 crc kubenswrapper[4857]: W1128 15:04:50.226496 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddb5538ba_ffc9_488c_99f2_025ac358a4f6.slice/crio-ad422df0b0c708b3c543c3d42f472097b3046783a3f239dc362a1386cfc82d46 WatchSource:0}: Error finding container ad422df0b0c708b3c543c3d42f472097b3046783a3f239dc362a1386cfc82d46: Status 404 returned error can't find the container with id ad422df0b0c708b3c543c3d42f472097b3046783a3f239dc362a1386cfc82d46
Nov 28 15:04:50 crc kubenswrapper[4857]: I1128 15:04:50.371772 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-f25a-account-create-update-hzd8b"]
Nov 28 15:04:50 crc kubenswrapper[4857]: W1128 15:04:50.385100 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod705d6dbe_fab7_4f36_be33_c361ae31b8fb.slice/crio-09ac47724ef35603d87051ac57c04761f770839cf75a464b5259d4e4f1ec5e31 WatchSource:0}: Error finding container 09ac47724ef35603d87051ac57c04761f770839cf75a464b5259d4e4f1ec5e31: Status 404 returned error can't find the container with id 09ac47724ef35603d87051ac57c04761f770839cf75a464b5259d4e4f1ec5e31
Nov 28 15:04:50 crc kubenswrapper[4857]: I1128 15:04:50.559823 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-04d3-account-create-update-5cm48"]
Nov 28 15:04:50 crc kubenswrapper[4857]: W1128 15:04:50.590363 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod19d127ed_bf35_493e_aeea_1103a89be2e7.slice/crio-35ccfdac099a33407a44ad892870ab9b9ace52ec7db9b95f42ad0c9d5d6a8336 WatchSource:0}: Error finding container 35ccfdac099a33407a44ad892870ab9b9ace52ec7db9b95f42ad0c9d5d6a8336: Status 404 returned error can't find the container with id 35ccfdac099a33407a44ad892870ab9b9ace52ec7db9b95f42ad0c9d5d6a8336
Nov 28 15:04:50 crc kubenswrapper[4857]: I1128 15:04:50.717250 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-r2q5q" event={"ID":"a4e90d47-02ed-49b5-8911-9d9aa2850884","Type":"ContainerStarted","Data":"35b96e5151a62c4c241987bb7b3f2eb406cad480012820b920bc4d9c5d1b2f47"}
Nov 28 15:04:50 crc kubenswrapper[4857]: I1128 15:04:50.722216 4857 generic.go:334] "Generic (PLEG): container finished" podID="7140a79d-253c-4ac2-b9b2-4df19f81750d" containerID="7d6bb09b5267e210f68b7288cab4761134713bfc172beef353db4c1e6aa5a18c" exitCode=0
Nov 28 15:04:50 crc kubenswrapper[4857]: I1128 15:04:50.722315 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-6h6vh" event={"ID":"7140a79d-253c-4ac2-b9b2-4df19f81750d","Type":"ContainerDied","Data":"7d6bb09b5267e210f68b7288cab4761134713bfc172beef353db4c1e6aa5a18c"}
Nov 28 15:04:50 crc kubenswrapper[4857]: I1128 15:04:50.722361 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-6h6vh" event={"ID":"7140a79d-253c-4ac2-b9b2-4df19f81750d","Type":"ContainerStarted","Data":"96f484709dbd837bcf82ae3b771a6f89f2129b2dc791c867ea88594ef151b22e"}
Nov 28 15:04:50 crc kubenswrapper[4857]: I1128 15:04:50.723849 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-903a-account-create-update-x8xm5" event={"ID":"db5538ba-ffc9-488c-99f2-025ac358a4f6","Type":"ContainerStarted","Data":"ad422df0b0c708b3c543c3d42f472097b3046783a3f239dc362a1386cfc82d46"}
Nov 28 15:04:50 crc kubenswrapper[4857]: I1128 15:04:50.726453 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-f25a-account-create-update-hzd8b" event={"ID":"705d6dbe-fab7-4f36-be33-c361ae31b8fb","Type":"ContainerStarted","Data":"09ac47724ef35603d87051ac57c04761f770839cf75a464b5259d4e4f1ec5e31"}
Nov 28 15:04:50 crc kubenswrapper[4857]: I1128 15:04:50.731790 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-04d3-account-create-update-5cm48" event={"ID":"19d127ed-bf35-493e-aeea-1103a89be2e7","Type":"ContainerStarted","Data":"35ccfdac099a33407a44ad892870ab9b9ace52ec7db9b95f42ad0c9d5d6a8336"}
Nov 28 15:04:50 crc kubenswrapper[4857]: I1128 15:04:50.735792 4857 generic.go:334] "Generic (PLEG): container finished" podID="357ad1be-4369-4bbd-89ac-57cc7eefcc78" containerID="c72364aff2492138903cd90d5e46cd7989fe733d5810f9654591dbd733d8f814" exitCode=0
Nov 28 15:04:50 crc kubenswrapper[4857]: I1128 15:04:50.735851 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-jpt89" event={"ID":"357ad1be-4369-4bbd-89ac-57cc7eefcc78","Type":"ContainerDied","Data":"c72364aff2492138903cd90d5e46cd7989fe733d5810f9654591dbd733d8f814"}
Nov 28 15:04:50 crc kubenswrapper[4857]: I1128 15:04:50.735900 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-jpt89" event={"ID":"357ad1be-4369-4bbd-89ac-57cc7eefcc78","Type":"ContainerStarted","Data":"704d0bc52dc055f323b2bb8c97f64978d88dc7aeed7c9f183c1df94986bd80a2"}
Nov 28 15:04:51 crc kubenswrapper[4857]: I1128 15:04:51.771218 4857 generic.go:334] "Generic (PLEG): container finished" podID="19d127ed-bf35-493e-aeea-1103a89be2e7" containerID="29a8a2f4e184a8bb9e0ba03adbd89dafbc988e030b4b3a6226a7f13c0fdd91db" exitCode=0
Nov 28 15:04:51 crc kubenswrapper[4857]: I1128 15:04:51.771356 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-04d3-account-create-update-5cm48" event={"ID":"19d127ed-bf35-493e-aeea-1103a89be2e7","Type":"ContainerDied","Data":"29a8a2f4e184a8bb9e0ba03adbd89dafbc988e030b4b3a6226a7f13c0fdd91db"}
Nov 28 15:04:51 crc kubenswrapper[4857]: I1128 15:04:51.781897 4857 generic.go:334] "Generic (PLEG): container finished" podID="a4e90d47-02ed-49b5-8911-9d9aa2850884" containerID="515cc291c680a8a395fc2cffdaf10e466d90e00c85eca6e2f772af8d3a0a94cd" exitCode=0
Nov 28 15:04:51 crc kubenswrapper[4857]: I1128 15:04:51.782031 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-r2q5q" event={"ID":"a4e90d47-02ed-49b5-8911-9d9aa2850884","Type":"ContainerDied","Data":"515cc291c680a8a395fc2cffdaf10e466d90e00c85eca6e2f772af8d3a0a94cd"}
Nov 28 15:04:51 crc kubenswrapper[4857]: I1128 15:04:51.785469 4857 generic.go:334] "Generic (PLEG): container finished" podID="db5538ba-ffc9-488c-99f2-025ac358a4f6" containerID="56474241f936993ef7e3a6e7b786e08337c4bef296b4797c0d36684df071a854" exitCode=0
Nov 28 15:04:51 crc kubenswrapper[4857]: I1128 15:04:51.785544 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-903a-account-create-update-x8xm5" event={"ID":"db5538ba-ffc9-488c-99f2-025ac358a4f6","Type":"ContainerDied","Data":"56474241f936993ef7e3a6e7b786e08337c4bef296b4797c0d36684df071a854"}
Nov 28 15:04:51 crc kubenswrapper[4857]: I1128 15:04:51.788129 4857 generic.go:334] "Generic (PLEG): container finished" podID="705d6dbe-fab7-4f36-be33-c361ae31b8fb" containerID="34309413075ac307ead93b48426fa847fedb3f47aa1521f92cbb3b87a992e16a" exitCode=0
Nov 28 15:04:51 crc kubenswrapper[4857]: I1128 15:04:51.788196 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-f25a-account-create-update-hzd8b" event={"ID":"705d6dbe-fab7-4f36-be33-c361ae31b8fb","Type":"ContainerDied","Data":"34309413075ac307ead93b48426fa847fedb3f47aa1521f92cbb3b87a992e16a"}
Nov 28 15:04:52 crc kubenswrapper[4857]: I1128 15:04:52.254507 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-6h6vh"
Nov 28 15:04:52 crc kubenswrapper[4857]: I1128 15:04:52.262280 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-jpt89"
Nov 28 15:04:52 crc kubenswrapper[4857]: I1128 15:04:52.415201 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v56rw\" (UniqueName: \"kubernetes.io/projected/7140a79d-253c-4ac2-b9b2-4df19f81750d-kube-api-access-v56rw\") pod \"7140a79d-253c-4ac2-b9b2-4df19f81750d\" (UID: \"7140a79d-253c-4ac2-b9b2-4df19f81750d\") "
Nov 28 15:04:52 crc kubenswrapper[4857]: I1128 15:04:52.415329 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n9mbm\" (UniqueName: \"kubernetes.io/projected/357ad1be-4369-4bbd-89ac-57cc7eefcc78-kube-api-access-n9mbm\") pod \"357ad1be-4369-4bbd-89ac-57cc7eefcc78\" (UID: \"357ad1be-4369-4bbd-89ac-57cc7eefcc78\") "
Nov 28 15:04:52 crc kubenswrapper[4857]: I1128 15:04:52.415365 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/357ad1be-4369-4bbd-89ac-57cc7eefcc78-operator-scripts\") pod \"357ad1be-4369-4bbd-89ac-57cc7eefcc78\" (UID: \"357ad1be-4369-4bbd-89ac-57cc7eefcc78\") "
Nov 28 15:04:52 crc kubenswrapper[4857]: I1128 15:04:52.415392 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7140a79d-253c-4ac2-b9b2-4df19f81750d-operator-scripts\") pod \"7140a79d-253c-4ac2-b9b2-4df19f81750d\" (UID: \"7140a79d-253c-4ac2-b9b2-4df19f81750d\") "
Nov 28 15:04:52 crc kubenswrapper[4857]: I1128 15:04:52.416832 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/357ad1be-4369-4bbd-89ac-57cc7eefcc78-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "357ad1be-4369-4bbd-89ac-57cc7eefcc78" (UID: "357ad1be-4369-4bbd-89ac-57cc7eefcc78"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:04:52 crc kubenswrapper[4857]: I1128 15:04:52.417130 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7140a79d-253c-4ac2-b9b2-4df19f81750d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7140a79d-253c-4ac2-b9b2-4df19f81750d" (UID: "7140a79d-253c-4ac2-b9b2-4df19f81750d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:04:52 crc kubenswrapper[4857]: I1128 15:04:52.422915 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/357ad1be-4369-4bbd-89ac-57cc7eefcc78-kube-api-access-n9mbm" (OuterVolumeSpecName: "kube-api-access-n9mbm") pod "357ad1be-4369-4bbd-89ac-57cc7eefcc78" (UID: "357ad1be-4369-4bbd-89ac-57cc7eefcc78"). InnerVolumeSpecName "kube-api-access-n9mbm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:04:52 crc kubenswrapper[4857]: I1128 15:04:52.424148 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7140a79d-253c-4ac2-b9b2-4df19f81750d-kube-api-access-v56rw" (OuterVolumeSpecName: "kube-api-access-v56rw") pod "7140a79d-253c-4ac2-b9b2-4df19f81750d" (UID: "7140a79d-253c-4ac2-b9b2-4df19f81750d"). InnerVolumeSpecName "kube-api-access-v56rw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:04:52 crc kubenswrapper[4857]: I1128 15:04:52.518157 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/357ad1be-4369-4bbd-89ac-57cc7eefcc78-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 15:04:52 crc kubenswrapper[4857]: I1128 15:04:52.518223 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7140a79d-253c-4ac2-b9b2-4df19f81750d-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 15:04:52 crc kubenswrapper[4857]: I1128 15:04:52.518245 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v56rw\" (UniqueName: \"kubernetes.io/projected/7140a79d-253c-4ac2-b9b2-4df19f81750d-kube-api-access-v56rw\") on node \"crc\" DevicePath \"\""
Nov 28 15:04:52 crc kubenswrapper[4857]: I1128 15:04:52.518267 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n9mbm\" (UniqueName: \"kubernetes.io/projected/357ad1be-4369-4bbd-89ac-57cc7eefcc78-kube-api-access-n9mbm\") on node \"crc\" DevicePath \"\""
Nov 28 15:04:52 crc kubenswrapper[4857]: I1128 15:04:52.802774 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-jpt89"
Nov 28 15:04:52 crc kubenswrapper[4857]: I1128 15:04:52.802747 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-jpt89" event={"ID":"357ad1be-4369-4bbd-89ac-57cc7eefcc78","Type":"ContainerDied","Data":"704d0bc52dc055f323b2bb8c97f64978d88dc7aeed7c9f183c1df94986bd80a2"}
Nov 28 15:04:52 crc kubenswrapper[4857]: I1128 15:04:52.803094 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="704d0bc52dc055f323b2bb8c97f64978d88dc7aeed7c9f183c1df94986bd80a2"
Nov 28 15:04:52 crc kubenswrapper[4857]: I1128 15:04:52.807089 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-6h6vh"
Nov 28 15:04:52 crc kubenswrapper[4857]: I1128 15:04:52.807089 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-6h6vh" event={"ID":"7140a79d-253c-4ac2-b9b2-4df19f81750d","Type":"ContainerDied","Data":"96f484709dbd837bcf82ae3b771a6f89f2129b2dc791c867ea88594ef151b22e"}
Nov 28 15:04:52 crc kubenswrapper[4857]: I1128 15:04:52.807169 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="96f484709dbd837bcf82ae3b771a6f89f2129b2dc791c867ea88594ef151b22e"
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.319684 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-903a-account-create-update-x8xm5"
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.444422 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/db5538ba-ffc9-488c-99f2-025ac358a4f6-operator-scripts\") pod \"db5538ba-ffc9-488c-99f2-025ac358a4f6\" (UID: \"db5538ba-ffc9-488c-99f2-025ac358a4f6\") "
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.444923 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dw8rv\" (UniqueName: \"kubernetes.io/projected/db5538ba-ffc9-488c-99f2-025ac358a4f6-kube-api-access-dw8rv\") pod \"db5538ba-ffc9-488c-99f2-025ac358a4f6\" (UID: \"db5538ba-ffc9-488c-99f2-025ac358a4f6\") "
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.445105 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/db5538ba-ffc9-488c-99f2-025ac358a4f6-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "db5538ba-ffc9-488c-99f2-025ac358a4f6" (UID: "db5538ba-ffc9-488c-99f2-025ac358a4f6"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.445494 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/db5538ba-ffc9-488c-99f2-025ac358a4f6-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.448970 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db5538ba-ffc9-488c-99f2-025ac358a4f6-kube-api-access-dw8rv" (OuterVolumeSpecName: "kube-api-access-dw8rv") pod "db5538ba-ffc9-488c-99f2-025ac358a4f6" (UID: "db5538ba-ffc9-488c-99f2-025ac358a4f6"). InnerVolumeSpecName "kube-api-access-dw8rv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.510483 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-f25a-account-create-update-hzd8b"
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.516735 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-04d3-account-create-update-5cm48"
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.536145 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-r2q5q"
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.551746 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dw8rv\" (UniqueName: \"kubernetes.io/projected/db5538ba-ffc9-488c-99f2-025ac358a4f6-kube-api-access-dw8rv\") on node \"crc\" DevicePath \"\""
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.653333 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4e90d47-02ed-49b5-8911-9d9aa2850884-operator-scripts\") pod \"a4e90d47-02ed-49b5-8911-9d9aa2850884\" (UID: \"a4e90d47-02ed-49b5-8911-9d9aa2850884\") "
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.653476 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7cx9w\" (UniqueName: \"kubernetes.io/projected/705d6dbe-fab7-4f36-be33-c361ae31b8fb-kube-api-access-7cx9w\") pod \"705d6dbe-fab7-4f36-be33-c361ae31b8fb\" (UID: \"705d6dbe-fab7-4f36-be33-c361ae31b8fb\") "
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.653517 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/705d6dbe-fab7-4f36-be33-c361ae31b8fb-operator-scripts\") pod \"705d6dbe-fab7-4f36-be33-c361ae31b8fb\" (UID: \"705d6dbe-fab7-4f36-be33-c361ae31b8fb\") "
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.653613 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jbm6z\" (UniqueName: \"kubernetes.io/projected/a4e90d47-02ed-49b5-8911-9d9aa2850884-kube-api-access-jbm6z\") pod \"a4e90d47-02ed-49b5-8911-9d9aa2850884\" (UID: \"a4e90d47-02ed-49b5-8911-9d9aa2850884\") "
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.653753 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jmpd8\" (UniqueName: \"kubernetes.io/projected/19d127ed-bf35-493e-aeea-1103a89be2e7-kube-api-access-jmpd8\") pod \"19d127ed-bf35-493e-aeea-1103a89be2e7\" (UID: \"19d127ed-bf35-493e-aeea-1103a89be2e7\") "
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.653781 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/19d127ed-bf35-493e-aeea-1103a89be2e7-operator-scripts\") pod \"19d127ed-bf35-493e-aeea-1103a89be2e7\" (UID: \"19d127ed-bf35-493e-aeea-1103a89be2e7\") "
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.654012 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4e90d47-02ed-49b5-8911-9d9aa2850884-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a4e90d47-02ed-49b5-8911-9d9aa2850884" (UID: "a4e90d47-02ed-49b5-8911-9d9aa2850884"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.654294 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/705d6dbe-fab7-4f36-be33-c361ae31b8fb-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "705d6dbe-fab7-4f36-be33-c361ae31b8fb" (UID: "705d6dbe-fab7-4f36-be33-c361ae31b8fb"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
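[Editor's annotation, not part of the captured log] Each volume in the teardown above walks the same reconciler sequence: "operationExecutor.UnmountVolume started" (reconciler_common.go:159), then "UnmountVolume.TearDown succeeded" (operation_generator.go:803), then "Volume detached" (reconciler_common.go:293). Below is a small stdlib-only Go sketch that pairs the first and last steps to flag volumes whose detach was never logged; the sample lines are abbreviated copies from this log, and the checking logic is an illustrative assumption, not kubelet code.

package main

import (
	"fmt"
	"regexp"
)

// Abbreviated entries copied from the sequence above ("..." elides the
// UniqueName/UID fields); quotes inside messages appear as \" in the journal.
var entries = []string{
	`I1128 15:04:53.653753 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jmpd8\" ..."`,
	`I1128 15:04:53.653781 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" ..."`,
	`I1128 15:04:53.756204 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jmpd8\" ... DevicePath \"\""`,
}

var (
	started  = regexp.MustCompile(`UnmountVolume started for volume \\"([^\\]+)\\"`)
	detached = regexp.MustCompile(`Volume detached for volume \\"([^\\]+)\\"`)
)

func main() {
	pending := map[string]bool{}
	for _, e := range entries {
		if m := started.FindStringSubmatch(e); m != nil {
			pending[m[1]] = true
		}
		if m := detached.FindStringSubmatch(e); m != nil {
			delete(pending, m[1])
		}
	}
	// With these three samples, operator-scripts is still pending; its
	// detach entry appears further down in the log, at 15:04:53.756245.
	fmt.Println("unmounted but not yet detached:", pending)
}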
PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.654652 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/19d127ed-bf35-493e-aeea-1103a89be2e7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "19d127ed-bf35-493e-aeea-1103a89be2e7" (UID: "19d127ed-bf35-493e-aeea-1103a89be2e7"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.658440 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4e90d47-02ed-49b5-8911-9d9aa2850884-kube-api-access-jbm6z" (OuterVolumeSpecName: "kube-api-access-jbm6z") pod "a4e90d47-02ed-49b5-8911-9d9aa2850884" (UID: "a4e90d47-02ed-49b5-8911-9d9aa2850884"). InnerVolumeSpecName "kube-api-access-jbm6z". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.659682 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/705d6dbe-fab7-4f36-be33-c361ae31b8fb-kube-api-access-7cx9w" (OuterVolumeSpecName: "kube-api-access-7cx9w") pod "705d6dbe-fab7-4f36-be33-c361ae31b8fb" (UID: "705d6dbe-fab7-4f36-be33-c361ae31b8fb"). InnerVolumeSpecName "kube-api-access-7cx9w". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.663310 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19d127ed-bf35-493e-aeea-1103a89be2e7-kube-api-access-jmpd8" (OuterVolumeSpecName: "kube-api-access-jmpd8") pod "19d127ed-bf35-493e-aeea-1103a89be2e7" (UID: "19d127ed-bf35-493e-aeea-1103a89be2e7"). InnerVolumeSpecName "kube-api-access-jmpd8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.756204 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jmpd8\" (UniqueName: \"kubernetes.io/projected/19d127ed-bf35-493e-aeea-1103a89be2e7-kube-api-access-jmpd8\") on node \"crc\" DevicePath \"\""
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.756245 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/19d127ed-bf35-493e-aeea-1103a89be2e7-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.756255 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4e90d47-02ed-49b5-8911-9d9aa2850884-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.756265 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7cx9w\" (UniqueName: \"kubernetes.io/projected/705d6dbe-fab7-4f36-be33-c361ae31b8fb-kube-api-access-7cx9w\") on node \"crc\" DevicePath \"\""
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.756277 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/705d6dbe-fab7-4f36-be33-c361ae31b8fb-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.756286 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jbm6z\" (UniqueName: \"kubernetes.io/projected/a4e90d47-02ed-49b5-8911-9d9aa2850884-kube-api-access-jbm6z\") on node \"crc\" DevicePath \"\""
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.820010 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-f25a-account-create-update-hzd8b"
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.820015 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-f25a-account-create-update-hzd8b" event={"ID":"705d6dbe-fab7-4f36-be33-c361ae31b8fb","Type":"ContainerDied","Data":"09ac47724ef35603d87051ac57c04761f770839cf75a464b5259d4e4f1ec5e31"}
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.820611 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="09ac47724ef35603d87051ac57c04761f770839cf75a464b5259d4e4f1ec5e31"
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.821821 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-04d3-account-create-update-5cm48" event={"ID":"19d127ed-bf35-493e-aeea-1103a89be2e7","Type":"ContainerDied","Data":"35ccfdac099a33407a44ad892870ab9b9ace52ec7db9b95f42ad0c9d5d6a8336"}
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.821884 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="35ccfdac099a33407a44ad892870ab9b9ace52ec7db9b95f42ad0c9d5d6a8336"
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.821846 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-04d3-account-create-update-5cm48"
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.823905 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-r2q5q"
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.823902 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-r2q5q" event={"ID":"a4e90d47-02ed-49b5-8911-9d9aa2850884","Type":"ContainerDied","Data":"35b96e5151a62c4c241987bb7b3f2eb406cad480012820b920bc4d9c5d1b2f47"}
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.824148 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="35b96e5151a62c4c241987bb7b3f2eb406cad480012820b920bc4d9c5d1b2f47"
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.826080 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-903a-account-create-update-x8xm5" event={"ID":"db5538ba-ffc9-488c-99f2-025ac358a4f6","Type":"ContainerDied","Data":"ad422df0b0c708b3c543c3d42f472097b3046783a3f239dc362a1386cfc82d46"}
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.826121 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ad422df0b0c708b3c543c3d42f472097b3046783a3f239dc362a1386cfc82d46"
Nov 28 15:04:53 crc kubenswrapper[4857]: I1128 15:04:53.826146 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-903a-account-create-update-x8xm5"
Nov 28 15:04:59 crc kubenswrapper[4857]: I1128 15:04:59.691826 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-ctcbx"]
Nov 28 15:04:59 crc kubenswrapper[4857]: E1128 15:04:59.693202 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db5538ba-ffc9-488c-99f2-025ac358a4f6" containerName="mariadb-account-create-update"
Nov 28 15:04:59 crc kubenswrapper[4857]: I1128 15:04:59.693224 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="db5538ba-ffc9-488c-99f2-025ac358a4f6" containerName="mariadb-account-create-update"
Nov 28 15:04:59 crc kubenswrapper[4857]: E1128 15:04:59.693258 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="357ad1be-4369-4bbd-89ac-57cc7eefcc78" containerName="mariadb-database-create"
Nov 28 15:04:59 crc kubenswrapper[4857]: I1128 15:04:59.693268 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="357ad1be-4369-4bbd-89ac-57cc7eefcc78" containerName="mariadb-database-create"
Nov 28 15:04:59 crc kubenswrapper[4857]: E1128 15:04:59.693286 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19d127ed-bf35-493e-aeea-1103a89be2e7" containerName="mariadb-account-create-update"
Nov 28 15:04:59 crc kubenswrapper[4857]: I1128 15:04:59.693295 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="19d127ed-bf35-493e-aeea-1103a89be2e7" containerName="mariadb-account-create-update"
Nov 28 15:04:59 crc kubenswrapper[4857]: E1128 15:04:59.693309 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4e90d47-02ed-49b5-8911-9d9aa2850884" containerName="mariadb-database-create"
Nov 28 15:04:59 crc kubenswrapper[4857]: I1128 15:04:59.693649 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4e90d47-02ed-49b5-8911-9d9aa2850884" containerName="mariadb-database-create"
Nov 28 15:04:59 crc kubenswrapper[4857]: E1128 15:04:59.693879 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7140a79d-253c-4ac2-b9b2-4df19f81750d" containerName="mariadb-database-create"
Nov 28 15:04:59 crc kubenswrapper[4857]: I1128 15:04:59.693902 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="7140a79d-253c-4ac2-b9b2-4df19f81750d" containerName="mariadb-database-create"
Nov 28 15:04:59 crc kubenswrapper[4857]: E1128 15:04:59.693976 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="705d6dbe-fab7-4f36-be33-c361ae31b8fb" containerName="mariadb-account-create-update"
Nov 28 15:04:59 crc kubenswrapper[4857]: I1128 15:04:59.693984 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="705d6dbe-fab7-4f36-be33-c361ae31b8fb" containerName="mariadb-account-create-update"
Nov 28 15:04:59 crc kubenswrapper[4857]: I1128 15:04:59.695578 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="357ad1be-4369-4bbd-89ac-57cc7eefcc78" containerName="mariadb-database-create"
Nov 28 15:04:59 crc kubenswrapper[4857]: I1128 15:04:59.695636 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="705d6dbe-fab7-4f36-be33-c361ae31b8fb" containerName="mariadb-account-create-update"
Nov 28 15:04:59 crc kubenswrapper[4857]: I1128 15:04:59.695684 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="db5538ba-ffc9-488c-99f2-025ac358a4f6" containerName="mariadb-account-create-update"
Nov 28 15:04:59 crc kubenswrapper[4857]: I1128 15:04:59.695706 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="19d127ed-bf35-493e-aeea-1103a89be2e7" containerName="mariadb-account-create-update"
Nov 28 15:04:59 crc kubenswrapper[4857]: I1128 15:04:59.695715 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4e90d47-02ed-49b5-8911-9d9aa2850884" containerName="mariadb-database-create"
Nov 28 15:04:59 crc kubenswrapper[4857]: I1128 15:04:59.695740 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="7140a79d-253c-4ac2-b9b2-4df19f81750d" containerName="mariadb-database-create"
Nov 28 15:04:59 crc kubenswrapper[4857]: I1128 15:04:59.697157 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-ctcbx"
Nov 28 15:04:59 crc kubenswrapper[4857]: I1128 15:04:59.721683 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts"
Nov 28 15:04:59 crc kubenswrapper[4857]: I1128 15:04:59.722075 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-dptq8"
Nov 28 15:04:59 crc kubenswrapper[4857]: I1128 15:04:59.730418 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Nov 28 15:04:59 crc kubenswrapper[4857]: I1128 15:04:59.765840 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-ctcbx"]
Nov 28 15:04:59 crc kubenswrapper[4857]: I1128 15:04:59.790347 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kndhs\" (UniqueName: \"kubernetes.io/projected/a9e701d2-f81d-475e-8cda-c4a598fd2032-kube-api-access-kndhs\") pod \"nova-cell0-conductor-db-sync-ctcbx\" (UID: \"a9e701d2-f81d-475e-8cda-c4a598fd2032\") " pod="openstack/nova-cell0-conductor-db-sync-ctcbx"
Nov 28 15:04:59 crc kubenswrapper[4857]: I1128 15:04:59.790462 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9e701d2-f81d-475e-8cda-c4a598fd2032-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-ctcbx\" (UID: \"a9e701d2-f81d-475e-8cda-c4a598fd2032\") " pod="openstack/nova-cell0-conductor-db-sync-ctcbx"
Nov 28 15:04:59 crc kubenswrapper[4857]: I1128 15:04:59.790546 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9e701d2-f81d-475e-8cda-c4a598fd2032-config-data\") pod \"nova-cell0-conductor-db-sync-ctcbx\" (UID: \"a9e701d2-f81d-475e-8cda-c4a598fd2032\") " pod="openstack/nova-cell0-conductor-db-sync-ctcbx"
Nov 28 15:04:59 crc kubenswrapper[4857]: I1128 15:04:59.790577 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a9e701d2-f81d-475e-8cda-c4a598fd2032-scripts\") pod \"nova-cell0-conductor-db-sync-ctcbx\" (UID: \"a9e701d2-f81d-475e-8cda-c4a598fd2032\") " pod="openstack/nova-cell0-conductor-db-sync-ctcbx"
Nov 28 15:04:59 crc kubenswrapper[4857]: I1128 15:04:59.892653 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a9e701d2-f81d-475e-8cda-c4a598fd2032-scripts\") pod \"nova-cell0-conductor-db-sync-ctcbx\" (UID: \"a9e701d2-f81d-475e-8cda-c4a598fd2032\") " pod="openstack/nova-cell0-conductor-db-sync-ctcbx"
Nov 28 15:04:59 crc kubenswrapper[4857]: I1128 15:04:59.892783 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kndhs\" (UniqueName: \"kubernetes.io/projected/a9e701d2-f81d-475e-8cda-c4a598fd2032-kube-api-access-kndhs\") pod \"nova-cell0-conductor-db-sync-ctcbx\" (UID: \"a9e701d2-f81d-475e-8cda-c4a598fd2032\") " pod="openstack/nova-cell0-conductor-db-sync-ctcbx"
Nov 28 15:04:59 crc kubenswrapper[4857]: I1128 15:04:59.892806 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9e701d2-f81d-475e-8cda-c4a598fd2032-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-ctcbx\" (UID: \"a9e701d2-f81d-475e-8cda-c4a598fd2032\") " pod="openstack/nova-cell0-conductor-db-sync-ctcbx"
Nov 28 15:04:59 crc kubenswrapper[4857]: I1128 15:04:59.892850 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9e701d2-f81d-475e-8cda-c4a598fd2032-config-data\") pod \"nova-cell0-conductor-db-sync-ctcbx\" (UID: \"a9e701d2-f81d-475e-8cda-c4a598fd2032\") " pod="openstack/nova-cell0-conductor-db-sync-ctcbx"
Nov 28 15:04:59 crc kubenswrapper[4857]: I1128 15:04:59.899016 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9e701d2-f81d-475e-8cda-c4a598fd2032-config-data\") pod \"nova-cell0-conductor-db-sync-ctcbx\" (UID: \"a9e701d2-f81d-475e-8cda-c4a598fd2032\") " pod="openstack/nova-cell0-conductor-db-sync-ctcbx"
Nov 28 15:04:59 crc kubenswrapper[4857]: I1128 15:04:59.899321 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9e701d2-f81d-475e-8cda-c4a598fd2032-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-ctcbx\" (UID: \"a9e701d2-f81d-475e-8cda-c4a598fd2032\") " pod="openstack/nova-cell0-conductor-db-sync-ctcbx"
Nov 28 15:04:59 crc kubenswrapper[4857]: I1128 15:04:59.899669 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a9e701d2-f81d-475e-8cda-c4a598fd2032-scripts\") pod \"nova-cell0-conductor-db-sync-ctcbx\" (UID: \"a9e701d2-f81d-475e-8cda-c4a598fd2032\") " pod="openstack/nova-cell0-conductor-db-sync-ctcbx"
Nov 28 15:04:59 crc kubenswrapper[4857]: I1128 15:04:59.923580 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kndhs\" (UniqueName: \"kubernetes.io/projected/a9e701d2-f81d-475e-8cda-c4a598fd2032-kube-api-access-kndhs\") pod \"nova-cell0-conductor-db-sync-ctcbx\" (UID: \"a9e701d2-f81d-475e-8cda-c4a598fd2032\") " pod="openstack/nova-cell0-conductor-db-sync-ctcbx"
Nov 28 15:05:00 crc kubenswrapper[4857]: I1128 15:05:00.060684 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-ctcbx"
Nov 28 15:05:00 crc kubenswrapper[4857]: I1128 15:05:00.593964 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-ctcbx"]
Nov 28 15:05:00 crc kubenswrapper[4857]: I1128 15:05:00.892187 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-ctcbx" event={"ID":"a9e701d2-f81d-475e-8cda-c4a598fd2032","Type":"ContainerStarted","Data":"d95e21f48ecab9c7c41b2116c8df961088698c68b9c185adcfeb6010137692e9"}
Nov 28 15:05:01 crc kubenswrapper[4857]: I1128 15:05:01.905809 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-ctcbx" event={"ID":"a9e701d2-f81d-475e-8cda-c4a598fd2032","Type":"ContainerStarted","Data":"3b8c4435d954477bed19b69cee5ff200b79a8620111a52046c1cf2082711f02c"}
Nov 28 15:05:01 crc kubenswrapper[4857]: I1128 15:05:01.945692 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-ctcbx" podStartSLOduration=2.945669756 podStartE2EDuration="2.945669756s" podCreationTimestamp="2025-11-28 15:04:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:05:01.93346449 +0000 UTC m=+5752.057405957" watchObservedRunningTime="2025-11-28 15:05:01.945669756 +0000 UTC m=+5752.069611203"
Nov 28 15:05:06 crc kubenswrapper[4857]: I1128 15:05:06.956129 4857 generic.go:334] "Generic (PLEG): container finished" podID="a9e701d2-f81d-475e-8cda-c4a598fd2032" containerID="3b8c4435d954477bed19b69cee5ff200b79a8620111a52046c1cf2082711f02c" exitCode=0
Nov 28 15:05:06 crc kubenswrapper[4857]: I1128 15:05:06.956274 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-ctcbx" event={"ID":"a9e701d2-f81d-475e-8cda-c4a598fd2032","Type":"ContainerDied","Data":"3b8c4435d954477bed19b69cee5ff200b79a8620111a52046c1cf2082711f02c"}
Nov 28 15:05:08 crc kubenswrapper[4857]: I1128 15:05:08.457661 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-ctcbx"
Nov 28 15:05:08 crc kubenswrapper[4857]: I1128 15:05:08.588719 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9e701d2-f81d-475e-8cda-c4a598fd2032-config-data\") pod \"a9e701d2-f81d-475e-8cda-c4a598fd2032\" (UID: \"a9e701d2-f81d-475e-8cda-c4a598fd2032\") "
Nov 28 15:05:08 crc kubenswrapper[4857]: I1128 15:05:08.588785 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a9e701d2-f81d-475e-8cda-c4a598fd2032-scripts\") pod \"a9e701d2-f81d-475e-8cda-c4a598fd2032\" (UID: \"a9e701d2-f81d-475e-8cda-c4a598fd2032\") "
Nov 28 15:05:08 crc kubenswrapper[4857]: I1128 15:05:08.588881 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9e701d2-f81d-475e-8cda-c4a598fd2032-combined-ca-bundle\") pod \"a9e701d2-f81d-475e-8cda-c4a598fd2032\" (UID: \"a9e701d2-f81d-475e-8cda-c4a598fd2032\") "
Nov 28 15:05:08 crc kubenswrapper[4857]: I1128 15:05:08.588992 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kndhs\" (UniqueName: \"kubernetes.io/projected/a9e701d2-f81d-475e-8cda-c4a598fd2032-kube-api-access-kndhs\") pod \"a9e701d2-f81d-475e-8cda-c4a598fd2032\" (UID: \"a9e701d2-f81d-475e-8cda-c4a598fd2032\") "
Nov 28 15:05:08 crc kubenswrapper[4857]: I1128 15:05:08.595347 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a9e701d2-f81d-475e-8cda-c4a598fd2032-scripts" (OuterVolumeSpecName: "scripts") pod "a9e701d2-f81d-475e-8cda-c4a598fd2032" (UID: "a9e701d2-f81d-475e-8cda-c4a598fd2032"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:05:08 crc kubenswrapper[4857]: I1128 15:05:08.595597 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a9e701d2-f81d-475e-8cda-c4a598fd2032-kube-api-access-kndhs" (OuterVolumeSpecName: "kube-api-access-kndhs") pod "a9e701d2-f81d-475e-8cda-c4a598fd2032" (UID: "a9e701d2-f81d-475e-8cda-c4a598fd2032"). InnerVolumeSpecName "kube-api-access-kndhs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:05:08 crc kubenswrapper[4857]: I1128 15:05:08.617832 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a9e701d2-f81d-475e-8cda-c4a598fd2032-config-data" (OuterVolumeSpecName: "config-data") pod "a9e701d2-f81d-475e-8cda-c4a598fd2032" (UID: "a9e701d2-f81d-475e-8cda-c4a598fd2032"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:05:08 crc kubenswrapper[4857]: I1128 15:05:08.618713 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a9e701d2-f81d-475e-8cda-c4a598fd2032-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a9e701d2-f81d-475e-8cda-c4a598fd2032" (UID: "a9e701d2-f81d-475e-8cda-c4a598fd2032"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:05:08 crc kubenswrapper[4857]: I1128 15:05:08.691179 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9e701d2-f81d-475e-8cda-c4a598fd2032-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 15:05:08 crc kubenswrapper[4857]: I1128 15:05:08.691217 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kndhs\" (UniqueName: \"kubernetes.io/projected/a9e701d2-f81d-475e-8cda-c4a598fd2032-kube-api-access-kndhs\") on node \"crc\" DevicePath \"\""
Nov 28 15:05:08 crc kubenswrapper[4857]: I1128 15:05:08.691231 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9e701d2-f81d-475e-8cda-c4a598fd2032-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 15:05:08 crc kubenswrapper[4857]: I1128 15:05:08.691243 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a9e701d2-f81d-475e-8cda-c4a598fd2032-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 15:05:08 crc kubenswrapper[4857]: I1128 15:05:08.983411 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-ctcbx" event={"ID":"a9e701d2-f81d-475e-8cda-c4a598fd2032","Type":"ContainerDied","Data":"d95e21f48ecab9c7c41b2116c8df961088698c68b9c185adcfeb6010137692e9"}
Nov 28 15:05:08 crc kubenswrapper[4857]: I1128 15:05:08.983468 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d95e21f48ecab9c7c41b2116c8df961088698c68b9c185adcfeb6010137692e9"
Nov 28 15:05:08 crc kubenswrapper[4857]: I1128 15:05:08.983535 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-ctcbx"
Nov 28 15:05:09 crc kubenswrapper[4857]: I1128 15:05:09.087359 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 28 15:05:09 crc kubenswrapper[4857]: E1128 15:05:09.088088 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9e701d2-f81d-475e-8cda-c4a598fd2032" containerName="nova-cell0-conductor-db-sync"
Nov 28 15:05:09 crc kubenswrapper[4857]: I1128 15:05:09.088171 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9e701d2-f81d-475e-8cda-c4a598fd2032" containerName="nova-cell0-conductor-db-sync"
Nov 28 15:05:09 crc kubenswrapper[4857]: I1128 15:05:09.088401 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9e701d2-f81d-475e-8cda-c4a598fd2032" containerName="nova-cell0-conductor-db-sync"
Nov 28 15:05:09 crc kubenswrapper[4857]: I1128 15:05:09.089312 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Nov 28 15:05:09 crc kubenswrapper[4857]: I1128 15:05:09.094860 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-dptq8"
Nov 28 15:05:09 crc kubenswrapper[4857]: I1128 15:05:09.095321 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Nov 28 15:05:09 crc kubenswrapper[4857]: I1128 15:05:09.109978 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 28 15:05:09 crc kubenswrapper[4857]: I1128 15:05:09.207597 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1478276e-09fe-4ff5-bf7e-235cc6c59ad2-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"1478276e-09fe-4ff5-bf7e-235cc6c59ad2\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 15:05:09 crc kubenswrapper[4857]: I1128 15:05:09.207649 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1478276e-09fe-4ff5-bf7e-235cc6c59ad2-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"1478276e-09fe-4ff5-bf7e-235cc6c59ad2\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 15:05:09 crc kubenswrapper[4857]: I1128 15:05:09.207740 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z66k7\" (UniqueName: \"kubernetes.io/projected/1478276e-09fe-4ff5-bf7e-235cc6c59ad2-kube-api-access-z66k7\") pod \"nova-cell0-conductor-0\" (UID: \"1478276e-09fe-4ff5-bf7e-235cc6c59ad2\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 15:05:09 crc kubenswrapper[4857]: I1128 15:05:09.308991 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1478276e-09fe-4ff5-bf7e-235cc6c59ad2-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"1478276e-09fe-4ff5-bf7e-235cc6c59ad2\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 15:05:09 crc kubenswrapper[4857]: I1128 15:05:09.309032 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1478276e-09fe-4ff5-bf7e-235cc6c59ad2-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"1478276e-09fe-4ff5-bf7e-235cc6c59ad2\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 15:05:09 crc kubenswrapper[4857]: I1128 15:05:09.309085 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z66k7\" (UniqueName: \"kubernetes.io/projected/1478276e-09fe-4ff5-bf7e-235cc6c59ad2-kube-api-access-z66k7\") pod \"nova-cell0-conductor-0\" (UID: \"1478276e-09fe-4ff5-bf7e-235cc6c59ad2\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 15:05:09 crc kubenswrapper[4857]: I1128 15:05:09.319755 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1478276e-09fe-4ff5-bf7e-235cc6c59ad2-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"1478276e-09fe-4ff5-bf7e-235cc6c59ad2\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 15:05:09 crc kubenswrapper[4857]: I1128 15:05:09.331690 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1478276e-09fe-4ff5-bf7e-235cc6c59ad2-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"1478276e-09fe-4ff5-bf7e-235cc6c59ad2\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 15:05:09 crc kubenswrapper[4857]: I1128 15:05:09.335881 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z66k7\" (UniqueName: \"kubernetes.io/projected/1478276e-09fe-4ff5-bf7e-235cc6c59ad2-kube-api-access-z66k7\") pod \"nova-cell0-conductor-0\" (UID: \"1478276e-09fe-4ff5-bf7e-235cc6c59ad2\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 15:05:09 crc kubenswrapper[4857]: I1128 15:05:09.410694 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Nov 28 15:05:10 crc kubenswrapper[4857]: I1128 15:05:10.597769 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 28 15:05:10 crc kubenswrapper[4857]: I1128 15:05:10.833300 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-c2mg9"]
Nov 28 15:05:10 crc kubenswrapper[4857]: I1128 15:05:10.835265 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c2mg9"
Nov 28 15:05:10 crc kubenswrapper[4857]: I1128 15:05:10.850858 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c2mg9"]
Nov 28 15:05:10 crc kubenswrapper[4857]: I1128 15:05:10.943382 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52759ded-d5c3-4670-8be4-387b171ac97d-utilities\") pod \"certified-operators-c2mg9\" (UID: \"52759ded-d5c3-4670-8be4-387b171ac97d\") " pod="openshift-marketplace/certified-operators-c2mg9"
Nov 28 15:05:10 crc kubenswrapper[4857]: I1128 15:05:10.943853 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nsv5p\" (UniqueName: \"kubernetes.io/projected/52759ded-d5c3-4670-8be4-387b171ac97d-kube-api-access-nsv5p\") pod \"certified-operators-c2mg9\" (UID: \"52759ded-d5c3-4670-8be4-387b171ac97d\") " pod="openshift-marketplace/certified-operators-c2mg9"
Nov 28 15:05:10 crc kubenswrapper[4857]: I1128 15:05:10.943910 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52759ded-d5c3-4670-8be4-387b171ac97d-catalog-content\") pod \"certified-operators-c2mg9\" (UID: \"52759ded-d5c3-4670-8be4-387b171ac97d\") " pod="openshift-marketplace/certified-operators-c2mg9"
Nov 28 15:05:11 crc kubenswrapper[4857]: I1128 15:05:11.002559 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"1478276e-09fe-4ff5-bf7e-235cc6c59ad2","Type":"ContainerStarted","Data":"62c31a6b99d612a7e0661d6e4a83c9853d264fe7c311bc62814b86ea7f4f7dea"}
Nov 28 15:05:11 crc kubenswrapper[4857]: I1128 15:05:11.002603 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"1478276e-09fe-4ff5-bf7e-235cc6c59ad2","Type":"ContainerStarted","Data":"8869393c16267847214bc2de4321089854ec7402e3a20c6412fc3614abfbf1d4"}
Nov 28 15:05:11 crc kubenswrapper[4857]: I1128 15:05:11.002747 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0"
Nov 28 15:05:11 crc kubenswrapper[4857]: I1128 15:05:11.019837 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.019820792 podStartE2EDuration="2.019820792s" podCreationTimestamp="2025-11-28 15:05:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:05:11.015591239 +0000 UTC m=+5761.139532676" watchObservedRunningTime="2025-11-28 15:05:11.019820792 +0000 UTC m=+5761.143762229"
Nov 28 15:05:11 crc kubenswrapper[4857]: I1128 15:05:11.045844 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nsv5p\" (UniqueName: \"kubernetes.io/projected/52759ded-d5c3-4670-8be4-387b171ac97d-kube-api-access-nsv5p\") pod \"certified-operators-c2mg9\" (UID: \"52759ded-d5c3-4670-8be4-387b171ac97d\") " pod="openshift-marketplace/certified-operators-c2mg9"
Nov 28 15:05:11 crc kubenswrapper[4857]: I1128 15:05:11.045981 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52759ded-d5c3-4670-8be4-387b171ac97d-catalog-content\") pod \"certified-operators-c2mg9\" (UID: \"52759ded-d5c3-4670-8be4-387b171ac97d\") " pod="openshift-marketplace/certified-operators-c2mg9"
Nov 28 15:05:11 crc kubenswrapper[4857]: I1128 15:05:11.046035 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52759ded-d5c3-4670-8be4-387b171ac97d-utilities\") pod \"certified-operators-c2mg9\" (UID: \"52759ded-d5c3-4670-8be4-387b171ac97d\") " pod="openshift-marketplace/certified-operators-c2mg9"
Nov 28 15:05:11 crc kubenswrapper[4857]: I1128 15:05:11.046608 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52759ded-d5c3-4670-8be4-387b171ac97d-utilities\") pod \"certified-operators-c2mg9\" (UID: \"52759ded-d5c3-4670-8be4-387b171ac97d\") " pod="openshift-marketplace/certified-operators-c2mg9"
Nov 28 15:05:11 crc kubenswrapper[4857]: I1128 15:05:11.046633 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52759ded-d5c3-4670-8be4-387b171ac97d-catalog-content\") pod \"certified-operators-c2mg9\" (UID: \"52759ded-d5c3-4670-8be4-387b171ac97d\") " pod="openshift-marketplace/certified-operators-c2mg9"
Nov 28 15:05:11 crc kubenswrapper[4857]: I1128 15:05:11.067068 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nsv5p\" (UniqueName: \"kubernetes.io/projected/52759ded-d5c3-4670-8be4-387b171ac97d-kube-api-access-nsv5p\") pod \"certified-operators-c2mg9\" (UID: \"52759ded-d5c3-4670-8be4-387b171ac97d\") " pod="openshift-marketplace/certified-operators-c2mg9"
Nov 28 15:05:11 crc kubenswrapper[4857]: I1128 15:05:11.159315 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c2mg9"
Nov 28 15:05:11 crc kubenswrapper[4857]: I1128 15:05:11.308595 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 15:05:11 crc kubenswrapper[4857]: I1128 15:05:11.308983 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 15:05:11 crc kubenswrapper[4857]: I1128 15:05:11.661395 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c2mg9"]
Nov 28 15:05:11 crc kubenswrapper[4857]: W1128 15:05:11.670565 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod52759ded_d5c3_4670_8be4_387b171ac97d.slice/crio-fe336536d7b97d567c50ede5a3de9583ce9ce153d834cdb4badf4cde7f899946 WatchSource:0}: Error finding container fe336536d7b97d567c50ede5a3de9583ce9ce153d834cdb4badf4cde7f899946: Status 404 returned error can't find the container with id fe336536d7b97d567c50ede5a3de9583ce9ce153d834cdb4badf4cde7f899946
Nov 28 15:05:12 crc kubenswrapper[4857]: I1128 15:05:12.011465 4857 generic.go:334] "Generic (PLEG): container finished" podID="52759ded-d5c3-4670-8be4-387b171ac97d" containerID="56727d3cf65b3d32cfa483c994c376b9874042e1cb2b9909a2d307fa0e125180" exitCode=0
Nov 28 15:05:12 crc kubenswrapper[4857]: I1128 15:05:12.011576 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c2mg9" event={"ID":"52759ded-d5c3-4670-8be4-387b171ac97d","Type":"ContainerDied","Data":"56727d3cf65b3d32cfa483c994c376b9874042e1cb2b9909a2d307fa0e125180"}
Nov 28 15:05:12 crc kubenswrapper[4857]: I1128 15:05:12.012032 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c2mg9" event={"ID":"52759ded-d5c3-4670-8be4-387b171ac97d","Type":"ContainerStarted","Data":"fe336536d7b97d567c50ede5a3de9583ce9ce153d834cdb4badf4cde7f899946"}
Nov 28 15:05:12 crc kubenswrapper[4857]: I1128 15:05:12.013556 4857 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 28 15:05:14 crc kubenswrapper[4857]: I1128 15:05:14.034941 4857 generic.go:334] "Generic (PLEG): container finished" podID="52759ded-d5c3-4670-8be4-387b171ac97d" containerID="d0586034b0e1ec57aaf810903325384698f3aeda025cd7630dcc3443257a3aa3" exitCode=0
Nov 28 15:05:14 crc kubenswrapper[4857]: I1128 15:05:14.035053 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c2mg9" event={"ID":"52759ded-d5c3-4670-8be4-387b171ac97d","Type":"ContainerDied","Data":"d0586034b0e1ec57aaf810903325384698f3aeda025cd7630dcc3443257a3aa3"}
Nov 28 15:05:15 crc kubenswrapper[4857]: I1128 15:05:15.051695 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c2mg9" event={"ID":"52759ded-d5c3-4670-8be4-387b171ac97d","Type":"ContainerStarted","Data":"c74c856a899b9d5c2e547d04d22bc1bd3f90653812ae75b653ac36de7896a1cb"}
Nov 28 15:05:15 crc kubenswrapper[4857]: I1128 15:05:15.085360 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-c2mg9" podStartSLOduration=2.580832918 podStartE2EDuration="5.085335175s" podCreationTimestamp="2025-11-28 15:05:10 +0000 UTC" firstStartedPulling="2025-11-28 15:05:12.0133423 +0000 UTC m=+5762.137283737" lastFinishedPulling="2025-11-28 15:05:14.517844527 +0000 UTC m=+5764.641785994" observedRunningTime="2025-11-28 15:05:15.081383779 +0000 UTC m=+5765.205325226" watchObservedRunningTime="2025-11-28 15:05:15.085335175 +0000 UTC m=+5765.209276652"
Nov 28 15:05:17 crc kubenswrapper[4857]: I1128 15:05:17.997835 4857 scope.go:117] "RemoveContainer" containerID="3baff6510f8a2e626f59d7bb0fd4871cf8ce8a5653410ee1d0f443864bec4fc5"
Nov 28 15:05:18 crc kubenswrapper[4857]: I1128 15:05:18.026431 4857 scope.go:117] "RemoveContainer" containerID="182ea2ef9e9ffe6017b9dc5af6c4d660035946c417d9373027669789c1a43ff7"
Nov 28 15:05:18 crc kubenswrapper[4857]: I1128 15:05:18.116165 4857 scope.go:117] "RemoveContainer" containerID="6a5aad84ec212bcba85d5b36f942483ddda595a2a0edacbd0f90f0d6499f7106"
Nov 28 15:05:18 crc kubenswrapper[4857]: I1128 15:05:18.192714 4857 scope.go:117] "RemoveContainer" containerID="979a6c7f6b95eed556017d8765bbabb1f12e983ef8f68673d7c4ccdde0f578df"
Nov 28 15:05:18 crc kubenswrapper[4857]: I1128 15:05:18.210926 4857 scope.go:117] "RemoveContainer" containerID="62ac0b82ba99f6cf5bcf3a4a9eb216069875e582c58671d65efe3f41ec3cf77b"
Nov 28 15:05:18 crc kubenswrapper[4857]: I1128 15:05:18.243185 4857 scope.go:117] "RemoveContainer" containerID="6968c94745783c56961b5594978f2546c7fe813be98b7aa148992629cb221b09"
Nov 28 15:05:19 crc kubenswrapper[4857]: I1128 15:05:19.450572 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0"
Nov 28 15:05:19 crc kubenswrapper[4857]: I1128 15:05:19.993887 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-27hfs"]
Nov 28 15:05:19 crc kubenswrapper[4857]: I1128 15:05:19.997388 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-27hfs"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.003142 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.003781 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.018846 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-27hfs"]
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.144350 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c3aaa36-829f-4244-a40a-73810966b9ea-config-data\") pod \"nova-cell0-cell-mapping-27hfs\" (UID: \"2c3aaa36-829f-4244-a40a-73810966b9ea\") " pod="openstack/nova-cell0-cell-mapping-27hfs"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.144415 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2c3aaa36-829f-4244-a40a-73810966b9ea-scripts\") pod \"nova-cell0-cell-mapping-27hfs\" (UID: \"2c3aaa36-829f-4244-a40a-73810966b9ea\") " pod="openstack/nova-cell0-cell-mapping-27hfs"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.144502 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ldbgn\" (UniqueName: \"kubernetes.io/projected/2c3aaa36-829f-4244-a40a-73810966b9ea-kube-api-access-ldbgn\") pod \"nova-cell0-cell-mapping-27hfs\" (UID: \"2c3aaa36-829f-4244-a40a-73810966b9ea\") " pod="openstack/nova-cell0-cell-mapping-27hfs"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.144737 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c3aaa36-829f-4244-a40a-73810966b9ea-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-27hfs\" (UID: \"2c3aaa36-829f-4244-a40a-73810966b9ea\") " pod="openstack/nova-cell0-cell-mapping-27hfs"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.175974 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.177797 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.193410 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.194576 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.249195 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c3aaa36-829f-4244-a40a-73810966b9ea-config-data\") pod \"nova-cell0-cell-mapping-27hfs\" (UID: \"2c3aaa36-829f-4244-a40a-73810966b9ea\") " pod="openstack/nova-cell0-cell-mapping-27hfs"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.249243 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2c3aaa36-829f-4244-a40a-73810966b9ea-scripts\") pod \"nova-cell0-cell-mapping-27hfs\" (UID: \"2c3aaa36-829f-4244-a40a-73810966b9ea\") " pod="openstack/nova-cell0-cell-mapping-27hfs"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.249291 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ldbgn\" (UniqueName: \"kubernetes.io/projected/2c3aaa36-829f-4244-a40a-73810966b9ea-kube-api-access-ldbgn\") pod \"nova-cell0-cell-mapping-27hfs\" (UID: \"2c3aaa36-829f-4244-a40a-73810966b9ea\") " pod="openstack/nova-cell0-cell-mapping-27hfs"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.249335 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c3aaa36-829f-4244-a40a-73810966b9ea-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-27hfs\" (UID: \"2c3aaa36-829f-4244-a40a-73810966b9ea\") " pod="openstack/nova-cell0-cell-mapping-27hfs"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.260208 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c3aaa36-829f-4244-a40a-73810966b9ea-config-data\") pod \"nova-cell0-cell-mapping-27hfs\" (UID: \"2c3aaa36-829f-4244-a40a-73810966b9ea\") " pod="openstack/nova-cell0-cell-mapping-27hfs"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.260278 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2c3aaa36-829f-4244-a40a-73810966b9ea-scripts\") pod \"nova-cell0-cell-mapping-27hfs\" (UID: \"2c3aaa36-829f-4244-a40a-73810966b9ea\") " pod="openstack/nova-cell0-cell-mapping-27hfs"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.268614 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c3aaa36-829f-4244-a40a-73810966b9ea-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-27hfs\" (UID: \"2c3aaa36-829f-4244-a40a-73810966b9ea\") " pod="openstack/nova-cell0-cell-mapping-27hfs"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.282728 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ldbgn\" (UniqueName: \"kubernetes.io/projected/2c3aaa36-829f-4244-a40a-73810966b9ea-kube-api-access-ldbgn\") pod \"nova-cell0-cell-mapping-27hfs\" (UID: \"2c3aaa36-829f-4244-a40a-73810966b9ea\") " pod="openstack/nova-cell0-cell-mapping-27hfs"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.344466 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-27hfs"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.353490 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/25adb9bf-3f43-4e42-a7c6-00b8a1683722-logs\") pod \"nova-api-0\" (UID: \"25adb9bf-3f43-4e42-a7c6-00b8a1683722\") " pod="openstack/nova-api-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.353545 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dbdkh\" (UniqueName: \"kubernetes.io/projected/25adb9bf-3f43-4e42-a7c6-00b8a1683722-kube-api-access-dbdkh\") pod \"nova-api-0\" (UID: \"25adb9bf-3f43-4e42-a7c6-00b8a1683722\") " pod="openstack/nova-api-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.353588 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25adb9bf-3f43-4e42-a7c6-00b8a1683722-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"25adb9bf-3f43-4e42-a7c6-00b8a1683722\") " pod="openstack/nova-api-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.353660 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/25adb9bf-3f43-4e42-a7c6-00b8a1683722-config-data\") pod \"nova-api-0\" (UID: \"25adb9bf-3f43-4e42-a7c6-00b8a1683722\") " pod="openstack/nova-api-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.373195 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.375383 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.414696 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.425995 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.458070 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.462100 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.470914 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/25adb9bf-3f43-4e42-a7c6-00b8a1683722-logs\") pod \"nova-api-0\" (UID: \"25adb9bf-3f43-4e42-a7c6-00b8a1683722\") " pod="openstack/nova-api-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.471050 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dbdkh\" (UniqueName: \"kubernetes.io/projected/25adb9bf-3f43-4e42-a7c6-00b8a1683722-kube-api-access-dbdkh\") pod \"nova-api-0\" (UID: \"25adb9bf-3f43-4e42-a7c6-00b8a1683722\") " pod="openstack/nova-api-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.471097 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25adb9bf-3f43-4e42-a7c6-00b8a1683722-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"25adb9bf-3f43-4e42-a7c6-00b8a1683722\") " pod="openstack/nova-api-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.471150 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/25adb9bf-3f43-4e42-a7c6-00b8a1683722-config-data\") pod \"nova-api-0\" (UID: \"25adb9bf-3f43-4e42-a7c6-00b8a1683722\") " pod="openstack/nova-api-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.489653 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.490457 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/25adb9bf-3f43-4e42-a7c6-00b8a1683722-logs\") pod \"nova-api-0\" (UID: \"25adb9bf-3f43-4e42-a7c6-00b8a1683722\") " pod="openstack/nova-api-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.581667 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dbdkh\" (UniqueName: \"kubernetes.io/projected/25adb9bf-3f43-4e42-a7c6-00b8a1683722-kube-api-access-dbdkh\") pod \"nova-api-0\" (UID: \"25adb9bf-3f43-4e42-a7c6-00b8a1683722\") " pod="openstack/nova-api-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.584438 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/25adb9bf-3f43-4e42-a7c6-00b8a1683722-config-data\") pod \"nova-api-0\" (UID: \"25adb9bf-3f43-4e42-a7c6-00b8a1683722\") " pod="openstack/nova-api-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.585264 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25adb9bf-3f43-4e42-a7c6-00b8a1683722-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"25adb9bf-3f43-4e42-a7c6-00b8a1683722\") " pod="openstack/nova-api-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.587649 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.589511 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e769330-c4f2-4f98-bdbb-e4c70cfb9c30-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"6e769330-c4f2-4f98-bdbb-e4c70cfb9c30\") " pod="openstack/nova-metadata-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.589714 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69cq7\" (UniqueName: \"kubernetes.io/projected/7c7490c0-7ddc-497f-98fa-7588bc02d9b0-kube-api-access-69cq7\") pod \"nova-scheduler-0\" (UID: \"7c7490c0-7ddc-497f-98fa-7588bc02d9b0\") " pod="openstack/nova-scheduler-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.590218 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6e769330-c4f2-4f98-bdbb-e4c70cfb9c30-logs\") pod \"nova-metadata-0\" (UID: \"6e769330-c4f2-4f98-bdbb-e4c70cfb9c30\") " pod="openstack/nova-metadata-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.598450 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c7490c0-7ddc-497f-98fa-7588bc02d9b0-config-data\") pod \"nova-scheduler-0\" (UID: \"7c7490c0-7ddc-497f-98fa-7588bc02d9b0\") " pod="openstack/nova-scheduler-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.598539 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r8gfq\" (UniqueName: \"kubernetes.io/projected/6e769330-c4f2-4f98-bdbb-e4c70cfb9c30-kube-api-access-r8gfq\") pod \"nova-metadata-0\" (UID: \"6e769330-c4f2-4f98-bdbb-e4c70cfb9c30\") " pod="openstack/nova-metadata-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.598721 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c7490c0-7ddc-497f-98fa-7588bc02d9b0-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7c7490c0-7ddc-497f-98fa-7588bc02d9b0\") " pod="openstack/nova-scheduler-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.598747 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e769330-c4f2-4f98-bdbb-e4c70cfb9c30-config-data\") pod \"nova-metadata-0\" (UID: \"6e769330-c4f2-4f98-bdbb-e4c70cfb9c30\") " pod="openstack/nova-metadata-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.604264 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.605890 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.612978 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.622045 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.628534 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-68986bcc97-7cg9g"]
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.630526 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68986bcc97-7cg9g"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.646364 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-68986bcc97-7cg9g"]
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.700442 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e769330-c4f2-4f98-bdbb-e4c70cfb9c30-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"6e769330-c4f2-4f98-bdbb-e4c70cfb9c30\") " pod="openstack/nova-metadata-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.700497 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-69cq7\" (UniqueName: \"kubernetes.io/projected/7c7490c0-7ddc-497f-98fa-7588bc02d9b0-kube-api-access-69cq7\") pod \"nova-scheduler-0\" (UID: \"7c7490c0-7ddc-497f-98fa-7588bc02d9b0\") " pod="openstack/nova-scheduler-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.700544 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f559063-f4d6-4d59-988c-a1838761d3af-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"3f559063-f4d6-4d59-988c-a1838761d3af\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.700571 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6e769330-c4f2-4f98-bdbb-e4c70cfb9c30-logs\") pod \"nova-metadata-0\" (UID: \"6e769330-c4f2-4f98-bdbb-e4c70cfb9c30\") " pod="openstack/nova-metadata-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.700603 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f559063-f4d6-4d59-988c-a1838761d3af-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"3f559063-f4d6-4d59-988c-a1838761d3af\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.700659 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c7490c0-7ddc-497f-98fa-7588bc02d9b0-config-data\") pod \"nova-scheduler-0\" (UID: \"7c7490c0-7ddc-497f-98fa-7588bc02d9b0\") " pod="openstack/nova-scheduler-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.700688 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r8gfq\" (UniqueName: \"kubernetes.io/projected/6e769330-c4f2-4f98-bdbb-e4c70cfb9c30-kube-api-access-r8gfq\") pod \"nova-metadata-0\" (UID: \"6e769330-c4f2-4f98-bdbb-e4c70cfb9c30\") " pod="openstack/nova-metadata-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.700751 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c7490c0-7ddc-497f-98fa-7588bc02d9b0-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7c7490c0-7ddc-497f-98fa-7588bc02d9b0\") " pod="openstack/nova-scheduler-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.700776 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e769330-c4f2-4f98-bdbb-e4c70cfb9c30-config-data\") pod \"nova-metadata-0\" (UID: \"6e769330-c4f2-4f98-bdbb-e4c70cfb9c30\") " pod="openstack/nova-metadata-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.700810 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7t66g\" (UniqueName: \"kubernetes.io/projected/3f559063-f4d6-4d59-988c-a1838761d3af-kube-api-access-7t66g\") pod \"nova-cell1-novncproxy-0\" (UID: \"3f559063-f4d6-4d59-988c-a1838761d3af\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.701140 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6e769330-c4f2-4f98-bdbb-e4c70cfb9c30-logs\") pod \"nova-metadata-0\" (UID: \"6e769330-c4f2-4f98-bdbb-e4c70cfb9c30\") " pod="openstack/nova-metadata-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.707267 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c7490c0-7ddc-497f-98fa-7588bc02d9b0-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7c7490c0-7ddc-497f-98fa-7588bc02d9b0\") " pod="openstack/nova-scheduler-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.710341 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e769330-c4f2-4f98-bdbb-e4c70cfb9c30-config-data\") pod \"nova-metadata-0\" (UID: \"6e769330-c4f2-4f98-bdbb-e4c70cfb9c30\") " pod="openstack/nova-metadata-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.711624 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e769330-c4f2-4f98-bdbb-e4c70cfb9c30-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"6e769330-c4f2-4f98-bdbb-e4c70cfb9c30\") " pod="openstack/nova-metadata-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.716400 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c7490c0-7ddc-497f-98fa-7588bc02d9b0-config-data\") pod \"nova-scheduler-0\" (UID: \"7c7490c0-7ddc-497f-98fa-7588bc02d9b0\") " pod="openstack/nova-scheduler-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.748160 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r8gfq\" (UniqueName: \"kubernetes.io/projected/6e769330-c4f2-4f98-bdbb-e4c70cfb9c30-kube-api-access-r8gfq\") pod \"nova-metadata-0\" (UID: \"6e769330-c4f2-4f98-bdbb-e4c70cfb9c30\") " pod="openstack/nova-metadata-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.751168 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-69cq7\" (UniqueName: \"kubernetes.io/projected/7c7490c0-7ddc-497f-98fa-7588bc02d9b0-kube-api-access-69cq7\") pod \"nova-scheduler-0\" (UID: \"7c7490c0-7ddc-497f-98fa-7588bc02d9b0\") " pod="openstack/nova-scheduler-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.758146 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.801411 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.802580 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7t66g\" (UniqueName: \"kubernetes.io/projected/3f559063-f4d6-4d59-988c-a1838761d3af-kube-api-access-7t66g\") pod \"nova-cell1-novncproxy-0\" (UID: \"3f559063-f4d6-4d59-988c-a1838761d3af\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.802618 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f446bab3-fb39-49ec-bc9b-e782aaf0cd5a-ovsdbserver-sb\") pod \"dnsmasq-dns-68986bcc97-7cg9g\" (UID: \"f446bab3-fb39-49ec-bc9b-e782aaf0cd5a\") " pod="openstack/dnsmasq-dns-68986bcc97-7cg9g"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.802648 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ttsbh\" (UniqueName: \"kubernetes.io/projected/f446bab3-fb39-49ec-bc9b-e782aaf0cd5a-kube-api-access-ttsbh\") pod \"dnsmasq-dns-68986bcc97-7cg9g\" (UID: \"f446bab3-fb39-49ec-bc9b-e782aaf0cd5a\") " pod="openstack/dnsmasq-dns-68986bcc97-7cg9g"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.802671 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f446bab3-fb39-49ec-bc9b-e782aaf0cd5a-config\") pod \"dnsmasq-dns-68986bcc97-7cg9g\" (UID: \"f446bab3-fb39-49ec-bc9b-e782aaf0cd5a\") " pod="openstack/dnsmasq-dns-68986bcc97-7cg9g"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.802700 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f446bab3-fb39-49ec-bc9b-e782aaf0cd5a-ovsdbserver-nb\") pod \"dnsmasq-dns-68986bcc97-7cg9g\" (UID: \"f446bab3-fb39-49ec-bc9b-e782aaf0cd5a\") " pod="openstack/dnsmasq-dns-68986bcc97-7cg9g"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.802740 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f559063-f4d6-4d59-988c-a1838761d3af-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"3f559063-f4d6-4d59-988c-a1838761d3af\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.802761 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f559063-f4d6-4d59-988c-a1838761d3af-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"3f559063-f4d6-4d59-988c-a1838761d3af\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.802800 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f446bab3-fb39-49ec-bc9b-e782aaf0cd5a-dns-svc\") pod \"dnsmasq-dns-68986bcc97-7cg9g\" (UID: \"f446bab3-fb39-49ec-bc9b-e782aaf0cd5a\") " pod="openstack/dnsmasq-dns-68986bcc97-7cg9g"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.818033 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f559063-f4d6-4d59-988c-a1838761d3af-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"3f559063-f4d6-4d59-988c-a1838761d3af\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.829582 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f559063-f4d6-4d59-988c-a1838761d3af-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"3f559063-f4d6-4d59-988c-a1838761d3af\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.834908 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7t66g\" (UniqueName: \"kubernetes.io/projected/3f559063-f4d6-4d59-988c-a1838761d3af-kube-api-access-7t66g\") pod \"nova-cell1-novncproxy-0\" (UID: \"3f559063-f4d6-4d59-988c-a1838761d3af\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.886468 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.904637 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f446bab3-fb39-49ec-bc9b-e782aaf0cd5a-config\") pod \"dnsmasq-dns-68986bcc97-7cg9g\" (UID: \"f446bab3-fb39-49ec-bc9b-e782aaf0cd5a\") " pod="openstack/dnsmasq-dns-68986bcc97-7cg9g"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.904721 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f446bab3-fb39-49ec-bc9b-e782aaf0cd5a-ovsdbserver-nb\") pod \"dnsmasq-dns-68986bcc97-7cg9g\" (UID: \"f446bab3-fb39-49ec-bc9b-e782aaf0cd5a\") " pod="openstack/dnsmasq-dns-68986bcc97-7cg9g"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.904828 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f446bab3-fb39-49ec-bc9b-e782aaf0cd5a-dns-svc\") pod \"dnsmasq-dns-68986bcc97-7cg9g\" (UID: \"f446bab3-fb39-49ec-bc9b-e782aaf0cd5a\") " pod="openstack/dnsmasq-dns-68986bcc97-7cg9g"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.904903 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f446bab3-fb39-49ec-bc9b-e782aaf0cd5a-ovsdbserver-sb\") pod \"dnsmasq-dns-68986bcc97-7cg9g\" (UID: \"f446bab3-fb39-49ec-bc9b-e782aaf0cd5a\") " pod="openstack/dnsmasq-dns-68986bcc97-7cg9g"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.904937 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ttsbh\" (UniqueName: \"kubernetes.io/projected/f446bab3-fb39-49ec-bc9b-e782aaf0cd5a-kube-api-access-ttsbh\") pod \"dnsmasq-dns-68986bcc97-7cg9g\" (UID: \"f446bab3-fb39-49ec-bc9b-e782aaf0cd5a\") " pod="openstack/dnsmasq-dns-68986bcc97-7cg9g"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.905981 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f446bab3-fb39-49ec-bc9b-e782aaf0cd5a-config\") pod \"dnsmasq-dns-68986bcc97-7cg9g\" (UID: \"f446bab3-fb39-49ec-bc9b-e782aaf0cd5a\") " pod="openstack/dnsmasq-dns-68986bcc97-7cg9g"
Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.906251 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f446bab3-fb39-49ec-bc9b-e782aaf0cd5a-dns-svc\") pod
\"dnsmasq-dns-68986bcc97-7cg9g\" (UID: \"f446bab3-fb39-49ec-bc9b-e782aaf0cd5a\") " pod="openstack/dnsmasq-dns-68986bcc97-7cg9g" Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.906616 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f446bab3-fb39-49ec-bc9b-e782aaf0cd5a-ovsdbserver-sb\") pod \"dnsmasq-dns-68986bcc97-7cg9g\" (UID: \"f446bab3-fb39-49ec-bc9b-e782aaf0cd5a\") " pod="openstack/dnsmasq-dns-68986bcc97-7cg9g" Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.907937 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f446bab3-fb39-49ec-bc9b-e782aaf0cd5a-ovsdbserver-nb\") pod \"dnsmasq-dns-68986bcc97-7cg9g\" (UID: \"f446bab3-fb39-49ec-bc9b-e782aaf0cd5a\") " pod="openstack/dnsmasq-dns-68986bcc97-7cg9g" Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.925540 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ttsbh\" (UniqueName: \"kubernetes.io/projected/f446bab3-fb39-49ec-bc9b-e782aaf0cd5a-kube-api-access-ttsbh\") pod \"dnsmasq-dns-68986bcc97-7cg9g\" (UID: \"f446bab3-fb39-49ec-bc9b-e782aaf0cd5a\") " pod="openstack/dnsmasq-dns-68986bcc97-7cg9g" Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.938290 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:05:20 crc kubenswrapper[4857]: I1128 15:05:20.967750 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68986bcc97-7cg9g" Nov 28 15:05:21 crc kubenswrapper[4857]: I1128 15:05:21.093778 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-27hfs"] Nov 28 15:05:21 crc kubenswrapper[4857]: I1128 15:05:21.160344 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-c2mg9" Nov 28 15:05:21 crc kubenswrapper[4857]: I1128 15:05:21.161984 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-c2mg9" Nov 28 15:05:21 crc kubenswrapper[4857]: I1128 15:05:21.258240 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-c2mg9" Nov 28 15:05:21 crc kubenswrapper[4857]: I1128 15:05:21.265884 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-99ghx"] Nov 28 15:05:21 crc kubenswrapper[4857]: I1128 15:05:21.270069 4857 util.go:30] "No sandbox for pod can be found. 
Nov 28 15:05:21 crc kubenswrapper[4857]: I1128 15:05:21.275226 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts"
Nov 28 15:05:21 crc kubenswrapper[4857]: I1128 15:05:21.278057 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data"
Nov 28 15:05:21 crc kubenswrapper[4857]: I1128 15:05:21.314916 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-99ghx"]
Nov 28 15:05:21 crc kubenswrapper[4857]: I1128 15:05:21.345004 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 15:05:21 crc kubenswrapper[4857]: I1128 15:05:21.418116 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-99ghx\" (UID: \"7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03\") " pod="openstack/nova-cell1-conductor-db-sync-99ghx"
Nov 28 15:05:21 crc kubenswrapper[4857]: I1128 15:05:21.418161 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03-config-data\") pod \"nova-cell1-conductor-db-sync-99ghx\" (UID: \"7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03\") " pod="openstack/nova-cell1-conductor-db-sync-99ghx"
Nov 28 15:05:21 crc kubenswrapper[4857]: I1128 15:05:21.418286 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03-scripts\") pod \"nova-cell1-conductor-db-sync-99ghx\" (UID: \"7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03\") " pod="openstack/nova-cell1-conductor-db-sync-99ghx"
Nov 28 15:05:21 crc kubenswrapper[4857]: I1128 15:05:21.418323 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vmgvc\" (UniqueName: \"kubernetes.io/projected/7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03-kube-api-access-vmgvc\") pod \"nova-cell1-conductor-db-sync-99ghx\" (UID: \"7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03\") " pod="openstack/nova-cell1-conductor-db-sync-99ghx"
Nov 28 15:05:21 crc kubenswrapper[4857]: I1128 15:05:21.519762 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vmgvc\" (UniqueName: \"kubernetes.io/projected/7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03-kube-api-access-vmgvc\") pod \"nova-cell1-conductor-db-sync-99ghx\" (UID: \"7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03\") " pod="openstack/nova-cell1-conductor-db-sync-99ghx"
Nov 28 15:05:21 crc kubenswrapper[4857]: I1128 15:05:21.519818 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-99ghx\" (UID: \"7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03\") " pod="openstack/nova-cell1-conductor-db-sync-99ghx"
Nov 28 15:05:21 crc kubenswrapper[4857]: I1128 15:05:21.519847 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03-config-data\") pod \"nova-cell1-conductor-db-sync-99ghx\" (UID: \"7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03\") " pod="openstack/nova-cell1-conductor-db-sync-99ghx"
Nov 28 15:05:21 crc kubenswrapper[4857]: I1128 15:05:21.520000 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03-scripts\") pod \"nova-cell1-conductor-db-sync-99ghx\" (UID: \"7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03\") " pod="openstack/nova-cell1-conductor-db-sync-99ghx"
Nov 28 15:05:21 crc kubenswrapper[4857]: I1128 15:05:21.538171 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03-scripts\") pod \"nova-cell1-conductor-db-sync-99ghx\" (UID: \"7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03\") " pod="openstack/nova-cell1-conductor-db-sync-99ghx"
Nov 28 15:05:21 crc kubenswrapper[4857]: I1128 15:05:21.557582 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-99ghx\" (UID: \"7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03\") " pod="openstack/nova-cell1-conductor-db-sync-99ghx"
Nov 28 15:05:21 crc kubenswrapper[4857]: I1128 15:05:21.566322 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03-config-data\") pod \"nova-cell1-conductor-db-sync-99ghx\" (UID: \"7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03\") " pod="openstack/nova-cell1-conductor-db-sync-99ghx"
Nov 28 15:05:21 crc kubenswrapper[4857]: I1128 15:05:21.572978 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 28 15:05:21 crc kubenswrapper[4857]: I1128 15:05:21.630591 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vmgvc\" (UniqueName: \"kubernetes.io/projected/7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03-kube-api-access-vmgvc\") pod \"nova-cell1-conductor-db-sync-99ghx\" (UID: \"7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03\") " pod="openstack/nova-cell1-conductor-db-sync-99ghx"
Nov 28 15:05:21 crc kubenswrapper[4857]: I1128 15:05:21.711006 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 15:05:21 crc kubenswrapper[4857]: I1128 15:05:21.730011 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 28 15:05:21 crc kubenswrapper[4857]: I1128 15:05:21.740765 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-68986bcc97-7cg9g"]
Nov 28 15:05:21 crc kubenswrapper[4857]: I1128 15:05:21.770021 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-99ghx"
Nov 28 15:05:22 crc kubenswrapper[4857]: I1128 15:05:22.179455 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"3f559063-f4d6-4d59-988c-a1838761d3af","Type":"ContainerStarted","Data":"026d78da318ee802039d1a5c6d58bb3d08a3e6cf298afcd69ba8d95e48aadff7"}
Nov 28 15:05:22 crc kubenswrapper[4857]: I1128 15:05:22.179874 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"3f559063-f4d6-4d59-988c-a1838761d3af","Type":"ContainerStarted","Data":"c709ea2d8abc922b29c60039f4d18e79936c1157eeb6b0ddb9426494b264f09c"}
Nov 28 15:05:22 crc kubenswrapper[4857]: I1128 15:05:22.185973 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7c7490c0-7ddc-497f-98fa-7588bc02d9b0","Type":"ContainerStarted","Data":"01a803973671fb56f8e1bc6393a042412fc94dc34f650e38832131bc8ec95315"}
Nov 28 15:05:22 crc kubenswrapper[4857]: I1128 15:05:22.186002 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7c7490c0-7ddc-497f-98fa-7588bc02d9b0","Type":"ContainerStarted","Data":"76b1a86bd0631dab7baecc95b64d2df1f280121bb7dc9130a4f864aa595f5959"}
Nov 28 15:05:22 crc kubenswrapper[4857]: I1128 15:05:22.188597 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"25adb9bf-3f43-4e42-a7c6-00b8a1683722","Type":"ContainerStarted","Data":"fd5580235cefcc136acf7417ff8ef2f9a0722a23bd2e797591c2a64effa494a2"}
Nov 28 15:05:22 crc kubenswrapper[4857]: I1128 15:05:22.188646 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"25adb9bf-3f43-4e42-a7c6-00b8a1683722","Type":"ContainerStarted","Data":"55530b11608b9c0d5b44676b14cc2145c297d9a327a5cdeb4e5bcabbeb553ec8"}
Nov 28 15:05:22 crc kubenswrapper[4857]: I1128 15:05:22.188661 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"25adb9bf-3f43-4e42-a7c6-00b8a1683722","Type":"ContainerStarted","Data":"b25d3bd5ff139730261cb8a2456955e9cfac0cbb092e46f0d41a91ed2945c472"}
Nov 28 15:05:22 crc kubenswrapper[4857]: I1128 15:05:22.190774 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-27hfs" event={"ID":"2c3aaa36-829f-4244-a40a-73810966b9ea","Type":"ContainerStarted","Data":"9261a792a7599a6ceb7722c629f10ec25b113c5f241ccbfc0384341deb7afe53"}
Nov 28 15:05:22 crc kubenswrapper[4857]: I1128 15:05:22.190819 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-27hfs" event={"ID":"2c3aaa36-829f-4244-a40a-73810966b9ea","Type":"ContainerStarted","Data":"7e4c1c703442f0eb6930da9b41ba4e1fb60f8971822224f98c78715471f52c8e"}
Nov 28 15:05:22 crc kubenswrapper[4857]: I1128 15:05:22.192544 4857 generic.go:334] "Generic (PLEG): container finished" podID="f446bab3-fb39-49ec-bc9b-e782aaf0cd5a" containerID="82618a00a08658c83726259d4c81250ccb1c1ccff53c5f84a30452d7d3416bcf" exitCode=0
Nov 28 15:05:22 crc kubenswrapper[4857]: I1128 15:05:22.192595 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68986bcc97-7cg9g" event={"ID":"f446bab3-fb39-49ec-bc9b-e782aaf0cd5a","Type":"ContainerDied","Data":"82618a00a08658c83726259d4c81250ccb1c1ccff53c5f84a30452d7d3416bcf"}
Nov 28 15:05:22 crc kubenswrapper[4857]: I1128 15:05:22.192613 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68986bcc97-7cg9g" event={"ID":"f446bab3-fb39-49ec-bc9b-e782aaf0cd5a","Type":"ContainerStarted","Data":"2413bc0537cbd00aea2543e902e5114253165bab0149ae27fb60fcf9243672fc"}
Nov 28 15:05:22 crc kubenswrapper[4857]: I1128 15:05:22.195360 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6e769330-c4f2-4f98-bdbb-e4c70cfb9c30","Type":"ContainerStarted","Data":"91c0642a896f25bf1a4506c6375db6f3ccfc047045afb90b89b1e7f9295ef104"}
Nov 28 15:05:22 crc kubenswrapper[4857]: I1128 15:05:22.195407 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6e769330-c4f2-4f98-bdbb-e4c70cfb9c30","Type":"ContainerStarted","Data":"8e28893232b60876d1d2ddb81eeb5cadf479b63c526378f8afc83872f33ec211"}
Nov 28 15:05:22 crc kubenswrapper[4857]: I1128 15:05:22.220705 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.220688435 podStartE2EDuration="2.220688435s" podCreationTimestamp="2025-11-28 15:05:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:05:22.195280357 +0000 UTC m=+5772.319221804" watchObservedRunningTime="2025-11-28 15:05:22.220688435 +0000 UTC m=+5772.344629872"
Nov 28 15:05:22 crc kubenswrapper[4857]: I1128 15:05:22.240394 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-27hfs" podStartSLOduration=3.240378191 podStartE2EDuration="3.240378191s" podCreationTimestamp="2025-11-28 15:05:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:05:22.234017681 +0000 UTC m=+5772.357959108" watchObservedRunningTime="2025-11-28 15:05:22.240378191 +0000 UTC m=+5772.364319628"
Nov 28 15:05:22 crc kubenswrapper[4857]: I1128 15:05:22.291462 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.291445085 podStartE2EDuration="2.291445085s" podCreationTimestamp="2025-11-28 15:05:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:05:22.287394827 +0000 UTC m=+5772.411336264" watchObservedRunningTime="2025-11-28 15:05:22.291445085 +0000 UTC m=+5772.415386522"
Nov 28 15:05:22 crc kubenswrapper[4857]: I1128 15:05:22.298680 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-c2mg9"
Nov 28 15:05:22 crc kubenswrapper[4857]: I1128 15:05:22.334871 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.334820154 podStartE2EDuration="2.334820154s" podCreationTimestamp="2025-11-28 15:05:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:05:22.310608907 +0000 UTC m=+5772.434550344" watchObservedRunningTime="2025-11-28 15:05:22.334820154 +0000 UTC m=+5772.458761591"
Nov 28 15:05:22 crc kubenswrapper[4857]: I1128 15:05:22.420306 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-99ghx"]
Nov 28 15:05:22 crc kubenswrapper[4857]: I1128 15:05:22.466221 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-c2mg9"]
pods=["openshift-marketplace/certified-operators-c2mg9"] Nov 28 15:05:23 crc kubenswrapper[4857]: I1128 15:05:23.204246 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-99ghx" event={"ID":"7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03","Type":"ContainerStarted","Data":"40c4582a3db9563d73dd7bb2b12b84b0c9effbee30b40723e9e832dd7a3f9fee"} Nov 28 15:05:23 crc kubenswrapper[4857]: I1128 15:05:23.204568 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-99ghx" event={"ID":"7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03","Type":"ContainerStarted","Data":"0e46b9cca438a5f8934b7a901eac0b025f4fe646e923b9647b5061f09090494d"} Nov 28 15:05:23 crc kubenswrapper[4857]: I1128 15:05:23.207890 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6e769330-c4f2-4f98-bdbb-e4c70cfb9c30","Type":"ContainerStarted","Data":"2b7e1887d904816ccae33ba86288a02d75f22b7115913dda96aa5846d28015ec"} Nov 28 15:05:23 crc kubenswrapper[4857]: I1128 15:05:23.210503 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68986bcc97-7cg9g" event={"ID":"f446bab3-fb39-49ec-bc9b-e782aaf0cd5a","Type":"ContainerStarted","Data":"31956e593877833bd9af85f7019922c6a345dd26222ba25cf15855f4102287d4"} Nov 28 15:05:23 crc kubenswrapper[4857]: I1128 15:05:23.232822 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-99ghx" podStartSLOduration=2.23280446 podStartE2EDuration="2.23280446s" podCreationTimestamp="2025-11-28 15:05:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:05:23.231380642 +0000 UTC m=+5773.355322119" watchObservedRunningTime="2025-11-28 15:05:23.23280446 +0000 UTC m=+5773.356745887" Nov 28 15:05:23 crc kubenswrapper[4857]: I1128 15:05:23.254496 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-68986bcc97-7cg9g" podStartSLOduration=3.254473329 podStartE2EDuration="3.254473329s" podCreationTimestamp="2025-11-28 15:05:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:05:23.253472892 +0000 UTC m=+5773.377414369" watchObservedRunningTime="2025-11-28 15:05:23.254473329 +0000 UTC m=+5773.378414776" Nov 28 15:05:23 crc kubenswrapper[4857]: I1128 15:05:23.298815 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.298793272 podStartE2EDuration="3.298793272s" podCreationTimestamp="2025-11-28 15:05:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:05:23.295448043 +0000 UTC m=+5773.419389500" watchObservedRunningTime="2025-11-28 15:05:23.298793272 +0000 UTC m=+5773.422734709" Nov 28 15:05:24 crc kubenswrapper[4857]: I1128 15:05:24.221501 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-c2mg9" podUID="52759ded-d5c3-4670-8be4-387b171ac97d" containerName="registry-server" containerID="cri-o://c74c856a899b9d5c2e547d04d22bc1bd3f90653812ae75b653ac36de7896a1cb" gracePeriod=2 Nov 28 15:05:24 crc kubenswrapper[4857]: I1128 15:05:24.222085 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/dnsmasq-dns-68986bcc97-7cg9g" Nov 28 15:05:25 crc kubenswrapper[4857]: I1128 15:05:25.233406 4857 generic.go:334] "Generic (PLEG): container finished" podID="52759ded-d5c3-4670-8be4-387b171ac97d" containerID="c74c856a899b9d5c2e547d04d22bc1bd3f90653812ae75b653ac36de7896a1cb" exitCode=0 Nov 28 15:05:25 crc kubenswrapper[4857]: I1128 15:05:25.233469 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c2mg9" event={"ID":"52759ded-d5c3-4670-8be4-387b171ac97d","Type":"ContainerDied","Data":"c74c856a899b9d5c2e547d04d22bc1bd3f90653812ae75b653ac36de7896a1cb"} Nov 28 15:05:25 crc kubenswrapper[4857]: I1128 15:05:25.234078 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c2mg9" event={"ID":"52759ded-d5c3-4670-8be4-387b171ac97d","Type":"ContainerDied","Data":"fe336536d7b97d567c50ede5a3de9583ce9ce153d834cdb4badf4cde7f899946"} Nov 28 15:05:25 crc kubenswrapper[4857]: I1128 15:05:25.234124 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fe336536d7b97d567c50ede5a3de9583ce9ce153d834cdb4badf4cde7f899946" Nov 28 15:05:25 crc kubenswrapper[4857]: I1128 15:05:25.375613 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c2mg9" Nov 28 15:05:25 crc kubenswrapper[4857]: I1128 15:05:25.523313 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52759ded-d5c3-4670-8be4-387b171ac97d-utilities\") pod \"52759ded-d5c3-4670-8be4-387b171ac97d\" (UID: \"52759ded-d5c3-4670-8be4-387b171ac97d\") " Nov 28 15:05:25 crc kubenswrapper[4857]: I1128 15:05:25.523361 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52759ded-d5c3-4670-8be4-387b171ac97d-catalog-content\") pod \"52759ded-d5c3-4670-8be4-387b171ac97d\" (UID: \"52759ded-d5c3-4670-8be4-387b171ac97d\") " Nov 28 15:05:25 crc kubenswrapper[4857]: I1128 15:05:25.523412 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nsv5p\" (UniqueName: \"kubernetes.io/projected/52759ded-d5c3-4670-8be4-387b171ac97d-kube-api-access-nsv5p\") pod \"52759ded-d5c3-4670-8be4-387b171ac97d\" (UID: \"52759ded-d5c3-4670-8be4-387b171ac97d\") " Nov 28 15:05:25 crc kubenswrapper[4857]: I1128 15:05:25.524909 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/52759ded-d5c3-4670-8be4-387b171ac97d-utilities" (OuterVolumeSpecName: "utilities") pod "52759ded-d5c3-4670-8be4-387b171ac97d" (UID: "52759ded-d5c3-4670-8be4-387b171ac97d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:05:25 crc kubenswrapper[4857]: I1128 15:05:25.531871 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52759ded-d5c3-4670-8be4-387b171ac97d-kube-api-access-nsv5p" (OuterVolumeSpecName: "kube-api-access-nsv5p") pod "52759ded-d5c3-4670-8be4-387b171ac97d" (UID: "52759ded-d5c3-4670-8be4-387b171ac97d"). InnerVolumeSpecName "kube-api-access-nsv5p". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:05:25 crc kubenswrapper[4857]: I1128 15:05:25.602195 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/52759ded-d5c3-4670-8be4-387b171ac97d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "52759ded-d5c3-4670-8be4-387b171ac97d" (UID: "52759ded-d5c3-4670-8be4-387b171ac97d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:05:25 crc kubenswrapper[4857]: I1128 15:05:25.625823 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52759ded-d5c3-4670-8be4-387b171ac97d-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:25 crc kubenswrapper[4857]: I1128 15:05:25.625879 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52759ded-d5c3-4670-8be4-387b171ac97d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:25 crc kubenswrapper[4857]: I1128 15:05:25.625896 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nsv5p\" (UniqueName: \"kubernetes.io/projected/52759ded-d5c3-4670-8be4-387b171ac97d-kube-api-access-nsv5p\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:25 crc kubenswrapper[4857]: I1128 15:05:25.759841 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 28 15:05:25 crc kubenswrapper[4857]: I1128 15:05:25.888119 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 15:05:25 crc kubenswrapper[4857]: I1128 15:05:25.888218 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 15:05:25 crc kubenswrapper[4857]: I1128 15:05:25.939634 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:05:26 crc kubenswrapper[4857]: I1128 15:05:26.248415 4857 generic.go:334] "Generic (PLEG): container finished" podID="7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03" containerID="40c4582a3db9563d73dd7bb2b12b84b0c9effbee30b40723e9e832dd7a3f9fee" exitCode=0 Nov 28 15:05:26 crc kubenswrapper[4857]: I1128 15:05:26.248513 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-c2mg9" Nov 28 15:05:26 crc kubenswrapper[4857]: I1128 15:05:26.248510 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-99ghx" event={"ID":"7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03","Type":"ContainerDied","Data":"40c4582a3db9563d73dd7bb2b12b84b0c9effbee30b40723e9e832dd7a3f9fee"} Nov 28 15:05:26 crc kubenswrapper[4857]: I1128 15:05:26.298894 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-c2mg9"] Nov 28 15:05:26 crc kubenswrapper[4857]: I1128 15:05:26.317451 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-c2mg9"] Nov 28 15:05:27 crc kubenswrapper[4857]: I1128 15:05:27.269019 4857 generic.go:334] "Generic (PLEG): container finished" podID="2c3aaa36-829f-4244-a40a-73810966b9ea" containerID="9261a792a7599a6ceb7722c629f10ec25b113c5f241ccbfc0384341deb7afe53" exitCode=0 Nov 28 15:05:27 crc kubenswrapper[4857]: I1128 15:05:27.269282 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-27hfs" event={"ID":"2c3aaa36-829f-4244-a40a-73810966b9ea","Type":"ContainerDied","Data":"9261a792a7599a6ceb7722c629f10ec25b113c5f241ccbfc0384341deb7afe53"} Nov 28 15:05:27 crc kubenswrapper[4857]: I1128 15:05:27.656211 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-99ghx" Nov 28 15:05:27 crc kubenswrapper[4857]: I1128 15:05:27.776222 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03-scripts\") pod \"7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03\" (UID: \"7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03\") " Nov 28 15:05:27 crc kubenswrapper[4857]: I1128 15:05:27.776858 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03-combined-ca-bundle\") pod \"7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03\" (UID: \"7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03\") " Nov 28 15:05:27 crc kubenswrapper[4857]: I1128 15:05:27.776993 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vmgvc\" (UniqueName: \"kubernetes.io/projected/7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03-kube-api-access-vmgvc\") pod \"7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03\" (UID: \"7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03\") " Nov 28 15:05:27 crc kubenswrapper[4857]: I1128 15:05:27.777134 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03-config-data\") pod \"7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03\" (UID: \"7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03\") " Nov 28 15:05:27 crc kubenswrapper[4857]: I1128 15:05:27.784139 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03-kube-api-access-vmgvc" (OuterVolumeSpecName: "kube-api-access-vmgvc") pod "7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03" (UID: "7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03"). InnerVolumeSpecName "kube-api-access-vmgvc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:05:27 crc kubenswrapper[4857]: I1128 15:05:27.784432 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03-scripts" (OuterVolumeSpecName: "scripts") pod "7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03" (UID: "7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:05:27 crc kubenswrapper[4857]: I1128 15:05:27.804510 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03-config-data" (OuterVolumeSpecName: "config-data") pod "7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03" (UID: "7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:05:27 crc kubenswrapper[4857]: I1128 15:05:27.821983 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03" (UID: "7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:05:27 crc kubenswrapper[4857]: I1128 15:05:27.879677 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:27 crc kubenswrapper[4857]: I1128 15:05:27.879721 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:27 crc kubenswrapper[4857]: I1128 15:05:27.879730 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:27 crc kubenswrapper[4857]: I1128 15:05:27.879742 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vmgvc\" (UniqueName: \"kubernetes.io/projected/7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03-kube-api-access-vmgvc\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:28 crc kubenswrapper[4857]: I1128 15:05:28.247827 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="52759ded-d5c3-4670-8be4-387b171ac97d" path="/var/lib/kubelet/pods/52759ded-d5c3-4670-8be4-387b171ac97d/volumes" Nov 28 15:05:28 crc kubenswrapper[4857]: I1128 15:05:28.283381 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-99ghx" Nov 28 15:05:28 crc kubenswrapper[4857]: I1128 15:05:28.283371 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-99ghx" event={"ID":"7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03","Type":"ContainerDied","Data":"0e46b9cca438a5f8934b7a901eac0b025f4fe646e923b9647b5061f09090494d"} Nov 28 15:05:28 crc kubenswrapper[4857]: I1128 15:05:28.283465 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0e46b9cca438a5f8934b7a901eac0b025f4fe646e923b9647b5061f09090494d" Nov 28 15:05:28 crc kubenswrapper[4857]: I1128 15:05:28.483874 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 15:05:28 crc kubenswrapper[4857]: E1128 15:05:28.484712 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03" containerName="nova-cell1-conductor-db-sync" Nov 28 15:05:28 crc kubenswrapper[4857]: I1128 15:05:28.484730 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03" containerName="nova-cell1-conductor-db-sync" Nov 28 15:05:28 crc kubenswrapper[4857]: E1128 15:05:28.484759 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52759ded-d5c3-4670-8be4-387b171ac97d" containerName="extract-content" Nov 28 15:05:28 crc kubenswrapper[4857]: I1128 15:05:28.484766 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="52759ded-d5c3-4670-8be4-387b171ac97d" containerName="extract-content" Nov 28 15:05:28 crc kubenswrapper[4857]: E1128 15:05:28.484776 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52759ded-d5c3-4670-8be4-387b171ac97d" containerName="extract-utilities" Nov 28 15:05:28 crc kubenswrapper[4857]: I1128 15:05:28.484783 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="52759ded-d5c3-4670-8be4-387b171ac97d" containerName="extract-utilities" Nov 28 15:05:28 crc kubenswrapper[4857]: E1128 15:05:28.484801 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52759ded-d5c3-4670-8be4-387b171ac97d" containerName="registry-server" Nov 28 15:05:28 crc kubenswrapper[4857]: I1128 15:05:28.484807 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="52759ded-d5c3-4670-8be4-387b171ac97d" containerName="registry-server" Nov 28 15:05:28 crc kubenswrapper[4857]: I1128 15:05:28.484976 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03" containerName="nova-cell1-conductor-db-sync" Nov 28 15:05:28 crc kubenswrapper[4857]: I1128 15:05:28.484990 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="52759ded-d5c3-4670-8be4-387b171ac97d" containerName="registry-server" Nov 28 15:05:28 crc kubenswrapper[4857]: I1128 15:05:28.485655 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 15:05:28 crc kubenswrapper[4857]: I1128 15:05:28.492106 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 28 15:05:28 crc kubenswrapper[4857]: I1128 15:05:28.500593 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 15:05:28 crc kubenswrapper[4857]: I1128 15:05:28.604000 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2ps9b\" (UniqueName: \"kubernetes.io/projected/e106e94c-ae77-44e4-a276-4c01117a17f3-kube-api-access-2ps9b\") pod \"nova-cell1-conductor-0\" (UID: \"e106e94c-ae77-44e4-a276-4c01117a17f3\") " pod="openstack/nova-cell1-conductor-0" Nov 28 15:05:28 crc kubenswrapper[4857]: I1128 15:05:28.604458 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e106e94c-ae77-44e4-a276-4c01117a17f3-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"e106e94c-ae77-44e4-a276-4c01117a17f3\") " pod="openstack/nova-cell1-conductor-0" Nov 28 15:05:28 crc kubenswrapper[4857]: I1128 15:05:28.604550 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e106e94c-ae77-44e4-a276-4c01117a17f3-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"e106e94c-ae77-44e4-a276-4c01117a17f3\") " pod="openstack/nova-cell1-conductor-0" Nov 28 15:05:28 crc kubenswrapper[4857]: I1128 15:05:28.706907 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2ps9b\" (UniqueName: \"kubernetes.io/projected/e106e94c-ae77-44e4-a276-4c01117a17f3-kube-api-access-2ps9b\") pod \"nova-cell1-conductor-0\" (UID: \"e106e94c-ae77-44e4-a276-4c01117a17f3\") " pod="openstack/nova-cell1-conductor-0" Nov 28 15:05:28 crc kubenswrapper[4857]: I1128 15:05:28.707031 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e106e94c-ae77-44e4-a276-4c01117a17f3-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"e106e94c-ae77-44e4-a276-4c01117a17f3\") " pod="openstack/nova-cell1-conductor-0" Nov 28 15:05:28 crc kubenswrapper[4857]: I1128 15:05:28.707060 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e106e94c-ae77-44e4-a276-4c01117a17f3-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"e106e94c-ae77-44e4-a276-4c01117a17f3\") " pod="openstack/nova-cell1-conductor-0" Nov 28 15:05:28 crc kubenswrapper[4857]: I1128 15:05:28.712238 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e106e94c-ae77-44e4-a276-4c01117a17f3-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"e106e94c-ae77-44e4-a276-4c01117a17f3\") " pod="openstack/nova-cell1-conductor-0" Nov 28 15:05:28 crc kubenswrapper[4857]: I1128 15:05:28.714655 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e106e94c-ae77-44e4-a276-4c01117a17f3-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"e106e94c-ae77-44e4-a276-4c01117a17f3\") " pod="openstack/nova-cell1-conductor-0" Nov 28 15:05:28 crc kubenswrapper[4857]: I1128 15:05:28.724526 4857 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2ps9b\" (UniqueName: \"kubernetes.io/projected/e106e94c-ae77-44e4-a276-4c01117a17f3-kube-api-access-2ps9b\") pod \"nova-cell1-conductor-0\" (UID: \"e106e94c-ae77-44e4-a276-4c01117a17f3\") " pod="openstack/nova-cell1-conductor-0" Nov 28 15:05:28 crc kubenswrapper[4857]: I1128 15:05:28.787471 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-27hfs" Nov 28 15:05:28 crc kubenswrapper[4857]: I1128 15:05:28.812487 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 15:05:28 crc kubenswrapper[4857]: I1128 15:05:28.916558 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2c3aaa36-829f-4244-a40a-73810966b9ea-scripts\") pod \"2c3aaa36-829f-4244-a40a-73810966b9ea\" (UID: \"2c3aaa36-829f-4244-a40a-73810966b9ea\") " Nov 28 15:05:28 crc kubenswrapper[4857]: I1128 15:05:28.917295 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ldbgn\" (UniqueName: \"kubernetes.io/projected/2c3aaa36-829f-4244-a40a-73810966b9ea-kube-api-access-ldbgn\") pod \"2c3aaa36-829f-4244-a40a-73810966b9ea\" (UID: \"2c3aaa36-829f-4244-a40a-73810966b9ea\") " Nov 28 15:05:28 crc kubenswrapper[4857]: I1128 15:05:28.917355 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c3aaa36-829f-4244-a40a-73810966b9ea-config-data\") pod \"2c3aaa36-829f-4244-a40a-73810966b9ea\" (UID: \"2c3aaa36-829f-4244-a40a-73810966b9ea\") " Nov 28 15:05:28 crc kubenswrapper[4857]: I1128 15:05:28.917394 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c3aaa36-829f-4244-a40a-73810966b9ea-combined-ca-bundle\") pod \"2c3aaa36-829f-4244-a40a-73810966b9ea\" (UID: \"2c3aaa36-829f-4244-a40a-73810966b9ea\") " Nov 28 15:05:28 crc kubenswrapper[4857]: I1128 15:05:28.936336 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c3aaa36-829f-4244-a40a-73810966b9ea-kube-api-access-ldbgn" (OuterVolumeSpecName: "kube-api-access-ldbgn") pod "2c3aaa36-829f-4244-a40a-73810966b9ea" (UID: "2c3aaa36-829f-4244-a40a-73810966b9ea"). InnerVolumeSpecName "kube-api-access-ldbgn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:05:28 crc kubenswrapper[4857]: I1128 15:05:28.944178 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c3aaa36-829f-4244-a40a-73810966b9ea-scripts" (OuterVolumeSpecName: "scripts") pod "2c3aaa36-829f-4244-a40a-73810966b9ea" (UID: "2c3aaa36-829f-4244-a40a-73810966b9ea"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:05:28 crc kubenswrapper[4857]: I1128 15:05:28.956140 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c3aaa36-829f-4244-a40a-73810966b9ea-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2c3aaa36-829f-4244-a40a-73810966b9ea" (UID: "2c3aaa36-829f-4244-a40a-73810966b9ea"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:05:28 crc kubenswrapper[4857]: I1128 15:05:28.970149 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c3aaa36-829f-4244-a40a-73810966b9ea-config-data" (OuterVolumeSpecName: "config-data") pod "2c3aaa36-829f-4244-a40a-73810966b9ea" (UID: "2c3aaa36-829f-4244-a40a-73810966b9ea"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:05:29 crc kubenswrapper[4857]: I1128 15:05:29.020474 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2c3aaa36-829f-4244-a40a-73810966b9ea-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:29 crc kubenswrapper[4857]: I1128 15:05:29.020510 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ldbgn\" (UniqueName: \"kubernetes.io/projected/2c3aaa36-829f-4244-a40a-73810966b9ea-kube-api-access-ldbgn\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:29 crc kubenswrapper[4857]: I1128 15:05:29.020522 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c3aaa36-829f-4244-a40a-73810966b9ea-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:29 crc kubenswrapper[4857]: I1128 15:05:29.020532 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c3aaa36-829f-4244-a40a-73810966b9ea-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:29 crc kubenswrapper[4857]: I1128 15:05:29.306931 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 15:05:29 crc kubenswrapper[4857]: I1128 15:05:29.311524 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-27hfs" event={"ID":"2c3aaa36-829f-4244-a40a-73810966b9ea","Type":"ContainerDied","Data":"7e4c1c703442f0eb6930da9b41ba4e1fb60f8971822224f98c78715471f52c8e"} Nov 28 15:05:29 crc kubenswrapper[4857]: I1128 15:05:29.311565 4857 util.go:48] "No ready sandbox for pod can be found. 
Nov 28 15:05:29 crc kubenswrapper[4857]: I1128 15:05:29.311587 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7e4c1c703442f0eb6930da9b41ba4e1fb60f8971822224f98c78715471f52c8e"
Nov 28 15:05:29 crc kubenswrapper[4857]: I1128 15:05:29.487737 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 28 15:05:29 crc kubenswrapper[4857]: I1128 15:05:29.490020 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="25adb9bf-3f43-4e42-a7c6-00b8a1683722" containerName="nova-api-log" containerID="cri-o://55530b11608b9c0d5b44676b14cc2145c297d9a327a5cdeb4e5bcabbeb553ec8" gracePeriod=30
Nov 28 15:05:29 crc kubenswrapper[4857]: I1128 15:05:29.490519 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="25adb9bf-3f43-4e42-a7c6-00b8a1683722" containerName="nova-api-api" containerID="cri-o://fd5580235cefcc136acf7417ff8ef2f9a0722a23bd2e797591c2a64effa494a2" gracePeriod=30
Nov 28 15:05:29 crc kubenswrapper[4857]: I1128 15:05:29.508240 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 15:05:29 crc kubenswrapper[4857]: I1128 15:05:29.508830 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="7c7490c0-7ddc-497f-98fa-7588bc02d9b0" containerName="nova-scheduler-scheduler" containerID="cri-o://01a803973671fb56f8e1bc6393a042412fc94dc34f650e38832131bc8ec95315" gracePeriod=30
Nov 28 15:05:29 crc kubenswrapper[4857]: I1128 15:05:29.544161 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 15:05:29 crc kubenswrapper[4857]: I1128 15:05:29.544485 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="6e769330-c4f2-4f98-bdbb-e4c70cfb9c30" containerName="nova-metadata-log" containerID="cri-o://91c0642a896f25bf1a4506c6375db6f3ccfc047045afb90b89b1e7f9295ef104" gracePeriod=30
Nov 28 15:05:29 crc kubenswrapper[4857]: I1128 15:05:29.544830 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="6e769330-c4f2-4f98-bdbb-e4c70cfb9c30" containerName="nova-metadata-metadata" containerID="cri-o://2b7e1887d904816ccae33ba86288a02d75f22b7115913dda96aa5846d28015ec" gracePeriod=30
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.297914 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.328331 4857 generic.go:334] "Generic (PLEG): container finished" podID="25adb9bf-3f43-4e42-a7c6-00b8a1683722" containerID="fd5580235cefcc136acf7417ff8ef2f9a0722a23bd2e797591c2a64effa494a2" exitCode=0
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.328363 4857 generic.go:334] "Generic (PLEG): container finished" podID="25adb9bf-3f43-4e42-a7c6-00b8a1683722" containerID="55530b11608b9c0d5b44676b14cc2145c297d9a327a5cdeb4e5bcabbeb553ec8" exitCode=143
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.328399 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"25adb9bf-3f43-4e42-a7c6-00b8a1683722","Type":"ContainerDied","Data":"fd5580235cefcc136acf7417ff8ef2f9a0722a23bd2e797591c2a64effa494a2"}
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.328425 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"25adb9bf-3f43-4e42-a7c6-00b8a1683722","Type":"ContainerDied","Data":"55530b11608b9c0d5b44676b14cc2145c297d9a327a5cdeb4e5bcabbeb553ec8"}
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.328435 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"25adb9bf-3f43-4e42-a7c6-00b8a1683722","Type":"ContainerDied","Data":"b25d3bd5ff139730261cb8a2456955e9cfac0cbb092e46f0d41a91ed2945c472"}
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.328451 4857 scope.go:117] "RemoveContainer" containerID="fd5580235cefcc136acf7417ff8ef2f9a0722a23bd2e797591c2a64effa494a2"
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.328596 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.331208 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"e106e94c-ae77-44e4-a276-4c01117a17f3","Type":"ContainerStarted","Data":"b8721ba56e2d3a81771b58046949919bd90dae1726d2b78d838e70335e0663cd"}
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.331236 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"e106e94c-ae77-44e4-a276-4c01117a17f3","Type":"ContainerStarted","Data":"1dc6a5c0a863de65e7778d5f873266e9b1cfb9b09c420a8c2692a5170a9810d0"}
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.332246 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0"
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.334750 4857 generic.go:334] "Generic (PLEG): container finished" podID="6e769330-c4f2-4f98-bdbb-e4c70cfb9c30" containerID="2b7e1887d904816ccae33ba86288a02d75f22b7115913dda96aa5846d28015ec" exitCode=0
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.334783 4857 generic.go:334] "Generic (PLEG): container finished" podID="6e769330-c4f2-4f98-bdbb-e4c70cfb9c30" containerID="91c0642a896f25bf1a4506c6375db6f3ccfc047045afb90b89b1e7f9295ef104" exitCode=143
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.334802 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6e769330-c4f2-4f98-bdbb-e4c70cfb9c30","Type":"ContainerDied","Data":"2b7e1887d904816ccae33ba86288a02d75f22b7115913dda96aa5846d28015ec"}
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.334824 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6e769330-c4f2-4f98-bdbb-e4c70cfb9c30","Type":"ContainerDied","Data":"91c0642a896f25bf1a4506c6375db6f3ccfc047045afb90b89b1e7f9295ef104"}
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.368832 4857 scope.go:117] "RemoveContainer" containerID="55530b11608b9c0d5b44676b14cc2145c297d9a327a5cdeb4e5bcabbeb553ec8"
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.430818 4857 scope.go:117] "RemoveContainer" containerID="fd5580235cefcc136acf7417ff8ef2f9a0722a23bd2e797591c2a64effa494a2"
Nov 28 15:05:30 crc kubenswrapper[4857]: E1128 15:05:30.434187 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fd5580235cefcc136acf7417ff8ef2f9a0722a23bd2e797591c2a64effa494a2\": container with ID starting with fd5580235cefcc136acf7417ff8ef2f9a0722a23bd2e797591c2a64effa494a2 not found: ID does not exist" containerID="fd5580235cefcc136acf7417ff8ef2f9a0722a23bd2e797591c2a64effa494a2"
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.434240 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd5580235cefcc136acf7417ff8ef2f9a0722a23bd2e797591c2a64effa494a2"} err="failed to get container status \"fd5580235cefcc136acf7417ff8ef2f9a0722a23bd2e797591c2a64effa494a2\": rpc error: code = NotFound desc = could not find container \"fd5580235cefcc136acf7417ff8ef2f9a0722a23bd2e797591c2a64effa494a2\": container with ID starting with fd5580235cefcc136acf7417ff8ef2f9a0722a23bd2e797591c2a64effa494a2 not found: ID does not exist"
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.434267 4857 scope.go:117] "RemoveContainer" containerID="55530b11608b9c0d5b44676b14cc2145c297d9a327a5cdeb4e5bcabbeb553ec8"
Nov 28 15:05:30 crc kubenswrapper[4857]: E1128 15:05:30.434759 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"55530b11608b9c0d5b44676b14cc2145c297d9a327a5cdeb4e5bcabbeb553ec8\": container with ID starting with 55530b11608b9c0d5b44676b14cc2145c297d9a327a5cdeb4e5bcabbeb553ec8 not found: ID does not exist" containerID="55530b11608b9c0d5b44676b14cc2145c297d9a327a5cdeb4e5bcabbeb553ec8"
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.434791 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"55530b11608b9c0d5b44676b14cc2145c297d9a327a5cdeb4e5bcabbeb553ec8"} err="failed to get container status \"55530b11608b9c0d5b44676b14cc2145c297d9a327a5cdeb4e5bcabbeb553ec8\": rpc error: code = NotFound desc = could not find container \"55530b11608b9c0d5b44676b14cc2145c297d9a327a5cdeb4e5bcabbeb553ec8\": container with ID starting with 55530b11608b9c0d5b44676b14cc2145c297d9a327a5cdeb4e5bcabbeb553ec8 not found: ID does not exist"
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.434807 4857 scope.go:117] "RemoveContainer" containerID="fd5580235cefcc136acf7417ff8ef2f9a0722a23bd2e797591c2a64effa494a2"
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.435124 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fd5580235cefcc136acf7417ff8ef2f9a0722a23bd2e797591c2a64effa494a2"} err="failed to get container status \"fd5580235cefcc136acf7417ff8ef2f9a0722a23bd2e797591c2a64effa494a2\": rpc error: code = NotFound desc = could not find container \"fd5580235cefcc136acf7417ff8ef2f9a0722a23bd2e797591c2a64effa494a2\": container with ID starting with fd5580235cefcc136acf7417ff8ef2f9a0722a23bd2e797591c2a64effa494a2 not found: ID does not exist"
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.435146 4857 scope.go:117] "RemoveContainer" containerID="55530b11608b9c0d5b44676b14cc2145c297d9a327a5cdeb4e5bcabbeb553ec8"
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.435347 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"55530b11608b9c0d5b44676b14cc2145c297d9a327a5cdeb4e5bcabbeb553ec8"} err="failed to get container status \"55530b11608b9c0d5b44676b14cc2145c297d9a327a5cdeb4e5bcabbeb553ec8\": rpc error: code = NotFound desc = could not find container \"55530b11608b9c0d5b44676b14cc2145c297d9a327a5cdeb4e5bcabbeb553ec8\": container with ID starting with 55530b11608b9c0d5b44676b14cc2145c297d9a327a5cdeb4e5bcabbeb553ec8 not found: ID does not exist"
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.435443 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.454866 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.454796244 podStartE2EDuration="2.454796244s" podCreationTimestamp="2025-11-28 15:05:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:05:30.356440007 +0000 UTC m=+5780.480381444" watchObservedRunningTime="2025-11-28 15:05:30.454796244 +0000 UTC m=+5780.578737681"
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.464404 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbdkh\" (UniqueName: \"kubernetes.io/projected/25adb9bf-3f43-4e42-a7c6-00b8a1683722-kube-api-access-dbdkh\") pod \"25adb9bf-3f43-4e42-a7c6-00b8a1683722\" (UID: \"25adb9bf-3f43-4e42-a7c6-00b8a1683722\") "
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.464558 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/25adb9bf-3f43-4e42-a7c6-00b8a1683722-logs\") pod \"25adb9bf-3f43-4e42-a7c6-00b8a1683722\" (UID: \"25adb9bf-3f43-4e42-a7c6-00b8a1683722\") "
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.464683 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/25adb9bf-3f43-4e42-a7c6-00b8a1683722-config-data\") pod \"25adb9bf-3f43-4e42-a7c6-00b8a1683722\" (UID: \"25adb9bf-3f43-4e42-a7c6-00b8a1683722\") "
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.464787 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25adb9bf-3f43-4e42-a7c6-00b8a1683722-combined-ca-bundle\") pod \"25adb9bf-3f43-4e42-a7c6-00b8a1683722\" (UID: \"25adb9bf-3f43-4e42-a7c6-00b8a1683722\") "
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.474058 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/25adb9bf-3f43-4e42-a7c6-00b8a1683722-logs" (OuterVolumeSpecName: "logs") pod "25adb9bf-3f43-4e42-a7c6-00b8a1683722" (UID: "25adb9bf-3f43-4e42-a7c6-00b8a1683722"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.474745 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25adb9bf-3f43-4e42-a7c6-00b8a1683722-kube-api-access-dbdkh" (OuterVolumeSpecName: "kube-api-access-dbdkh") pod "25adb9bf-3f43-4e42-a7c6-00b8a1683722" (UID: "25adb9bf-3f43-4e42-a7c6-00b8a1683722"). InnerVolumeSpecName "kube-api-access-dbdkh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.500408 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25adb9bf-3f43-4e42-a7c6-00b8a1683722-config-data" (OuterVolumeSpecName: "config-data") pod "25adb9bf-3f43-4e42-a7c6-00b8a1683722" (UID: "25adb9bf-3f43-4e42-a7c6-00b8a1683722"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.513643 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25adb9bf-3f43-4e42-a7c6-00b8a1683722-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "25adb9bf-3f43-4e42-a7c6-00b8a1683722" (UID: "25adb9bf-3f43-4e42-a7c6-00b8a1683722"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.566575 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6e769330-c4f2-4f98-bdbb-e4c70cfb9c30-logs\") pod \"6e769330-c4f2-4f98-bdbb-e4c70cfb9c30\" (UID: \"6e769330-c4f2-4f98-bdbb-e4c70cfb9c30\") "
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.566704 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e769330-c4f2-4f98-bdbb-e4c70cfb9c30-config-data\") pod \"6e769330-c4f2-4f98-bdbb-e4c70cfb9c30\" (UID: \"6e769330-c4f2-4f98-bdbb-e4c70cfb9c30\") "
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.566777 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r8gfq\" (UniqueName: \"kubernetes.io/projected/6e769330-c4f2-4f98-bdbb-e4c70cfb9c30-kube-api-access-r8gfq\") pod \"6e769330-c4f2-4f98-bdbb-e4c70cfb9c30\" (UID: \"6e769330-c4f2-4f98-bdbb-e4c70cfb9c30\") "
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.566831 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e769330-c4f2-4f98-bdbb-e4c70cfb9c30-combined-ca-bundle\") pod \"6e769330-c4f2-4f98-bdbb-e4c70cfb9c30\" (UID: \"6e769330-c4f2-4f98-bdbb-e4c70cfb9c30\") "
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.567202 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/25adb9bf-3f43-4e42-a7c6-00b8a1683722-logs\") on node \"crc\" DevicePath \"\""
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.567219 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/25adb9bf-3f43-4e42-a7c6-00b8a1683722-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.567229 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/25adb9bf-3f43-4e42-a7c6-00b8a1683722-combined-ca-bundle\") on node
\"crc\" DevicePath \"\"" Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.567240 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbdkh\" (UniqueName: \"kubernetes.io/projected/25adb9bf-3f43-4e42-a7c6-00b8a1683722-kube-api-access-dbdkh\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.567363 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6e769330-c4f2-4f98-bdbb-e4c70cfb9c30-logs" (OuterVolumeSpecName: "logs") pod "6e769330-c4f2-4f98-bdbb-e4c70cfb9c30" (UID: "6e769330-c4f2-4f98-bdbb-e4c70cfb9c30"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.570244 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e769330-c4f2-4f98-bdbb-e4c70cfb9c30-kube-api-access-r8gfq" (OuterVolumeSpecName: "kube-api-access-r8gfq") pod "6e769330-c4f2-4f98-bdbb-e4c70cfb9c30" (UID: "6e769330-c4f2-4f98-bdbb-e4c70cfb9c30"). InnerVolumeSpecName "kube-api-access-r8gfq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.597222 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e769330-c4f2-4f98-bdbb-e4c70cfb9c30-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6e769330-c4f2-4f98-bdbb-e4c70cfb9c30" (UID: "6e769330-c4f2-4f98-bdbb-e4c70cfb9c30"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.616021 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e769330-c4f2-4f98-bdbb-e4c70cfb9c30-config-data" (OuterVolumeSpecName: "config-data") pod "6e769330-c4f2-4f98-bdbb-e4c70cfb9c30" (UID: "6e769330-c4f2-4f98-bdbb-e4c70cfb9c30"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.662819 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.672873 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.674923 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e769330-c4f2-4f98-bdbb-e4c70cfb9c30-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.675053 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6e769330-c4f2-4f98-bdbb-e4c70cfb9c30-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.675077 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e769330-c4f2-4f98-bdbb-e4c70cfb9c30-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.675093 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r8gfq\" (UniqueName: \"kubernetes.io/projected/6e769330-c4f2-4f98-bdbb-e4c70cfb9c30-kube-api-access-r8gfq\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.690114 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 28 15:05:30 crc kubenswrapper[4857]: E1128 15:05:30.690726 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c3aaa36-829f-4244-a40a-73810966b9ea" containerName="nova-manage" Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.690757 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c3aaa36-829f-4244-a40a-73810966b9ea" containerName="nova-manage" Nov 28 15:05:30 crc kubenswrapper[4857]: E1128 15:05:30.690795 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e769330-c4f2-4f98-bdbb-e4c70cfb9c30" containerName="nova-metadata-log" Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.690806 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e769330-c4f2-4f98-bdbb-e4c70cfb9c30" containerName="nova-metadata-log" Nov 28 15:05:30 crc kubenswrapper[4857]: E1128 15:05:30.690825 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25adb9bf-3f43-4e42-a7c6-00b8a1683722" containerName="nova-api-log" Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.690833 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="25adb9bf-3f43-4e42-a7c6-00b8a1683722" containerName="nova-api-log" Nov 28 15:05:30 crc kubenswrapper[4857]: E1128 15:05:30.690855 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25adb9bf-3f43-4e42-a7c6-00b8a1683722" containerName="nova-api-api" Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.690870 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="25adb9bf-3f43-4e42-a7c6-00b8a1683722" containerName="nova-api-api" Nov 28 15:05:30 crc kubenswrapper[4857]: E1128 15:05:30.690886 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e769330-c4f2-4f98-bdbb-e4c70cfb9c30" containerName="nova-metadata-metadata" Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.690895 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e769330-c4f2-4f98-bdbb-e4c70cfb9c30" containerName="nova-metadata-metadata" Nov 28 15:05:30 crc 
kubenswrapper[4857]: I1128 15:05:30.691167 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="25adb9bf-3f43-4e42-a7c6-00b8a1683722" containerName="nova-api-api" Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.691190 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e769330-c4f2-4f98-bdbb-e4c70cfb9c30" containerName="nova-metadata-metadata" Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.691207 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="25adb9bf-3f43-4e42-a7c6-00b8a1683722" containerName="nova-api-log" Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.691223 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e769330-c4f2-4f98-bdbb-e4c70cfb9c30" containerName="nova-metadata-log" Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.691242 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c3aaa36-829f-4244-a40a-73810966b9ea" containerName="nova-manage" Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.692475 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.697190 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.704576 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.777293 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8-config-data\") pod \"nova-api-0\" (UID: \"c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8\") " pod="openstack/nova-api-0" Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.777358 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8\") " pod="openstack/nova-api-0" Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.777582 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8-logs\") pod \"nova-api-0\" (UID: \"c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8\") " pod="openstack/nova-api-0" Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.777978 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-csrkg\" (UniqueName: \"kubernetes.io/projected/c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8-kube-api-access-csrkg\") pod \"nova-api-0\" (UID: \"c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8\") " pod="openstack/nova-api-0" Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.880203 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8-logs\") pod \"nova-api-0\" (UID: \"c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8\") " pod="openstack/nova-api-0" Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.880327 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-csrkg\" (UniqueName: \"kubernetes.io/projected/c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8-kube-api-access-csrkg\") 
pod \"nova-api-0\" (UID: \"c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8\") " pod="openstack/nova-api-0" Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.880418 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8-config-data\") pod \"nova-api-0\" (UID: \"c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8\") " pod="openstack/nova-api-0" Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.880459 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8\") " pod="openstack/nova-api-0" Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.881674 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8-logs\") pod \"nova-api-0\" (UID: \"c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8\") " pod="openstack/nova-api-0" Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.885430 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8\") " pod="openstack/nova-api-0" Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.886795 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8-config-data\") pod \"nova-api-0\" (UID: \"c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8\") " pod="openstack/nova-api-0" Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.899473 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-csrkg\" (UniqueName: \"kubernetes.io/projected/c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8-kube-api-access-csrkg\") pod \"nova-api-0\" (UID: \"c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8\") " pod="openstack/nova-api-0" Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.939711 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.960613 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:05:30 crc kubenswrapper[4857]: I1128 15:05:30.969162 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-68986bcc97-7cg9g" Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.065275 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f58c67bc9-qv6mh"] Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.070001 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.076204 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-f58c67bc9-qv6mh" podUID="387fe7ff-da63-4769-a0f8-312523c30249" containerName="dnsmasq-dns" containerID="cri-o://1c32d4cff9c2c1ef0466922ac39d1338cf25b645aedc91be88dd3bddf5c6c288" gracePeriod=10 Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.354113 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6e769330-c4f2-4f98-bdbb-e4c70cfb9c30","Type":"ContainerDied","Data":"8e28893232b60876d1d2ddb81eeb5cadf479b63c526378f8afc83872f33ec211"} Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.354641 4857 scope.go:117] "RemoveContainer" containerID="2b7e1887d904816ccae33ba86288a02d75f22b7115913dda96aa5846d28015ec" Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.354127 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.362099 4857 generic.go:334] "Generic (PLEG): container finished" podID="387fe7ff-da63-4769-a0f8-312523c30249" containerID="1c32d4cff9c2c1ef0466922ac39d1338cf25b645aedc91be88dd3bddf5c6c288" exitCode=0 Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.362212 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f58c67bc9-qv6mh" event={"ID":"387fe7ff-da63-4769-a0f8-312523c30249","Type":"ContainerDied","Data":"1c32d4cff9c2c1ef0466922ac39d1338cf25b645aedc91be88dd3bddf5c6c288"} Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.378171 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.385369 4857 scope.go:117] "RemoveContainer" containerID="91c0642a896f25bf1a4506c6375db6f3ccfc047045afb90b89b1e7f9295ef104" Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.494801 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.512079 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.524135 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.534353 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.536997 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.545998 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.603546 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.608458 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a08be057-917a-4491-9e59-20a10d296b2a-config-data\") pod \"nova-metadata-0\" (UID: \"a08be057-917a-4491-9e59-20a10d296b2a\") " pod="openstack/nova-metadata-0" Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.609201 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a08be057-917a-4491-9e59-20a10d296b2a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a08be057-917a-4491-9e59-20a10d296b2a\") " pod="openstack/nova-metadata-0" Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.609301 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a08be057-917a-4491-9e59-20a10d296b2a-logs\") pod \"nova-metadata-0\" (UID: \"a08be057-917a-4491-9e59-20a10d296b2a\") " pod="openstack/nova-metadata-0" Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.609331 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n47ql\" (UniqueName: \"kubernetes.io/projected/a08be057-917a-4491-9e59-20a10d296b2a-kube-api-access-n47ql\") pod \"nova-metadata-0\" (UID: \"a08be057-917a-4491-9e59-20a10d296b2a\") " pod="openstack/nova-metadata-0" Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.655308 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f58c67bc9-qv6mh" Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.711481 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a08be057-917a-4491-9e59-20a10d296b2a-config-data\") pod \"nova-metadata-0\" (UID: \"a08be057-917a-4491-9e59-20a10d296b2a\") " pod="openstack/nova-metadata-0" Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.711588 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a08be057-917a-4491-9e59-20a10d296b2a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a08be057-917a-4491-9e59-20a10d296b2a\") " pod="openstack/nova-metadata-0" Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.711691 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a08be057-917a-4491-9e59-20a10d296b2a-logs\") pod \"nova-metadata-0\" (UID: \"a08be057-917a-4491-9e59-20a10d296b2a\") " pod="openstack/nova-metadata-0" Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.711726 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n47ql\" (UniqueName: \"kubernetes.io/projected/a08be057-917a-4491-9e59-20a10d296b2a-kube-api-access-n47ql\") pod \"nova-metadata-0\" (UID: \"a08be057-917a-4491-9e59-20a10d296b2a\") " pod="openstack/nova-metadata-0" Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.713672 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a08be057-917a-4491-9e59-20a10d296b2a-logs\") pod \"nova-metadata-0\" (UID: \"a08be057-917a-4491-9e59-20a10d296b2a\") " pod="openstack/nova-metadata-0" Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.725240 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a08be057-917a-4491-9e59-20a10d296b2a-config-data\") pod \"nova-metadata-0\" (UID: \"a08be057-917a-4491-9e59-20a10d296b2a\") " pod="openstack/nova-metadata-0" Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.730787 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a08be057-917a-4491-9e59-20a10d296b2a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a08be057-917a-4491-9e59-20a10d296b2a\") " pod="openstack/nova-metadata-0" Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.733280 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n47ql\" (UniqueName: \"kubernetes.io/projected/a08be057-917a-4491-9e59-20a10d296b2a-kube-api-access-n47ql\") pod \"nova-metadata-0\" (UID: \"a08be057-917a-4491-9e59-20a10d296b2a\") " pod="openstack/nova-metadata-0" Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.813226 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/387fe7ff-da63-4769-a0f8-312523c30249-ovsdbserver-nb\") pod \"387fe7ff-da63-4769-a0f8-312523c30249\" (UID: \"387fe7ff-da63-4769-a0f8-312523c30249\") " Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.813393 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m8mdz\" (UniqueName: \"kubernetes.io/projected/387fe7ff-da63-4769-a0f8-312523c30249-kube-api-access-m8mdz\") pod 
\"387fe7ff-da63-4769-a0f8-312523c30249\" (UID: \"387fe7ff-da63-4769-a0f8-312523c30249\") " Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.813421 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/387fe7ff-da63-4769-a0f8-312523c30249-config\") pod \"387fe7ff-da63-4769-a0f8-312523c30249\" (UID: \"387fe7ff-da63-4769-a0f8-312523c30249\") " Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.813449 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/387fe7ff-da63-4769-a0f8-312523c30249-ovsdbserver-sb\") pod \"387fe7ff-da63-4769-a0f8-312523c30249\" (UID: \"387fe7ff-da63-4769-a0f8-312523c30249\") " Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.813506 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/387fe7ff-da63-4769-a0f8-312523c30249-dns-svc\") pod \"387fe7ff-da63-4769-a0f8-312523c30249\" (UID: \"387fe7ff-da63-4769-a0f8-312523c30249\") " Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.829725 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/387fe7ff-da63-4769-a0f8-312523c30249-kube-api-access-m8mdz" (OuterVolumeSpecName: "kube-api-access-m8mdz") pod "387fe7ff-da63-4769-a0f8-312523c30249" (UID: "387fe7ff-da63-4769-a0f8-312523c30249"). InnerVolumeSpecName "kube-api-access-m8mdz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.856688 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.877252 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/387fe7ff-da63-4769-a0f8-312523c30249-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "387fe7ff-da63-4769-a0f8-312523c30249" (UID: "387fe7ff-da63-4769-a0f8-312523c30249"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.897515 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/387fe7ff-da63-4769-a0f8-312523c30249-config" (OuterVolumeSpecName: "config") pod "387fe7ff-da63-4769-a0f8-312523c30249" (UID: "387fe7ff-da63-4769-a0f8-312523c30249"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.912386 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/387fe7ff-da63-4769-a0f8-312523c30249-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "387fe7ff-da63-4769-a0f8-312523c30249" (UID: "387fe7ff-da63-4769-a0f8-312523c30249"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.914671 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/387fe7ff-da63-4769-a0f8-312523c30249-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "387fe7ff-da63-4769-a0f8-312523c30249" (UID: "387fe7ff-da63-4769-a0f8-312523c30249"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.916275 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m8mdz\" (UniqueName: \"kubernetes.io/projected/387fe7ff-da63-4769-a0f8-312523c30249-kube-api-access-m8mdz\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.916360 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/387fe7ff-da63-4769-a0f8-312523c30249-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.916413 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/387fe7ff-da63-4769-a0f8-312523c30249-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.916464 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/387fe7ff-da63-4769-a0f8-312523c30249-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:31 crc kubenswrapper[4857]: I1128 15:05:31.916512 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/387fe7ff-da63-4769-a0f8-312523c30249-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:32 crc kubenswrapper[4857]: I1128 15:05:32.253593 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25adb9bf-3f43-4e42-a7c6-00b8a1683722" path="/var/lib/kubelet/pods/25adb9bf-3f43-4e42-a7c6-00b8a1683722/volumes" Nov 28 15:05:32 crc kubenswrapper[4857]: I1128 15:05:32.256507 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6e769330-c4f2-4f98-bdbb-e4c70cfb9c30" path="/var/lib/kubelet/pods/6e769330-c4f2-4f98-bdbb-e4c70cfb9c30/volumes" Nov 28 15:05:32 crc kubenswrapper[4857]: W1128 15:05:32.336440 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda08be057_917a_4491_9e59_20a10d296b2a.slice/crio-57ac22b265fd0593735d2f2f36b2b7a059f61842127b6a4d835d5695b253d201 WatchSource:0}: Error finding container 57ac22b265fd0593735d2f2f36b2b7a059f61842127b6a4d835d5695b253d201: Status 404 returned error can't find the container with id 57ac22b265fd0593735d2f2f36b2b7a059f61842127b6a4d835d5695b253d201 Nov 28 15:05:32 crc kubenswrapper[4857]: I1128 15:05:32.339877 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:05:32 crc kubenswrapper[4857]: I1128 15:05:32.380767 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f58c67bc9-qv6mh" event={"ID":"387fe7ff-da63-4769-a0f8-312523c30249","Type":"ContainerDied","Data":"4b8f16805114b297d6e079bf15b69571c403696e7f24fb4bd1c6876718e1603e"} Nov 28 15:05:32 crc kubenswrapper[4857]: I1128 15:05:32.380818 4857 scope.go:117] "RemoveContainer" containerID="1c32d4cff9c2c1ef0466922ac39d1338cf25b645aedc91be88dd3bddf5c6c288" Nov 28 15:05:32 crc kubenswrapper[4857]: I1128 15:05:32.381034 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f58c67bc9-qv6mh" Nov 28 15:05:32 crc kubenswrapper[4857]: I1128 15:05:32.383208 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a08be057-917a-4491-9e59-20a10d296b2a","Type":"ContainerStarted","Data":"57ac22b265fd0593735d2f2f36b2b7a059f61842127b6a4d835d5695b253d201"} Nov 28 15:05:32 crc kubenswrapper[4857]: I1128 15:05:32.390712 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8","Type":"ContainerStarted","Data":"be3f00e5daa88c1e47a5c93dc67e2287432919d48f62b9ff51ac69cf207d6ebb"} Nov 28 15:05:32 crc kubenswrapper[4857]: I1128 15:05:32.390845 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8","Type":"ContainerStarted","Data":"64ece257fbee7ea41f8f00cc7b564eb5123fe83419354dcd7a380e577aa00ad5"} Nov 28 15:05:32 crc kubenswrapper[4857]: I1128 15:05:32.390969 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8","Type":"ContainerStarted","Data":"6f7327b38159a3c6e42716dcccc93fcaeb7c3d6a1454b47232b4f198627fa52b"} Nov 28 15:05:32 crc kubenswrapper[4857]: I1128 15:05:32.412788 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f58c67bc9-qv6mh"] Nov 28 15:05:32 crc kubenswrapper[4857]: I1128 15:05:32.416669 4857 scope.go:117] "RemoveContainer" containerID="50b70fbd926bb9809512bc6fb97951d1151b10a03f2e013d45c951bc811d2e5b" Nov 28 15:05:32 crc kubenswrapper[4857]: I1128 15:05:32.427803 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-f58c67bc9-qv6mh"] Nov 28 15:05:32 crc kubenswrapper[4857]: I1128 15:05:32.436456 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.436428334 podStartE2EDuration="2.436428334s" podCreationTimestamp="2025-11-28 15:05:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:05:32.421982978 +0000 UTC m=+5782.545924425" watchObservedRunningTime="2025-11-28 15:05:32.436428334 +0000 UTC m=+5782.560369771" Nov 28 15:05:33 crc kubenswrapper[4857]: I1128 15:05:33.401399 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a08be057-917a-4491-9e59-20a10d296b2a","Type":"ContainerStarted","Data":"1e70309ea2ff21a982495766c38ec1c5867c7e98ccc2aa093c9daed711688ba4"} Nov 28 15:05:33 crc kubenswrapper[4857]: I1128 15:05:33.401862 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a08be057-917a-4491-9e59-20a10d296b2a","Type":"ContainerStarted","Data":"bd7cf534a52361a26898637494602c2214a859e23458e04daa953ed8e95b2e05"} Nov 28 15:05:33 crc kubenswrapper[4857]: I1128 15:05:33.431774 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.4317486600000002 podStartE2EDuration="2.43174866s" podCreationTimestamp="2025-11-28 15:05:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:05:33.420783917 +0000 UTC m=+5783.544725344" watchObservedRunningTime="2025-11-28 15:05:33.43174866 +0000 UTC m=+5783.555690097" Nov 28 15:05:33 crc kubenswrapper[4857]: I1128 15:05:33.936191 4857 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.075421 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c7490c0-7ddc-497f-98fa-7588bc02d9b0-config-data\") pod \"7c7490c0-7ddc-497f-98fa-7588bc02d9b0\" (UID: \"7c7490c0-7ddc-497f-98fa-7588bc02d9b0\") " Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.075967 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-69cq7\" (UniqueName: \"kubernetes.io/projected/7c7490c0-7ddc-497f-98fa-7588bc02d9b0-kube-api-access-69cq7\") pod \"7c7490c0-7ddc-497f-98fa-7588bc02d9b0\" (UID: \"7c7490c0-7ddc-497f-98fa-7588bc02d9b0\") " Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.076258 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c7490c0-7ddc-497f-98fa-7588bc02d9b0-combined-ca-bundle\") pod \"7c7490c0-7ddc-497f-98fa-7588bc02d9b0\" (UID: \"7c7490c0-7ddc-497f-98fa-7588bc02d9b0\") " Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.083767 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c7490c0-7ddc-497f-98fa-7588bc02d9b0-kube-api-access-69cq7" (OuterVolumeSpecName: "kube-api-access-69cq7") pod "7c7490c0-7ddc-497f-98fa-7588bc02d9b0" (UID: "7c7490c0-7ddc-497f-98fa-7588bc02d9b0"). InnerVolumeSpecName "kube-api-access-69cq7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.116041 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c7490c0-7ddc-497f-98fa-7588bc02d9b0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7c7490c0-7ddc-497f-98fa-7588bc02d9b0" (UID: "7c7490c0-7ddc-497f-98fa-7588bc02d9b0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.118053 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c7490c0-7ddc-497f-98fa-7588bc02d9b0-config-data" (OuterVolumeSpecName: "config-data") pod "7c7490c0-7ddc-497f-98fa-7588bc02d9b0" (UID: "7c7490c0-7ddc-497f-98fa-7588bc02d9b0"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.178930 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c7490c0-7ddc-497f-98fa-7588bc02d9b0-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.179017 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-69cq7\" (UniqueName: \"kubernetes.io/projected/7c7490c0-7ddc-497f-98fa-7588bc02d9b0-kube-api-access-69cq7\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.179041 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c7490c0-7ddc-497f-98fa-7588bc02d9b0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.248069 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="387fe7ff-da63-4769-a0f8-312523c30249" path="/var/lib/kubelet/pods/387fe7ff-da63-4769-a0f8-312523c30249/volumes" Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.421979 4857 generic.go:334] "Generic (PLEG): container finished" podID="7c7490c0-7ddc-497f-98fa-7588bc02d9b0" containerID="01a803973671fb56f8e1bc6393a042412fc94dc34f650e38832131bc8ec95315" exitCode=0 Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.422110 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.422114 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7c7490c0-7ddc-497f-98fa-7588bc02d9b0","Type":"ContainerDied","Data":"01a803973671fb56f8e1bc6393a042412fc94dc34f650e38832131bc8ec95315"} Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.423452 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7c7490c0-7ddc-497f-98fa-7588bc02d9b0","Type":"ContainerDied","Data":"76b1a86bd0631dab7baecc95b64d2df1f280121bb7dc9130a4f864aa595f5959"} Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.423493 4857 scope.go:117] "RemoveContainer" containerID="01a803973671fb56f8e1bc6393a042412fc94dc34f650e38832131bc8ec95315" Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.473451 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.499861 4857 scope.go:117] "RemoveContainer" containerID="01a803973671fb56f8e1bc6393a042412fc94dc34f650e38832131bc8ec95315" Nov 28 15:05:34 crc kubenswrapper[4857]: E1128 15:05:34.500651 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"01a803973671fb56f8e1bc6393a042412fc94dc34f650e38832131bc8ec95315\": container with ID starting with 01a803973671fb56f8e1bc6393a042412fc94dc34f650e38832131bc8ec95315 not found: ID does not exist" containerID="01a803973671fb56f8e1bc6393a042412fc94dc34f650e38832131bc8ec95315" Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.500749 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01a803973671fb56f8e1bc6393a042412fc94dc34f650e38832131bc8ec95315"} err="failed to get container status \"01a803973671fb56f8e1bc6393a042412fc94dc34f650e38832131bc8ec95315\": rpc error: code = NotFound desc = could not find container 
\"01a803973671fb56f8e1bc6393a042412fc94dc34f650e38832131bc8ec95315\": container with ID starting with 01a803973671fb56f8e1bc6393a042412fc94dc34f650e38832131bc8ec95315 not found: ID does not exist" Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.502142 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.515582 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 15:05:34 crc kubenswrapper[4857]: E1128 15:05:34.516199 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c7490c0-7ddc-497f-98fa-7588bc02d9b0" containerName="nova-scheduler-scheduler" Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.516233 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c7490c0-7ddc-497f-98fa-7588bc02d9b0" containerName="nova-scheduler-scheduler" Nov 28 15:05:34 crc kubenswrapper[4857]: E1128 15:05:34.516256 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="387fe7ff-da63-4769-a0f8-312523c30249" containerName="init" Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.516268 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="387fe7ff-da63-4769-a0f8-312523c30249" containerName="init" Nov 28 15:05:34 crc kubenswrapper[4857]: E1128 15:05:34.516285 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="387fe7ff-da63-4769-a0f8-312523c30249" containerName="dnsmasq-dns" Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.516294 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="387fe7ff-da63-4769-a0f8-312523c30249" containerName="dnsmasq-dns" Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.516593 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="387fe7ff-da63-4769-a0f8-312523c30249" containerName="dnsmasq-dns" Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.516630 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c7490c0-7ddc-497f-98fa-7588bc02d9b0" containerName="nova-scheduler-scheduler" Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.517669 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.520313 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.528513 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.691122 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c4n24\" (UniqueName: \"kubernetes.io/projected/b5a3d538-13e4-46b8-a367-7694a0c8c54f-kube-api-access-c4n24\") pod \"nova-scheduler-0\" (UID: \"b5a3d538-13e4-46b8-a367-7694a0c8c54f\") " pod="openstack/nova-scheduler-0" Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.691374 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5a3d538-13e4-46b8-a367-7694a0c8c54f-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"b5a3d538-13e4-46b8-a367-7694a0c8c54f\") " pod="openstack/nova-scheduler-0" Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.691509 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5a3d538-13e4-46b8-a367-7694a0c8c54f-config-data\") pod \"nova-scheduler-0\" (UID: \"b5a3d538-13e4-46b8-a367-7694a0c8c54f\") " pod="openstack/nova-scheduler-0" Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.793582 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5a3d538-13e4-46b8-a367-7694a0c8c54f-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"b5a3d538-13e4-46b8-a367-7694a0c8c54f\") " pod="openstack/nova-scheduler-0" Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.794091 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5a3d538-13e4-46b8-a367-7694a0c8c54f-config-data\") pod \"nova-scheduler-0\" (UID: \"b5a3d538-13e4-46b8-a367-7694a0c8c54f\") " pod="openstack/nova-scheduler-0" Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.794284 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c4n24\" (UniqueName: \"kubernetes.io/projected/b5a3d538-13e4-46b8-a367-7694a0c8c54f-kube-api-access-c4n24\") pod \"nova-scheduler-0\" (UID: \"b5a3d538-13e4-46b8-a367-7694a0c8c54f\") " pod="openstack/nova-scheduler-0" Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.801831 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5a3d538-13e4-46b8-a367-7694a0c8c54f-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"b5a3d538-13e4-46b8-a367-7694a0c8c54f\") " pod="openstack/nova-scheduler-0" Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.807661 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5a3d538-13e4-46b8-a367-7694a0c8c54f-config-data\") pod \"nova-scheduler-0\" (UID: \"b5a3d538-13e4-46b8-a367-7694a0c8c54f\") " pod="openstack/nova-scheduler-0" Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.825497 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c4n24\" (UniqueName: 
\"kubernetes.io/projected/b5a3d538-13e4-46b8-a367-7694a0c8c54f-kube-api-access-c4n24\") pod \"nova-scheduler-0\" (UID: \"b5a3d538-13e4-46b8-a367-7694a0c8c54f\") " pod="openstack/nova-scheduler-0" Nov 28 15:05:34 crc kubenswrapper[4857]: I1128 15:05:34.843109 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 15:05:35 crc kubenswrapper[4857]: I1128 15:05:35.128009 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 15:05:35 crc kubenswrapper[4857]: W1128 15:05:35.137447 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb5a3d538_13e4_46b8_a367_7694a0c8c54f.slice/crio-702740df9e4cf7e44c797570c3d4bde8fc79b1d58b767de001ce634007f1adf0 WatchSource:0}: Error finding container 702740df9e4cf7e44c797570c3d4bde8fc79b1d58b767de001ce634007f1adf0: Status 404 returned error can't find the container with id 702740df9e4cf7e44c797570c3d4bde8fc79b1d58b767de001ce634007f1adf0 Nov 28 15:05:35 crc kubenswrapper[4857]: I1128 15:05:35.338718 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-gxjfh"] Nov 28 15:05:35 crc kubenswrapper[4857]: I1128 15:05:35.341436 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gxjfh" Nov 28 15:05:35 crc kubenswrapper[4857]: I1128 15:05:35.347642 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gxjfh"] Nov 28 15:05:35 crc kubenswrapper[4857]: I1128 15:05:35.441494 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"b5a3d538-13e4-46b8-a367-7694a0c8c54f","Type":"ContainerStarted","Data":"9a32cf3eef57b99d2903f9dd4eff9f7c90e3edfdba0a3bfd484b6fd68afe798c"} Nov 28 15:05:35 crc kubenswrapper[4857]: I1128 15:05:35.441564 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"b5a3d538-13e4-46b8-a367-7694a0c8c54f","Type":"ContainerStarted","Data":"702740df9e4cf7e44c797570c3d4bde8fc79b1d58b767de001ce634007f1adf0"} Nov 28 15:05:35 crc kubenswrapper[4857]: I1128 15:05:35.458421 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=1.458397033 podStartE2EDuration="1.458397033s" podCreationTimestamp="2025-11-28 15:05:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:05:35.456771549 +0000 UTC m=+5785.580712996" watchObservedRunningTime="2025-11-28 15:05:35.458397033 +0000 UTC m=+5785.582338480" Nov 28 15:05:35 crc kubenswrapper[4857]: I1128 15:05:35.506655 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b9d00f2-539c-431b-bf1a-5934e2f0e904-utilities\") pod \"redhat-operators-gxjfh\" (UID: \"5b9d00f2-539c-431b-bf1a-5934e2f0e904\") " pod="openshift-marketplace/redhat-operators-gxjfh" Nov 28 15:05:35 crc kubenswrapper[4857]: I1128 15:05:35.507237 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b9d00f2-539c-431b-bf1a-5934e2f0e904-catalog-content\") pod \"redhat-operators-gxjfh\" (UID: \"5b9d00f2-539c-431b-bf1a-5934e2f0e904\") " 
pod="openshift-marketplace/redhat-operators-gxjfh" Nov 28 15:05:35 crc kubenswrapper[4857]: I1128 15:05:35.507308 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f7hr7\" (UniqueName: \"kubernetes.io/projected/5b9d00f2-539c-431b-bf1a-5934e2f0e904-kube-api-access-f7hr7\") pod \"redhat-operators-gxjfh\" (UID: \"5b9d00f2-539c-431b-bf1a-5934e2f0e904\") " pod="openshift-marketplace/redhat-operators-gxjfh" Nov 28 15:05:35 crc kubenswrapper[4857]: I1128 15:05:35.609213 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b9d00f2-539c-431b-bf1a-5934e2f0e904-catalog-content\") pod \"redhat-operators-gxjfh\" (UID: \"5b9d00f2-539c-431b-bf1a-5934e2f0e904\") " pod="openshift-marketplace/redhat-operators-gxjfh" Nov 28 15:05:35 crc kubenswrapper[4857]: I1128 15:05:35.609270 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f7hr7\" (UniqueName: \"kubernetes.io/projected/5b9d00f2-539c-431b-bf1a-5934e2f0e904-kube-api-access-f7hr7\") pod \"redhat-operators-gxjfh\" (UID: \"5b9d00f2-539c-431b-bf1a-5934e2f0e904\") " pod="openshift-marketplace/redhat-operators-gxjfh" Nov 28 15:05:35 crc kubenswrapper[4857]: I1128 15:05:35.609313 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b9d00f2-539c-431b-bf1a-5934e2f0e904-utilities\") pod \"redhat-operators-gxjfh\" (UID: \"5b9d00f2-539c-431b-bf1a-5934e2f0e904\") " pod="openshift-marketplace/redhat-operators-gxjfh" Nov 28 15:05:35 crc kubenswrapper[4857]: I1128 15:05:35.609746 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b9d00f2-539c-431b-bf1a-5934e2f0e904-catalog-content\") pod \"redhat-operators-gxjfh\" (UID: \"5b9d00f2-539c-431b-bf1a-5934e2f0e904\") " pod="openshift-marketplace/redhat-operators-gxjfh" Nov 28 15:05:35 crc kubenswrapper[4857]: I1128 15:05:35.610302 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b9d00f2-539c-431b-bf1a-5934e2f0e904-utilities\") pod \"redhat-operators-gxjfh\" (UID: \"5b9d00f2-539c-431b-bf1a-5934e2f0e904\") " pod="openshift-marketplace/redhat-operators-gxjfh" Nov 28 15:05:35 crc kubenswrapper[4857]: I1128 15:05:35.647363 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f7hr7\" (UniqueName: \"kubernetes.io/projected/5b9d00f2-539c-431b-bf1a-5934e2f0e904-kube-api-access-f7hr7\") pod \"redhat-operators-gxjfh\" (UID: \"5b9d00f2-539c-431b-bf1a-5934e2f0e904\") " pod="openshift-marketplace/redhat-operators-gxjfh" Nov 28 15:05:35 crc kubenswrapper[4857]: I1128 15:05:35.685625 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-gxjfh" Nov 28 15:05:36 crc kubenswrapper[4857]: I1128 15:05:36.265999 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7c7490c0-7ddc-497f-98fa-7588bc02d9b0" path="/var/lib/kubelet/pods/7c7490c0-7ddc-497f-98fa-7588bc02d9b0/volumes" Nov 28 15:05:36 crc kubenswrapper[4857]: I1128 15:05:36.290318 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gxjfh"] Nov 28 15:05:36 crc kubenswrapper[4857]: I1128 15:05:36.463180 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gxjfh" event={"ID":"5b9d00f2-539c-431b-bf1a-5934e2f0e904","Type":"ContainerStarted","Data":"5b7e00fc4716407a846ac7a0937a681062ec5016eae862e72e41724aed495814"} Nov 28 15:05:36 crc kubenswrapper[4857]: I1128 15:05:36.857606 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 15:05:36 crc kubenswrapper[4857]: I1128 15:05:36.857962 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 15:05:37 crc kubenswrapper[4857]: I1128 15:05:37.473090 4857 generic.go:334] "Generic (PLEG): container finished" podID="5b9d00f2-539c-431b-bf1a-5934e2f0e904" containerID="e4be5e74f95b80a1c29857bab0b19c3dffde2e0425c24833a94ca9c3901925c8" exitCode=0 Nov 28 15:05:37 crc kubenswrapper[4857]: I1128 15:05:37.473133 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gxjfh" event={"ID":"5b9d00f2-539c-431b-bf1a-5934e2f0e904","Type":"ContainerDied","Data":"e4be5e74f95b80a1c29857bab0b19c3dffde2e0425c24833a94ca9c3901925c8"} Nov 28 15:05:38 crc kubenswrapper[4857]: I1128 15:05:38.483244 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gxjfh" event={"ID":"5b9d00f2-539c-431b-bf1a-5934e2f0e904","Type":"ContainerStarted","Data":"2f8b4ae33bf248726702cfe9c24d1fc0a2079a88d47309ec491aa3d5755debcf"} Nov 28 15:05:38 crc kubenswrapper[4857]: I1128 15:05:38.865434 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 28 15:05:39 crc kubenswrapper[4857]: I1128 15:05:39.476474 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-nnqbh"] Nov 28 15:05:39 crc kubenswrapper[4857]: I1128 15:05:39.479358 4857 util.go:30] "No sandbox for pod can be found. 
Nov 28 15:05:39 crc kubenswrapper[4857]: I1128 15:05:39.485702 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts"
Nov 28 15:05:39 crc kubenswrapper[4857]: I1128 15:05:39.486047 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data"
Nov 28 15:05:39 crc kubenswrapper[4857]: I1128 15:05:39.502800 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-nnqbh"]
Nov 28 15:05:39 crc kubenswrapper[4857]: I1128 15:05:39.532683 4857 generic.go:334] "Generic (PLEG): container finished" podID="5b9d00f2-539c-431b-bf1a-5934e2f0e904" containerID="2f8b4ae33bf248726702cfe9c24d1fc0a2079a88d47309ec491aa3d5755debcf" exitCode=0
Nov 28 15:05:39 crc kubenswrapper[4857]: I1128 15:05:39.532728 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gxjfh" event={"ID":"5b9d00f2-539c-431b-bf1a-5934e2f0e904","Type":"ContainerDied","Data":"2f8b4ae33bf248726702cfe9c24d1fc0a2079a88d47309ec491aa3d5755debcf"}
Nov 28 15:05:39 crc kubenswrapper[4857]: I1128 15:05:39.595237 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m8x49\" (UniqueName: \"kubernetes.io/projected/a1424332-a4bd-46ca-9ca6-a2f9a0af9238-kube-api-access-m8x49\") pod \"nova-cell1-cell-mapping-nnqbh\" (UID: \"a1424332-a4bd-46ca-9ca6-a2f9a0af9238\") " pod="openstack/nova-cell1-cell-mapping-nnqbh"
Nov 28 15:05:39 crc kubenswrapper[4857]: I1128 15:05:39.595302 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1424332-a4bd-46ca-9ca6-a2f9a0af9238-config-data\") pod \"nova-cell1-cell-mapping-nnqbh\" (UID: \"a1424332-a4bd-46ca-9ca6-a2f9a0af9238\") " pod="openstack/nova-cell1-cell-mapping-nnqbh"
Nov 28 15:05:39 crc kubenswrapper[4857]: I1128 15:05:39.595344 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1424332-a4bd-46ca-9ca6-a2f9a0af9238-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-nnqbh\" (UID: \"a1424332-a4bd-46ca-9ca6-a2f9a0af9238\") " pod="openstack/nova-cell1-cell-mapping-nnqbh"
Nov 28 15:05:39 crc kubenswrapper[4857]: I1128 15:05:39.595370 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a1424332-a4bd-46ca-9ca6-a2f9a0af9238-scripts\") pod \"nova-cell1-cell-mapping-nnqbh\" (UID: \"a1424332-a4bd-46ca-9ca6-a2f9a0af9238\") " pod="openstack/nova-cell1-cell-mapping-nnqbh"
Nov 28 15:05:39 crc kubenswrapper[4857]: I1128 15:05:39.698403 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1424332-a4bd-46ca-9ca6-a2f9a0af9238-config-data\") pod \"nova-cell1-cell-mapping-nnqbh\" (UID: \"a1424332-a4bd-46ca-9ca6-a2f9a0af9238\") " pod="openstack/nova-cell1-cell-mapping-nnqbh"
Nov 28 15:05:39 crc kubenswrapper[4857]: I1128 15:05:39.698819 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1424332-a4bd-46ca-9ca6-a2f9a0af9238-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-nnqbh\" (UID: \"a1424332-a4bd-46ca-9ca6-a2f9a0af9238\") " pod="openstack/nova-cell1-cell-mapping-nnqbh"
pod="openstack/nova-cell1-cell-mapping-nnqbh" Nov 28 15:05:39 crc kubenswrapper[4857]: I1128 15:05:39.699002 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a1424332-a4bd-46ca-9ca6-a2f9a0af9238-scripts\") pod \"nova-cell1-cell-mapping-nnqbh\" (UID: \"a1424332-a4bd-46ca-9ca6-a2f9a0af9238\") " pod="openstack/nova-cell1-cell-mapping-nnqbh" Nov 28 15:05:39 crc kubenswrapper[4857]: I1128 15:05:39.699837 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m8x49\" (UniqueName: \"kubernetes.io/projected/a1424332-a4bd-46ca-9ca6-a2f9a0af9238-kube-api-access-m8x49\") pod \"nova-cell1-cell-mapping-nnqbh\" (UID: \"a1424332-a4bd-46ca-9ca6-a2f9a0af9238\") " pod="openstack/nova-cell1-cell-mapping-nnqbh" Nov 28 15:05:39 crc kubenswrapper[4857]: I1128 15:05:39.706273 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a1424332-a4bd-46ca-9ca6-a2f9a0af9238-scripts\") pod \"nova-cell1-cell-mapping-nnqbh\" (UID: \"a1424332-a4bd-46ca-9ca6-a2f9a0af9238\") " pod="openstack/nova-cell1-cell-mapping-nnqbh" Nov 28 15:05:39 crc kubenswrapper[4857]: I1128 15:05:39.707510 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1424332-a4bd-46ca-9ca6-a2f9a0af9238-config-data\") pod \"nova-cell1-cell-mapping-nnqbh\" (UID: \"a1424332-a4bd-46ca-9ca6-a2f9a0af9238\") " pod="openstack/nova-cell1-cell-mapping-nnqbh" Nov 28 15:05:39 crc kubenswrapper[4857]: I1128 15:05:39.715688 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1424332-a4bd-46ca-9ca6-a2f9a0af9238-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-nnqbh\" (UID: \"a1424332-a4bd-46ca-9ca6-a2f9a0af9238\") " pod="openstack/nova-cell1-cell-mapping-nnqbh" Nov 28 15:05:39 crc kubenswrapper[4857]: I1128 15:05:39.727629 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m8x49\" (UniqueName: \"kubernetes.io/projected/a1424332-a4bd-46ca-9ca6-a2f9a0af9238-kube-api-access-m8x49\") pod \"nova-cell1-cell-mapping-nnqbh\" (UID: \"a1424332-a4bd-46ca-9ca6-a2f9a0af9238\") " pod="openstack/nova-cell1-cell-mapping-nnqbh" Nov 28 15:05:39 crc kubenswrapper[4857]: I1128 15:05:39.819521 4857 util.go:30] "No sandbox for pod can be found. 
Nov 28 15:05:39 crc kubenswrapper[4857]: I1128 15:05:39.843694 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Nov 28 15:05:40 crc kubenswrapper[4857]: W1128 15:05:40.348714 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda1424332_a4bd_46ca_9ca6_a2f9a0af9238.slice/crio-cf374aea16944aaa4046488c405aeb85ee6ad9274628e282242129508df06efb WatchSource:0}: Error finding container cf374aea16944aaa4046488c405aeb85ee6ad9274628e282242129508df06efb: Status 404 returned error can't find the container with id cf374aea16944aaa4046488c405aeb85ee6ad9274628e282242129508df06efb
Nov 28 15:05:40 crc kubenswrapper[4857]: I1128 15:05:40.350250 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-nnqbh"]
Nov 28 15:05:40 crc kubenswrapper[4857]: I1128 15:05:40.551900 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gxjfh" event={"ID":"5b9d00f2-539c-431b-bf1a-5934e2f0e904","Type":"ContainerStarted","Data":"4345f6b290235208a39cf8687c36843469771a1a612fe399986eb2c925e6c640"}
Nov 28 15:05:40 crc kubenswrapper[4857]: I1128 15:05:40.558493 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-nnqbh" event={"ID":"a1424332-a4bd-46ca-9ca6-a2f9a0af9238","Type":"ContainerStarted","Data":"cf374aea16944aaa4046488c405aeb85ee6ad9274628e282242129508df06efb"}
Nov 28 15:05:40 crc kubenswrapper[4857]: I1128 15:05:40.574725 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-gxjfh" podStartSLOduration=3.064228977 podStartE2EDuration="5.574702063s" podCreationTimestamp="2025-11-28 15:05:35 +0000 UTC" firstStartedPulling="2025-11-28 15:05:37.475282166 +0000 UTC m=+5787.599223603" lastFinishedPulling="2025-11-28 15:05:39.985755242 +0000 UTC m=+5790.109696689" observedRunningTime="2025-11-28 15:05:40.572097203 +0000 UTC m=+5790.696038640" watchObservedRunningTime="2025-11-28 15:05:40.574702063 +0000 UTC m=+5790.698643500"
Nov 28 15:05:41 crc kubenswrapper[4857]: I1128 15:05:41.071030 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 28 15:05:41 crc kubenswrapper[4857]: I1128 15:05:41.072011 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 28 15:05:41 crc kubenswrapper[4857]: I1128 15:05:41.309162 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 15:05:41 crc kubenswrapper[4857]: I1128 15:05:41.309237 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 15:05:41 crc kubenswrapper[4857]: I1128 15:05:41.569622 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-nnqbh" event={"ID":"a1424332-a4bd-46ca-9ca6-a2f9a0af9238","Type":"ContainerStarted","Data":"9921428dcb2d5049ad9a65b16ce3bfd4f546d4f6fe729b54737b98ea91aa4f82"}
event={"ID":"a1424332-a4bd-46ca-9ca6-a2f9a0af9238","Type":"ContainerStarted","Data":"9921428dcb2d5049ad9a65b16ce3bfd4f546d4f6fe729b54737b98ea91aa4f82"} Nov 28 15:05:41 crc kubenswrapper[4857]: I1128 15:05:41.593145 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-nnqbh" podStartSLOduration=2.593125736 podStartE2EDuration="2.593125736s" podCreationTimestamp="2025-11-28 15:05:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:05:41.590425934 +0000 UTC m=+5791.714367381" watchObservedRunningTime="2025-11-28 15:05:41.593125736 +0000 UTC m=+5791.717067173" Nov 28 15:05:41 crc kubenswrapper[4857]: I1128 15:05:41.857503 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 15:05:41 crc kubenswrapper[4857]: I1128 15:05:41.857566 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 15:05:42 crc kubenswrapper[4857]: I1128 15:05:42.153199 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.70:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 15:05:42 crc kubenswrapper[4857]: I1128 15:05:42.153199 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.70:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 15:05:42 crc kubenswrapper[4857]: I1128 15:05:42.940224 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="a08be057-917a-4491-9e59-20a10d296b2a" containerName="nova-metadata-log" probeResult="failure" output="Get \"http://10.217.1.71:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 15:05:42 crc kubenswrapper[4857]: I1128 15:05:42.940533 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="a08be057-917a-4491-9e59-20a10d296b2a" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"http://10.217.1.71:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 15:05:44 crc kubenswrapper[4857]: I1128 15:05:44.843759 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 28 15:05:44 crc kubenswrapper[4857]: I1128 15:05:44.899340 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 28 15:05:45 crc kubenswrapper[4857]: I1128 15:05:45.606622 4857 generic.go:334] "Generic (PLEG): container finished" podID="a1424332-a4bd-46ca-9ca6-a2f9a0af9238" containerID="9921428dcb2d5049ad9a65b16ce3bfd4f546d4f6fe729b54737b98ea91aa4f82" exitCode=0 Nov 28 15:05:45 crc kubenswrapper[4857]: I1128 15:05:45.606699 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-nnqbh" event={"ID":"a1424332-a4bd-46ca-9ca6-a2f9a0af9238","Type":"ContainerDied","Data":"9921428dcb2d5049ad9a65b16ce3bfd4f546d4f6fe729b54737b98ea91aa4f82"} Nov 28 15:05:45 crc kubenswrapper[4857]: I1128 15:05:45.646664 4857 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 28 15:05:45 crc kubenswrapper[4857]: I1128 15:05:45.686541 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-gxjfh" Nov 28 15:05:45 crc kubenswrapper[4857]: I1128 15:05:45.686594 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-gxjfh" Nov 28 15:05:46 crc kubenswrapper[4857]: I1128 15:05:46.739119 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gxjfh" podUID="5b9d00f2-539c-431b-bf1a-5934e2f0e904" containerName="registry-server" probeResult="failure" output=< Nov 28 15:05:46 crc kubenswrapper[4857]: timeout: failed to connect service ":50051" within 1s Nov 28 15:05:46 crc kubenswrapper[4857]: > Nov 28 15:05:46 crc kubenswrapper[4857]: I1128 15:05:46.981326 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-nnqbh" Nov 28 15:05:47 crc kubenswrapper[4857]: I1128 15:05:47.061524 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m8x49\" (UniqueName: \"kubernetes.io/projected/a1424332-a4bd-46ca-9ca6-a2f9a0af9238-kube-api-access-m8x49\") pod \"a1424332-a4bd-46ca-9ca6-a2f9a0af9238\" (UID: \"a1424332-a4bd-46ca-9ca6-a2f9a0af9238\") " Nov 28 15:05:47 crc kubenswrapper[4857]: I1128 15:05:47.062124 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a1424332-a4bd-46ca-9ca6-a2f9a0af9238-scripts\") pod \"a1424332-a4bd-46ca-9ca6-a2f9a0af9238\" (UID: \"a1424332-a4bd-46ca-9ca6-a2f9a0af9238\") " Nov 28 15:05:47 crc kubenswrapper[4857]: I1128 15:05:47.062347 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1424332-a4bd-46ca-9ca6-a2f9a0af9238-config-data\") pod \"a1424332-a4bd-46ca-9ca6-a2f9a0af9238\" (UID: \"a1424332-a4bd-46ca-9ca6-a2f9a0af9238\") " Nov 28 15:05:47 crc kubenswrapper[4857]: I1128 15:05:47.062541 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1424332-a4bd-46ca-9ca6-a2f9a0af9238-combined-ca-bundle\") pod \"a1424332-a4bd-46ca-9ca6-a2f9a0af9238\" (UID: \"a1424332-a4bd-46ca-9ca6-a2f9a0af9238\") " Nov 28 15:05:47 crc kubenswrapper[4857]: I1128 15:05:47.069342 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a1424332-a4bd-46ca-9ca6-a2f9a0af9238-kube-api-access-m8x49" (OuterVolumeSpecName: "kube-api-access-m8x49") pod "a1424332-a4bd-46ca-9ca6-a2f9a0af9238" (UID: "a1424332-a4bd-46ca-9ca6-a2f9a0af9238"). InnerVolumeSpecName "kube-api-access-m8x49". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:05:47 crc kubenswrapper[4857]: I1128 15:05:47.071408 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a1424332-a4bd-46ca-9ca6-a2f9a0af9238-scripts" (OuterVolumeSpecName: "scripts") pod "a1424332-a4bd-46ca-9ca6-a2f9a0af9238" (UID: "a1424332-a4bd-46ca-9ca6-a2f9a0af9238"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:05:47 crc kubenswrapper[4857]: I1128 15:05:47.097414 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a1424332-a4bd-46ca-9ca6-a2f9a0af9238-config-data" (OuterVolumeSpecName: "config-data") pod "a1424332-a4bd-46ca-9ca6-a2f9a0af9238" (UID: "a1424332-a4bd-46ca-9ca6-a2f9a0af9238"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:05:47 crc kubenswrapper[4857]: I1128 15:05:47.117165 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a1424332-a4bd-46ca-9ca6-a2f9a0af9238-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a1424332-a4bd-46ca-9ca6-a2f9a0af9238" (UID: "a1424332-a4bd-46ca-9ca6-a2f9a0af9238"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:05:47 crc kubenswrapper[4857]: I1128 15:05:47.165603 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1424332-a4bd-46ca-9ca6-a2f9a0af9238-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:47 crc kubenswrapper[4857]: I1128 15:05:47.165648 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1424332-a4bd-46ca-9ca6-a2f9a0af9238-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:47 crc kubenswrapper[4857]: I1128 15:05:47.165671 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m8x49\" (UniqueName: \"kubernetes.io/projected/a1424332-a4bd-46ca-9ca6-a2f9a0af9238-kube-api-access-m8x49\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:47 crc kubenswrapper[4857]: I1128 15:05:47.165688 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a1424332-a4bd-46ca-9ca6-a2f9a0af9238-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:47 crc kubenswrapper[4857]: I1128 15:05:47.628137 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-nnqbh" event={"ID":"a1424332-a4bd-46ca-9ca6-a2f9a0af9238","Type":"ContainerDied","Data":"cf374aea16944aaa4046488c405aeb85ee6ad9274628e282242129508df06efb"} Nov 28 15:05:47 crc kubenswrapper[4857]: I1128 15:05:47.628188 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cf374aea16944aaa4046488c405aeb85ee6ad9274628e282242129508df06efb" Nov 28 15:05:47 crc kubenswrapper[4857]: I1128 15:05:47.629282 4857 util.go:48] "No ready sandbox for pod can be found. 
Nov 28 15:05:47 crc kubenswrapper[4857]: I1128 15:05:47.937225 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 28 15:05:47 crc kubenswrapper[4857]: I1128 15:05:47.937689 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8" containerName="nova-api-log" containerID="cri-o://64ece257fbee7ea41f8f00cc7b564eb5123fe83419354dcd7a380e577aa00ad5" gracePeriod=30
Nov 28 15:05:47 crc kubenswrapper[4857]: I1128 15:05:47.937764 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8" containerName="nova-api-api" containerID="cri-o://be3f00e5daa88c1e47a5c93dc67e2287432919d48f62b9ff51ac69cf207d6ebb" gracePeriod=30
Nov 28 15:05:47 crc kubenswrapper[4857]: I1128 15:05:47.962789 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 15:05:47 crc kubenswrapper[4857]: I1128 15:05:47.963027 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="b5a3d538-13e4-46b8-a367-7694a0c8c54f" containerName="nova-scheduler-scheduler" containerID="cri-o://9a32cf3eef57b99d2903f9dd4eff9f7c90e3edfdba0a3bfd484b6fd68afe798c" gracePeriod=30
Nov 28 15:05:47 crc kubenswrapper[4857]: I1128 15:05:47.986423 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 15:05:47 crc kubenswrapper[4857]: I1128 15:05:47.986716 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="a08be057-917a-4491-9e59-20a10d296b2a" containerName="nova-metadata-log" containerID="cri-o://bd7cf534a52361a26898637494602c2214a859e23458e04daa953ed8e95b2e05" gracePeriod=30
Nov 28 15:05:47 crc kubenswrapper[4857]: I1128 15:05:47.987194 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="a08be057-917a-4491-9e59-20a10d296b2a" containerName="nova-metadata-metadata" containerID="cri-o://1e70309ea2ff21a982495766c38ec1c5867c7e98ccc2aa093c9daed711688ba4" gracePeriod=30
Nov 28 15:05:48 crc kubenswrapper[4857]: I1128 15:05:48.638001 4857 generic.go:334] "Generic (PLEG): container finished" podID="c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8" containerID="64ece257fbee7ea41f8f00cc7b564eb5123fe83419354dcd7a380e577aa00ad5" exitCode=143
Nov 28 15:05:48 crc kubenswrapper[4857]: I1128 15:05:48.638076 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8","Type":"ContainerDied","Data":"64ece257fbee7ea41f8f00cc7b564eb5123fe83419354dcd7a380e577aa00ad5"}
Nov 28 15:05:48 crc kubenswrapper[4857]: I1128 15:05:48.640687 4857 generic.go:334] "Generic (PLEG): container finished" podID="a08be057-917a-4491-9e59-20a10d296b2a" containerID="bd7cf534a52361a26898637494602c2214a859e23458e04daa953ed8e95b2e05" exitCode=143
Nov 28 15:05:48 crc kubenswrapper[4857]: I1128 15:05:48.640737 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a08be057-917a-4491-9e59-20a10d296b2a","Type":"ContainerDied","Data":"bd7cf534a52361a26898637494602c2214a859e23458e04daa953ed8e95b2e05"}
Nov 28 15:05:49 crc kubenswrapper[4857]: E1128 15:05:49.846423 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9a32cf3eef57b99d2903f9dd4eff9f7c90e3edfdba0a3bfd484b6fd68afe798c" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Nov 28 15:05:49 crc kubenswrapper[4857]: E1128 15:05:49.849249 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9a32cf3eef57b99d2903f9dd4eff9f7c90e3edfdba0a3bfd484b6fd68afe798c" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Nov 28 15:05:49 crc kubenswrapper[4857]: E1128 15:05:49.851709 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9a32cf3eef57b99d2903f9dd4eff9f7c90e3edfdba0a3bfd484b6fd68afe798c" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Nov 28 15:05:49 crc kubenswrapper[4857]: E1128 15:05:49.851785 4857 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="b5a3d538-13e4-46b8-a367-7694a0c8c54f" containerName="nova-scheduler-scheduler"
Nov 28 15:05:52 crc kubenswrapper[4857]: I1128 15:05:52.697145 4857 generic.go:334] "Generic (PLEG): container finished" podID="a08be057-917a-4491-9e59-20a10d296b2a" containerID="1e70309ea2ff21a982495766c38ec1c5867c7e98ccc2aa093c9daed711688ba4" exitCode=0
Nov 28 15:05:52 crc kubenswrapper[4857]: I1128 15:05:52.697207 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a08be057-917a-4491-9e59-20a10d296b2a","Type":"ContainerDied","Data":"1e70309ea2ff21a982495766c38ec1c5867c7e98ccc2aa093c9daed711688ba4"}
Nov 28 15:05:52 crc kubenswrapper[4857]: I1128 15:05:52.699550 4857 generic.go:334] "Generic (PLEG): container finished" podID="b5a3d538-13e4-46b8-a367-7694a0c8c54f" containerID="9a32cf3eef57b99d2903f9dd4eff9f7c90e3edfdba0a3bfd484b6fd68afe798c" exitCode=0
Nov 28 15:05:52 crc kubenswrapper[4857]: I1128 15:05:52.699617 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"b5a3d538-13e4-46b8-a367-7694a0c8c54f","Type":"ContainerDied","Data":"9a32cf3eef57b99d2903f9dd4eff9f7c90e3edfdba0a3bfd484b6fd68afe798c"}
Nov 28 15:05:52 crc kubenswrapper[4857]: I1128 15:05:52.699653 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"b5a3d538-13e4-46b8-a367-7694a0c8c54f","Type":"ContainerDied","Data":"702740df9e4cf7e44c797570c3d4bde8fc79b1d58b767de001ce634007f1adf0"}
Nov 28 15:05:52 crc kubenswrapper[4857]: I1128 15:05:52.699668 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="702740df9e4cf7e44c797570c3d4bde8fc79b1d58b767de001ce634007f1adf0"
Nov 28 15:05:52 crc kubenswrapper[4857]: I1128 15:05:52.700854 4857 generic.go:334] "Generic (PLEG): container finished" podID="c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8" containerID="be3f00e5daa88c1e47a5c93dc67e2287432919d48f62b9ff51ac69cf207d6ebb" exitCode=0
Nov 28 15:05:52 crc kubenswrapper[4857]: I1128 15:05:52.700878 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8","Type":"ContainerDied","Data":"be3f00e5daa88c1e47a5c93dc67e2287432919d48f62b9ff51ac69cf207d6ebb"}
event={"ID":"c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8","Type":"ContainerDied","Data":"be3f00e5daa88c1e47a5c93dc67e2287432919d48f62b9ff51ac69cf207d6ebb"} Nov 28 15:05:52 crc kubenswrapper[4857]: I1128 15:05:52.879430 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 15:05:52 crc kubenswrapper[4857]: I1128 15:05:52.898725 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 15:05:52 crc kubenswrapper[4857]: I1128 15:05:52.898760 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.001863 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-csrkg\" (UniqueName: \"kubernetes.io/projected/c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8-kube-api-access-csrkg\") pod \"c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8\" (UID: \"c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8\") " Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.001930 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a08be057-917a-4491-9e59-20a10d296b2a-logs\") pod \"a08be057-917a-4491-9e59-20a10d296b2a\" (UID: \"a08be057-917a-4491-9e59-20a10d296b2a\") " Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.001998 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5a3d538-13e4-46b8-a367-7694a0c8c54f-combined-ca-bundle\") pod \"b5a3d538-13e4-46b8-a367-7694a0c8c54f\" (UID: \"b5a3d538-13e4-46b8-a367-7694a0c8c54f\") " Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.002229 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8-config-data\") pod \"c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8\" (UID: \"c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8\") " Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.002297 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5a3d538-13e4-46b8-a367-7694a0c8c54f-config-data\") pod \"b5a3d538-13e4-46b8-a367-7694a0c8c54f\" (UID: \"b5a3d538-13e4-46b8-a367-7694a0c8c54f\") " Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.002333 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c4n24\" (UniqueName: \"kubernetes.io/projected/b5a3d538-13e4-46b8-a367-7694a0c8c54f-kube-api-access-c4n24\") pod \"b5a3d538-13e4-46b8-a367-7694a0c8c54f\" (UID: \"b5a3d538-13e4-46b8-a367-7694a0c8c54f\") " Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.002428 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a08be057-917a-4491-9e59-20a10d296b2a-combined-ca-bundle\") pod \"a08be057-917a-4491-9e59-20a10d296b2a\" (UID: \"a08be057-917a-4491-9e59-20a10d296b2a\") " Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.002498 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8-combined-ca-bundle\") pod \"c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8\" (UID: \"c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8\") " Nov 28 15:05:53 crc kubenswrapper[4857]: 
Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.002933 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8-logs\") pod \"c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8\" (UID: \"c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8\") "
Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.002985 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n47ql\" (UniqueName: \"kubernetes.io/projected/a08be057-917a-4491-9e59-20a10d296b2a-kube-api-access-n47ql\") pod \"a08be057-917a-4491-9e59-20a10d296b2a\" (UID: \"a08be057-917a-4491-9e59-20a10d296b2a\") "
Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.002612 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a08be057-917a-4491-9e59-20a10d296b2a-logs" (OuterVolumeSpecName: "logs") pod "a08be057-917a-4491-9e59-20a10d296b2a" (UID: "a08be057-917a-4491-9e59-20a10d296b2a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.005094 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8-logs" (OuterVolumeSpecName: "logs") pod "c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8" (UID: "c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.007608 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8-kube-api-access-csrkg" (OuterVolumeSpecName: "kube-api-access-csrkg") pod "c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8" (UID: "c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8"). InnerVolumeSpecName "kube-api-access-csrkg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.012987 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a08be057-917a-4491-9e59-20a10d296b2a-kube-api-access-n47ql" (OuterVolumeSpecName: "kube-api-access-n47ql") pod "a08be057-917a-4491-9e59-20a10d296b2a" (UID: "a08be057-917a-4491-9e59-20a10d296b2a"). InnerVolumeSpecName "kube-api-access-n47ql". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.016490 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5a3d538-13e4-46b8-a367-7694a0c8c54f-kube-api-access-c4n24" (OuterVolumeSpecName: "kube-api-access-c4n24") pod "b5a3d538-13e4-46b8-a367-7694a0c8c54f" (UID: "b5a3d538-13e4-46b8-a367-7694a0c8c54f"). InnerVolumeSpecName "kube-api-access-c4n24". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.026199 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8-config-data" (OuterVolumeSpecName: "config-data") pod "c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8" (UID: "c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.026524 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a08be057-917a-4491-9e59-20a10d296b2a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a08be057-917a-4491-9e59-20a10d296b2a" (UID: "a08be057-917a-4491-9e59-20a10d296b2a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.031101 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5a3d538-13e4-46b8-a367-7694a0c8c54f-config-data" (OuterVolumeSpecName: "config-data") pod "b5a3d538-13e4-46b8-a367-7694a0c8c54f" (UID: "b5a3d538-13e4-46b8-a367-7694a0c8c54f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.034733 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8" (UID: "c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.035510 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5a3d538-13e4-46b8-a367-7694a0c8c54f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b5a3d538-13e4-46b8-a367-7694a0c8c54f" (UID: "b5a3d538-13e4-46b8-a367-7694a0c8c54f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.041771 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a08be057-917a-4491-9e59-20a10d296b2a-config-data" (OuterVolumeSpecName: "config-data") pod "a08be057-917a-4491-9e59-20a10d296b2a" (UID: "a08be057-917a-4491-9e59-20a10d296b2a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.104634 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.104670 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5a3d538-13e4-46b8-a367-7694a0c8c54f-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.104682 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c4n24\" (UniqueName: \"kubernetes.io/projected/b5a3d538-13e4-46b8-a367-7694a0c8c54f-kube-api-access-c4n24\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.104692 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a08be057-917a-4491-9e59-20a10d296b2a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.104702 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.104712 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a08be057-917a-4491-9e59-20a10d296b2a-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.104721 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n47ql\" (UniqueName: \"kubernetes.io/projected/a08be057-917a-4491-9e59-20a10d296b2a-kube-api-access-n47ql\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.104770 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.104778 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-csrkg\" (UniqueName: \"kubernetes.io/projected/c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8-kube-api-access-csrkg\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.104787 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a08be057-917a-4491-9e59-20a10d296b2a-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.104795 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5a3d538-13e4-46b8-a367-7694a0c8c54f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.716152 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a08be057-917a-4491-9e59-20a10d296b2a","Type":"ContainerDied","Data":"57ac22b265fd0593735d2f2f36b2b7a059f61842127b6a4d835d5695b253d201"} Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.716229 4857 scope.go:117] "RemoveContainer" containerID="1e70309ea2ff21a982495766c38ec1c5867c7e98ccc2aa093c9daed711688ba4" Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.717134 4857 util.go:48] "No 
Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.720810 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.725467 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8","Type":"ContainerDied","Data":"6f7327b38159a3c6e42716dcccc93fcaeb7c3d6a1454b47232b4f198627fa52b"}
Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.725719 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.766568 4857 scope.go:117] "RemoveContainer" containerID="bd7cf534a52361a26898637494602c2214a859e23458e04daa953ed8e95b2e05"
Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.795052 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.806176 4857 scope.go:117] "RemoveContainer" containerID="be3f00e5daa88c1e47a5c93dc67e2287432919d48f62b9ff51ac69cf207d6ebb"
Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.828197 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.838733 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.856741 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.873665 4857 scope.go:117] "RemoveContainer" containerID="64ece257fbee7ea41f8f00cc7b564eb5123fe83419354dcd7a380e577aa00ad5"
Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.873816 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 15:05:53 crc kubenswrapper[4857]: E1128 15:05:53.874253 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5a3d538-13e4-46b8-a367-7694a0c8c54f" containerName="nova-scheduler-scheduler"
Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.874283 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5a3d538-13e4-46b8-a367-7694a0c8c54f" containerName="nova-scheduler-scheduler"
Nov 28 15:05:53 crc kubenswrapper[4857]: E1128 15:05:53.874305 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8" containerName="nova-api-log"
Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.874316 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8" containerName="nova-api-log"
Nov 28 15:05:53 crc kubenswrapper[4857]: E1128 15:05:53.874344 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1424332-a4bd-46ca-9ca6-a2f9a0af9238" containerName="nova-manage"
Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.874355 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1424332-a4bd-46ca-9ca6-a2f9a0af9238" containerName="nova-manage"
Nov 28 15:05:53 crc kubenswrapper[4857]: E1128 15:05:53.874389 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a08be057-917a-4491-9e59-20a10d296b2a" containerName="nova-metadata-log"
Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.874398 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a08be057-917a-4491-9e59-20a10d296b2a" containerName="nova-metadata-log"
podUID="a08be057-917a-4491-9e59-20a10d296b2a" containerName="nova-metadata-log" Nov 28 15:05:53 crc kubenswrapper[4857]: E1128 15:05:53.874430 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a08be057-917a-4491-9e59-20a10d296b2a" containerName="nova-metadata-metadata" Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.874437 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a08be057-917a-4491-9e59-20a10d296b2a" containerName="nova-metadata-metadata" Nov 28 15:05:53 crc kubenswrapper[4857]: E1128 15:05:53.874465 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8" containerName="nova-api-api" Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.874475 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8" containerName="nova-api-api" Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.874694 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="a08be057-917a-4491-9e59-20a10d296b2a" containerName="nova-metadata-metadata" Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.874734 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8" containerName="nova-api-log" Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.874753 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="a1424332-a4bd-46ca-9ca6-a2f9a0af9238" containerName="nova-manage" Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.874768 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8" containerName="nova-api-api" Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.874783 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5a3d538-13e4-46b8-a367-7694a0c8c54f" containerName="nova-scheduler-scheduler" Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.874803 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="a08be057-917a-4491-9e59-20a10d296b2a" containerName="nova-metadata-log" Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.876110 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.883896 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.895075 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.905731 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.907120 4857 util.go:30] "No sandbox for pod can be found. 
Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.911285 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.915006 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.917557 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d4926cdd-f4bc-4e14-8a05-d61260bb2649-logs\") pod \"nova-metadata-0\" (UID: \"d4926cdd-f4bc-4e14-8a05-d61260bb2649\") " pod="openstack/nova-metadata-0"
Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.917645 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4926cdd-f4bc-4e14-8a05-d61260bb2649-config-data\") pod \"nova-metadata-0\" (UID: \"d4926cdd-f4bc-4e14-8a05-d61260bb2649\") " pod="openstack/nova-metadata-0"
Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.917740 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9t8xc\" (UniqueName: \"kubernetes.io/projected/d4926cdd-f4bc-4e14-8a05-d61260bb2649-kube-api-access-9t8xc\") pod \"nova-metadata-0\" (UID: \"d4926cdd-f4bc-4e14-8a05-d61260bb2649\") " pod="openstack/nova-metadata-0"
Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.917793 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4926cdd-f4bc-4e14-8a05-d61260bb2649-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d4926cdd-f4bc-4e14-8a05-d61260bb2649\") " pod="openstack/nova-metadata-0"
Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.924824 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.935217 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.943061 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.944722 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.946978 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Nov 28 15:05:53 crc kubenswrapper[4857]: I1128 15:05:53.951051 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.020027 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10c85eb2-d5c9-47c5-b7e9-b24e96ae607a-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"10c85eb2-d5c9-47c5-b7e9-b24e96ae607a\") " pod="openstack/nova-scheduler-0"
Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.020394 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/abddfbcf-fdbc-4fae-acf2-29943c5e6732-logs\") pod \"nova-api-0\" (UID: \"abddfbcf-fdbc-4fae-acf2-29943c5e6732\") " pod="openstack/nova-api-0"
Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.020438 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2gf7v\" (UniqueName: \"kubernetes.io/projected/10c85eb2-d5c9-47c5-b7e9-b24e96ae607a-kube-api-access-2gf7v\") pod \"nova-scheduler-0\" (UID: \"10c85eb2-d5c9-47c5-b7e9-b24e96ae607a\") " pod="openstack/nova-scheduler-0"
Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.020466 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-86nsw\" (UniqueName: \"kubernetes.io/projected/abddfbcf-fdbc-4fae-acf2-29943c5e6732-kube-api-access-86nsw\") pod \"nova-api-0\" (UID: \"abddfbcf-fdbc-4fae-acf2-29943c5e6732\") " pod="openstack/nova-api-0"
Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.020497 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9t8xc\" (UniqueName: \"kubernetes.io/projected/d4926cdd-f4bc-4e14-8a05-d61260bb2649-kube-api-access-9t8xc\") pod \"nova-metadata-0\" (UID: \"d4926cdd-f4bc-4e14-8a05-d61260bb2649\") " pod="openstack/nova-metadata-0"
Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.020561 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4926cdd-f4bc-4e14-8a05-d61260bb2649-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d4926cdd-f4bc-4e14-8a05-d61260bb2649\") " pod="openstack/nova-metadata-0"
Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.020611 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d4926cdd-f4bc-4e14-8a05-d61260bb2649-logs\") pod \"nova-metadata-0\" (UID: \"d4926cdd-f4bc-4e14-8a05-d61260bb2649\") " pod="openstack/nova-metadata-0"
Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.020645 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/abddfbcf-fdbc-4fae-acf2-29943c5e6732-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"abddfbcf-fdbc-4fae-acf2-29943c5e6732\") " pod="openstack/nova-api-0"
Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.020751 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4926cdd-f4bc-4e14-8a05-d61260bb2649-config-data\") pod \"nova-metadata-0\" (UID: \"d4926cdd-f4bc-4e14-8a05-d61260bb2649\") " pod="openstack/nova-metadata-0"
\"kubernetes.io/secret/d4926cdd-f4bc-4e14-8a05-d61260bb2649-config-data\") pod \"nova-metadata-0\" (UID: \"d4926cdd-f4bc-4e14-8a05-d61260bb2649\") " pod="openstack/nova-metadata-0" Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.020796 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10c85eb2-d5c9-47c5-b7e9-b24e96ae607a-config-data\") pod \"nova-scheduler-0\" (UID: \"10c85eb2-d5c9-47c5-b7e9-b24e96ae607a\") " pod="openstack/nova-scheduler-0" Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.021145 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/abddfbcf-fdbc-4fae-acf2-29943c5e6732-config-data\") pod \"nova-api-0\" (UID: \"abddfbcf-fdbc-4fae-acf2-29943c5e6732\") " pod="openstack/nova-api-0" Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.021148 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d4926cdd-f4bc-4e14-8a05-d61260bb2649-logs\") pod \"nova-metadata-0\" (UID: \"d4926cdd-f4bc-4e14-8a05-d61260bb2649\") " pod="openstack/nova-metadata-0" Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.027125 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4926cdd-f4bc-4e14-8a05-d61260bb2649-config-data\") pod \"nova-metadata-0\" (UID: \"d4926cdd-f4bc-4e14-8a05-d61260bb2649\") " pod="openstack/nova-metadata-0" Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.028097 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4926cdd-f4bc-4e14-8a05-d61260bb2649-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"d4926cdd-f4bc-4e14-8a05-d61260bb2649\") " pod="openstack/nova-metadata-0" Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.040462 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9t8xc\" (UniqueName: \"kubernetes.io/projected/d4926cdd-f4bc-4e14-8a05-d61260bb2649-kube-api-access-9t8xc\") pod \"nova-metadata-0\" (UID: \"d4926cdd-f4bc-4e14-8a05-d61260bb2649\") " pod="openstack/nova-metadata-0" Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.123237 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-86nsw\" (UniqueName: \"kubernetes.io/projected/abddfbcf-fdbc-4fae-acf2-29943c5e6732-kube-api-access-86nsw\") pod \"nova-api-0\" (UID: \"abddfbcf-fdbc-4fae-acf2-29943c5e6732\") " pod="openstack/nova-api-0" Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.123690 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/abddfbcf-fdbc-4fae-acf2-29943c5e6732-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"abddfbcf-fdbc-4fae-acf2-29943c5e6732\") " pod="openstack/nova-api-0" Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.123723 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10c85eb2-d5c9-47c5-b7e9-b24e96ae607a-config-data\") pod \"nova-scheduler-0\" (UID: \"10c85eb2-d5c9-47c5-b7e9-b24e96ae607a\") " pod="openstack/nova-scheduler-0" Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.123754 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/abddfbcf-fdbc-4fae-acf2-29943c5e6732-config-data\") pod \"nova-api-0\" (UID: \"abddfbcf-fdbc-4fae-acf2-29943c5e6732\") " pod="openstack/nova-api-0" Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.123781 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10c85eb2-d5c9-47c5-b7e9-b24e96ae607a-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"10c85eb2-d5c9-47c5-b7e9-b24e96ae607a\") " pod="openstack/nova-scheduler-0" Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.123821 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/abddfbcf-fdbc-4fae-acf2-29943c5e6732-logs\") pod \"nova-api-0\" (UID: \"abddfbcf-fdbc-4fae-acf2-29943c5e6732\") " pod="openstack/nova-api-0" Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.123842 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2gf7v\" (UniqueName: \"kubernetes.io/projected/10c85eb2-d5c9-47c5-b7e9-b24e96ae607a-kube-api-access-2gf7v\") pod \"nova-scheduler-0\" (UID: \"10c85eb2-d5c9-47c5-b7e9-b24e96ae607a\") " pod="openstack/nova-scheduler-0" Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.124812 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/abddfbcf-fdbc-4fae-acf2-29943c5e6732-logs\") pod \"nova-api-0\" (UID: \"abddfbcf-fdbc-4fae-acf2-29943c5e6732\") " pod="openstack/nova-api-0" Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.127324 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10c85eb2-d5c9-47c5-b7e9-b24e96ae607a-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"10c85eb2-d5c9-47c5-b7e9-b24e96ae607a\") " pod="openstack/nova-scheduler-0" Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.127822 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/abddfbcf-fdbc-4fae-acf2-29943c5e6732-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"abddfbcf-fdbc-4fae-acf2-29943c5e6732\") " pod="openstack/nova-api-0" Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.129127 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10c85eb2-d5c9-47c5-b7e9-b24e96ae607a-config-data\") pod \"nova-scheduler-0\" (UID: \"10c85eb2-d5c9-47c5-b7e9-b24e96ae607a\") " pod="openstack/nova-scheduler-0" Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.130488 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/abddfbcf-fdbc-4fae-acf2-29943c5e6732-config-data\") pod \"nova-api-0\" (UID: \"abddfbcf-fdbc-4fae-acf2-29943c5e6732\") " pod="openstack/nova-api-0" Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.138526 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2gf7v\" (UniqueName: \"kubernetes.io/projected/10c85eb2-d5c9-47c5-b7e9-b24e96ae607a-kube-api-access-2gf7v\") pod \"nova-scheduler-0\" (UID: \"10c85eb2-d5c9-47c5-b7e9-b24e96ae607a\") " pod="openstack/nova-scheduler-0" Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.145590 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-86nsw\" (UniqueName: 
\"kubernetes.io/projected/abddfbcf-fdbc-4fae-acf2-29943c5e6732-kube-api-access-86nsw\") pod \"nova-api-0\" (UID: \"abddfbcf-fdbc-4fae-acf2-29943c5e6732\") " pod="openstack/nova-api-0" Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.200449 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.227302 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.246844 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a08be057-917a-4491-9e59-20a10d296b2a" path="/var/lib/kubelet/pods/a08be057-917a-4491-9e59-20a10d296b2a/volumes" Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.247536 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5a3d538-13e4-46b8-a367-7694a0c8c54f" path="/var/lib/kubelet/pods/b5a3d538-13e4-46b8-a367-7694a0c8c54f/volumes" Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.248192 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8" path="/var/lib/kubelet/pods/c7d05605-89f0-4f1e-a6ce-ea73cd1fe8d8/volumes" Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.259872 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.670995 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.746900 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d4926cdd-f4bc-4e14-8a05-d61260bb2649","Type":"ContainerStarted","Data":"eac2e809e7958698f3756c2e7e1f5d1e9c7cc1324add79c32d21bc9a8795edd1"} Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.783729 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 15:05:54 crc kubenswrapper[4857]: I1128 15:05:54.826773 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:05:54 crc kubenswrapper[4857]: W1128 15:05:54.836254 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podabddfbcf_fdbc_4fae_acf2_29943c5e6732.slice/crio-d96844a51f54d99eff726fcddfbf445451146c7aabbbde373979b25bc2070ad0 WatchSource:0}: Error finding container d96844a51f54d99eff726fcddfbf445451146c7aabbbde373979b25bc2070ad0: Status 404 returned error can't find the container with id d96844a51f54d99eff726fcddfbf445451146c7aabbbde373979b25bc2070ad0 Nov 28 15:05:55 crc kubenswrapper[4857]: I1128 15:05:55.767876 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"10c85eb2-d5c9-47c5-b7e9-b24e96ae607a","Type":"ContainerStarted","Data":"b7291b5a642a8e065649d7b60e5faa703b2648928c39c13de5610ea3fc51b0f4"} Nov 28 15:05:55 crc kubenswrapper[4857]: I1128 15:05:55.768901 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"10c85eb2-d5c9-47c5-b7e9-b24e96ae607a","Type":"ContainerStarted","Data":"283dffd64472da77db56efaafa231b967a556a71fc52197fefbf1af779258327"} Nov 28 15:05:55 crc kubenswrapper[4857]: I1128 15:05:55.772158 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"d4926cdd-f4bc-4e14-8a05-d61260bb2649","Type":"ContainerStarted","Data":"f3a52c1f8a02ba8347f3dac283a1ecd7d65d2bcccd2c8682630fada1fffca893"} Nov 28 15:05:55 crc kubenswrapper[4857]: I1128 15:05:55.772203 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d4926cdd-f4bc-4e14-8a05-d61260bb2649","Type":"ContainerStarted","Data":"efc47ae08a731d27ec108edff3adc7aa74baff19d905dfb91c1371f98a8fc9e7"} Nov 28 15:05:55 crc kubenswrapper[4857]: I1128 15:05:55.778202 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"abddfbcf-fdbc-4fae-acf2-29943c5e6732","Type":"ContainerStarted","Data":"4a41bfb5eb3dfee23ccc5e5b15a4fdf64fc6618299adaea8e4cd98fe8f0bcb07"} Nov 28 15:05:55 crc kubenswrapper[4857]: I1128 15:05:55.778246 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"abddfbcf-fdbc-4fae-acf2-29943c5e6732","Type":"ContainerStarted","Data":"b3e8eb7cf2f9075a147ce977c85d7b0eb30cc3667c8da9de79d169734da27a50"} Nov 28 15:05:55 crc kubenswrapper[4857]: I1128 15:05:55.778265 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"abddfbcf-fdbc-4fae-acf2-29943c5e6732","Type":"ContainerStarted","Data":"d96844a51f54d99eff726fcddfbf445451146c7aabbbde373979b25bc2070ad0"} Nov 28 15:05:55 crc kubenswrapper[4857]: I1128 15:05:55.786226 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-gxjfh" Nov 28 15:05:55 crc kubenswrapper[4857]: I1128 15:05:55.795958 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.795918842 podStartE2EDuration="2.795918842s" podCreationTimestamp="2025-11-28 15:05:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:05:55.79318847 +0000 UTC m=+5805.917129947" watchObservedRunningTime="2025-11-28 15:05:55.795918842 +0000 UTC m=+5805.919860279" Nov 28 15:05:55 crc kubenswrapper[4857]: I1128 15:05:55.852557 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.852530825 podStartE2EDuration="2.852530825s" podCreationTimestamp="2025-11-28 15:05:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:05:55.848380214 +0000 UTC m=+5805.972321671" watchObservedRunningTime="2025-11-28 15:05:55.852530825 +0000 UTC m=+5805.976472272" Nov 28 15:05:55 crc kubenswrapper[4857]: I1128 15:05:55.858794 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-gxjfh" Nov 28 15:05:55 crc kubenswrapper[4857]: I1128 15:05:55.876300 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.876279089 podStartE2EDuration="2.876279089s" podCreationTimestamp="2025-11-28 15:05:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:05:55.871879771 +0000 UTC m=+5805.995821258" watchObservedRunningTime="2025-11-28 15:05:55.876279089 +0000 UTC m=+5806.000220526" Nov 28 15:05:56 crc kubenswrapper[4857]: I1128 15:05:56.051670 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/redhat-operators-gxjfh"] Nov 28 15:05:57 crc kubenswrapper[4857]: I1128 15:05:57.805876 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-gxjfh" podUID="5b9d00f2-539c-431b-bf1a-5934e2f0e904" containerName="registry-server" containerID="cri-o://4345f6b290235208a39cf8687c36843469771a1a612fe399986eb2c925e6c640" gracePeriod=2 Nov 28 15:05:58 crc kubenswrapper[4857]: I1128 15:05:58.429665 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gxjfh" Nov 28 15:05:58 crc kubenswrapper[4857]: I1128 15:05:58.518093 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b9d00f2-539c-431b-bf1a-5934e2f0e904-utilities\") pod \"5b9d00f2-539c-431b-bf1a-5934e2f0e904\" (UID: \"5b9d00f2-539c-431b-bf1a-5934e2f0e904\") " Nov 28 15:05:58 crc kubenswrapper[4857]: I1128 15:05:58.518175 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f7hr7\" (UniqueName: \"kubernetes.io/projected/5b9d00f2-539c-431b-bf1a-5934e2f0e904-kube-api-access-f7hr7\") pod \"5b9d00f2-539c-431b-bf1a-5934e2f0e904\" (UID: \"5b9d00f2-539c-431b-bf1a-5934e2f0e904\") " Nov 28 15:05:58 crc kubenswrapper[4857]: I1128 15:05:58.518396 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b9d00f2-539c-431b-bf1a-5934e2f0e904-catalog-content\") pod \"5b9d00f2-539c-431b-bf1a-5934e2f0e904\" (UID: \"5b9d00f2-539c-431b-bf1a-5934e2f0e904\") " Nov 28 15:05:58 crc kubenswrapper[4857]: I1128 15:05:58.519624 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b9d00f2-539c-431b-bf1a-5934e2f0e904-utilities" (OuterVolumeSpecName: "utilities") pod "5b9d00f2-539c-431b-bf1a-5934e2f0e904" (UID: "5b9d00f2-539c-431b-bf1a-5934e2f0e904"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:05:58 crc kubenswrapper[4857]: I1128 15:05:58.525229 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b9d00f2-539c-431b-bf1a-5934e2f0e904-kube-api-access-f7hr7" (OuterVolumeSpecName: "kube-api-access-f7hr7") pod "5b9d00f2-539c-431b-bf1a-5934e2f0e904" (UID: "5b9d00f2-539c-431b-bf1a-5934e2f0e904"). InnerVolumeSpecName "kube-api-access-f7hr7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:05:58 crc kubenswrapper[4857]: I1128 15:05:58.621666 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f7hr7\" (UniqueName: \"kubernetes.io/projected/5b9d00f2-539c-431b-bf1a-5934e2f0e904-kube-api-access-f7hr7\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:58 crc kubenswrapper[4857]: I1128 15:05:58.621711 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5b9d00f2-539c-431b-bf1a-5934e2f0e904-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:58 crc kubenswrapper[4857]: I1128 15:05:58.633067 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5b9d00f2-539c-431b-bf1a-5934e2f0e904-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5b9d00f2-539c-431b-bf1a-5934e2f0e904" (UID: "5b9d00f2-539c-431b-bf1a-5934e2f0e904"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:05:58 crc kubenswrapper[4857]: I1128 15:05:58.722676 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5b9d00f2-539c-431b-bf1a-5934e2f0e904-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:05:58 crc kubenswrapper[4857]: I1128 15:05:58.819337 4857 generic.go:334] "Generic (PLEG): container finished" podID="5b9d00f2-539c-431b-bf1a-5934e2f0e904" containerID="4345f6b290235208a39cf8687c36843469771a1a612fe399986eb2c925e6c640" exitCode=0 Nov 28 15:05:58 crc kubenswrapper[4857]: I1128 15:05:58.819396 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gxjfh" event={"ID":"5b9d00f2-539c-431b-bf1a-5934e2f0e904","Type":"ContainerDied","Data":"4345f6b290235208a39cf8687c36843469771a1a612fe399986eb2c925e6c640"} Nov 28 15:05:58 crc kubenswrapper[4857]: I1128 15:05:58.819433 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gxjfh" event={"ID":"5b9d00f2-539c-431b-bf1a-5934e2f0e904","Type":"ContainerDied","Data":"5b7e00fc4716407a846ac7a0937a681062ec5016eae862e72e41724aed495814"} Nov 28 15:05:58 crc kubenswrapper[4857]: I1128 15:05:58.819458 4857 scope.go:117] "RemoveContainer" containerID="4345f6b290235208a39cf8687c36843469771a1a612fe399986eb2c925e6c640" Nov 28 15:05:58 crc kubenswrapper[4857]: I1128 15:05:58.820782 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gxjfh" Nov 28 15:05:58 crc kubenswrapper[4857]: I1128 15:05:58.898464 4857 scope.go:117] "RemoveContainer" containerID="2f8b4ae33bf248726702cfe9c24d1fc0a2079a88d47309ec491aa3d5755debcf" Nov 28 15:05:58 crc kubenswrapper[4857]: I1128 15:05:58.907693 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gxjfh"] Nov 28 15:05:58 crc kubenswrapper[4857]: I1128 15:05:58.918166 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-gxjfh"] Nov 28 15:05:58 crc kubenswrapper[4857]: I1128 15:05:58.933815 4857 scope.go:117] "RemoveContainer" containerID="e4be5e74f95b80a1c29857bab0b19c3dffde2e0425c24833a94ca9c3901925c8" Nov 28 15:05:58 crc kubenswrapper[4857]: I1128 15:05:58.974486 4857 scope.go:117] "RemoveContainer" containerID="4345f6b290235208a39cf8687c36843469771a1a612fe399986eb2c925e6c640" Nov 28 15:05:58 crc kubenswrapper[4857]: E1128 15:05:58.975858 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4345f6b290235208a39cf8687c36843469771a1a612fe399986eb2c925e6c640\": container with ID starting with 4345f6b290235208a39cf8687c36843469771a1a612fe399986eb2c925e6c640 not found: ID does not exist" containerID="4345f6b290235208a39cf8687c36843469771a1a612fe399986eb2c925e6c640" Nov 28 15:05:58 crc kubenswrapper[4857]: I1128 15:05:58.975924 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4345f6b290235208a39cf8687c36843469771a1a612fe399986eb2c925e6c640"} err="failed to get container status \"4345f6b290235208a39cf8687c36843469771a1a612fe399986eb2c925e6c640\": rpc error: code = NotFound desc = could not find container \"4345f6b290235208a39cf8687c36843469771a1a612fe399986eb2c925e6c640\": container with ID starting with 4345f6b290235208a39cf8687c36843469771a1a612fe399986eb2c925e6c640 not found: ID does not exist" Nov 28 15:05:58 crc 
kubenswrapper[4857]: I1128 15:05:58.976000 4857 scope.go:117] "RemoveContainer" containerID="2f8b4ae33bf248726702cfe9c24d1fc0a2079a88d47309ec491aa3d5755debcf" Nov 28 15:05:58 crc kubenswrapper[4857]: E1128 15:05:58.977008 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2f8b4ae33bf248726702cfe9c24d1fc0a2079a88d47309ec491aa3d5755debcf\": container with ID starting with 2f8b4ae33bf248726702cfe9c24d1fc0a2079a88d47309ec491aa3d5755debcf not found: ID does not exist" containerID="2f8b4ae33bf248726702cfe9c24d1fc0a2079a88d47309ec491aa3d5755debcf" Nov 28 15:05:58 crc kubenswrapper[4857]: I1128 15:05:58.977103 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f8b4ae33bf248726702cfe9c24d1fc0a2079a88d47309ec491aa3d5755debcf"} err="failed to get container status \"2f8b4ae33bf248726702cfe9c24d1fc0a2079a88d47309ec491aa3d5755debcf\": rpc error: code = NotFound desc = could not find container \"2f8b4ae33bf248726702cfe9c24d1fc0a2079a88d47309ec491aa3d5755debcf\": container with ID starting with 2f8b4ae33bf248726702cfe9c24d1fc0a2079a88d47309ec491aa3d5755debcf not found: ID does not exist" Nov 28 15:05:58 crc kubenswrapper[4857]: I1128 15:05:58.977174 4857 scope.go:117] "RemoveContainer" containerID="e4be5e74f95b80a1c29857bab0b19c3dffde2e0425c24833a94ca9c3901925c8" Nov 28 15:05:58 crc kubenswrapper[4857]: E1128 15:05:58.977880 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e4be5e74f95b80a1c29857bab0b19c3dffde2e0425c24833a94ca9c3901925c8\": container with ID starting with e4be5e74f95b80a1c29857bab0b19c3dffde2e0425c24833a94ca9c3901925c8 not found: ID does not exist" containerID="e4be5e74f95b80a1c29857bab0b19c3dffde2e0425c24833a94ca9c3901925c8" Nov 28 15:05:58 crc kubenswrapper[4857]: I1128 15:05:58.977921 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4be5e74f95b80a1c29857bab0b19c3dffde2e0425c24833a94ca9c3901925c8"} err="failed to get container status \"e4be5e74f95b80a1c29857bab0b19c3dffde2e0425c24833a94ca9c3901925c8\": rpc error: code = NotFound desc = could not find container \"e4be5e74f95b80a1c29857bab0b19c3dffde2e0425c24833a94ca9c3901925c8\": container with ID starting with e4be5e74f95b80a1c29857bab0b19c3dffde2e0425c24833a94ca9c3901925c8 not found: ID does not exist" Nov 28 15:05:59 crc kubenswrapper[4857]: I1128 15:05:59.201151 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 15:05:59 crc kubenswrapper[4857]: I1128 15:05:59.201437 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 15:05:59 crc kubenswrapper[4857]: I1128 15:05:59.228185 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 28 15:06:00 crc kubenswrapper[4857]: I1128 15:06:00.251363 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b9d00f2-539c-431b-bf1a-5934e2f0e904" path="/var/lib/kubelet/pods/5b9d00f2-539c-431b-bf1a-5934e2f0e904/volumes" Nov 28 15:06:04 crc kubenswrapper[4857]: I1128 15:06:04.201581 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 15:06:04 crc kubenswrapper[4857]: I1128 15:06:04.202756 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 15:06:04 
crc kubenswrapper[4857]: I1128 15:06:04.228203 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 28 15:06:04 crc kubenswrapper[4857]: I1128 15:06:04.260327 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 15:06:04 crc kubenswrapper[4857]: I1128 15:06:04.260679 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 15:06:04 crc kubenswrapper[4857]: I1128 15:06:04.271050 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 28 15:06:04 crc kubenswrapper[4857]: I1128 15:06:04.931629 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 28 15:06:05 crc kubenswrapper[4857]: I1128 15:06:05.285282 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="d4926cdd-f4bc-4e14-8a05-d61260bb2649" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"http://10.217.1.75:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 15:06:05 crc kubenswrapper[4857]: I1128 15:06:05.285280 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="d4926cdd-f4bc-4e14-8a05-d61260bb2649" containerName="nova-metadata-log" probeResult="failure" output="Get \"http://10.217.1.75:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 15:06:05 crc kubenswrapper[4857]: I1128 15:06:05.368212 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="abddfbcf-fdbc-4fae-acf2-29943c5e6732" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.77:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 15:06:05 crc kubenswrapper[4857]: I1128 15:06:05.368328 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="abddfbcf-fdbc-4fae-acf2-29943c5e6732" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.77:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 15:06:11 crc kubenswrapper[4857]: I1128 15:06:11.309416 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:06:11 crc kubenswrapper[4857]: I1128 15:06:11.313138 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:06:11 crc kubenswrapper[4857]: I1128 15:06:11.313238 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 15:06:11 crc kubenswrapper[4857]: I1128 15:06:11.314219 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"241be049ff6ed3f89c9bfd340177c2b5b57d6cca2ed8fa8917c7f312eb8c7ce1"} 
pod="openshift-machine-config-operator/machine-config-daemon-dshsf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 15:06:11 crc kubenswrapper[4857]: I1128 15:06:11.314296 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" containerID="cri-o://241be049ff6ed3f89c9bfd340177c2b5b57d6cca2ed8fa8917c7f312eb8c7ce1" gracePeriod=600 Nov 28 15:06:11 crc kubenswrapper[4857]: E1128 15:06:11.443977 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:06:12 crc kubenswrapper[4857]: I1128 15:06:12.009186 4857 generic.go:334] "Generic (PLEG): container finished" podID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerID="241be049ff6ed3f89c9bfd340177c2b5b57d6cca2ed8fa8917c7f312eb8c7ce1" exitCode=0 Nov 28 15:06:12 crc kubenswrapper[4857]: I1128 15:06:12.009272 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerDied","Data":"241be049ff6ed3f89c9bfd340177c2b5b57d6cca2ed8fa8917c7f312eb8c7ce1"} Nov 28 15:06:12 crc kubenswrapper[4857]: I1128 15:06:12.009361 4857 scope.go:117] "RemoveContainer" containerID="e74738a0b8d5bffa43f61ec8bf86cdc9bce08ade2ead6c33503f5aba9862d3f0" Nov 28 15:06:12 crc kubenswrapper[4857]: I1128 15:06:12.010463 4857 scope.go:117] "RemoveContainer" containerID="241be049ff6ed3f89c9bfd340177c2b5b57d6cca2ed8fa8917c7f312eb8c7ce1" Nov 28 15:06:12 crc kubenswrapper[4857]: E1128 15:06:12.011135 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:06:14 crc kubenswrapper[4857]: I1128 15:06:14.204758 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 28 15:06:14 crc kubenswrapper[4857]: I1128 15:06:14.206263 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 28 15:06:14 crc kubenswrapper[4857]: I1128 15:06:14.209524 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 28 15:06:14 crc kubenswrapper[4857]: I1128 15:06:14.266249 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 15:06:14 crc kubenswrapper[4857]: I1128 15:06:14.266371 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 28 15:06:14 crc kubenswrapper[4857]: I1128 15:06:14.266755 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 15:06:14 crc kubenswrapper[4857]: I1128 15:06:14.267072 4857 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 28 15:06:14 crc kubenswrapper[4857]: I1128 15:06:14.272096 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 28 15:06:14 crc kubenswrapper[4857]: I1128 15:06:14.275802 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 28 15:06:14 crc kubenswrapper[4857]: I1128 15:06:14.546818 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78644f9f4f-fwdcg"] Nov 28 15:06:14 crc kubenswrapper[4857]: E1128 15:06:14.547653 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b9d00f2-539c-431b-bf1a-5934e2f0e904" containerName="registry-server" Nov 28 15:06:14 crc kubenswrapper[4857]: I1128 15:06:14.547674 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b9d00f2-539c-431b-bf1a-5934e2f0e904" containerName="registry-server" Nov 28 15:06:14 crc kubenswrapper[4857]: E1128 15:06:14.547689 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b9d00f2-539c-431b-bf1a-5934e2f0e904" containerName="extract-utilities" Nov 28 15:06:14 crc kubenswrapper[4857]: I1128 15:06:14.547698 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b9d00f2-539c-431b-bf1a-5934e2f0e904" containerName="extract-utilities" Nov 28 15:06:14 crc kubenswrapper[4857]: E1128 15:06:14.547730 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5b9d00f2-539c-431b-bf1a-5934e2f0e904" containerName="extract-content" Nov 28 15:06:14 crc kubenswrapper[4857]: I1128 15:06:14.547738 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="5b9d00f2-539c-431b-bf1a-5934e2f0e904" containerName="extract-content" Nov 28 15:06:14 crc kubenswrapper[4857]: I1128 15:06:14.547977 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="5b9d00f2-539c-431b-bf1a-5934e2f0e904" containerName="registry-server" Nov 28 15:06:14 crc kubenswrapper[4857]: I1128 15:06:14.549107 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78644f9f4f-fwdcg" Nov 28 15:06:14 crc kubenswrapper[4857]: I1128 15:06:14.572599 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78644f9f4f-fwdcg"] Nov 28 15:06:14 crc kubenswrapper[4857]: I1128 15:06:14.719178 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45hhc\" (UniqueName: \"kubernetes.io/projected/3c713cef-f071-4007-9b70-19a5867ff4d0-kube-api-access-45hhc\") pod \"dnsmasq-dns-78644f9f4f-fwdcg\" (UID: \"3c713cef-f071-4007-9b70-19a5867ff4d0\") " pod="openstack/dnsmasq-dns-78644f9f4f-fwdcg" Nov 28 15:06:14 crc kubenswrapper[4857]: I1128 15:06:14.719239 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3c713cef-f071-4007-9b70-19a5867ff4d0-ovsdbserver-sb\") pod \"dnsmasq-dns-78644f9f4f-fwdcg\" (UID: \"3c713cef-f071-4007-9b70-19a5867ff4d0\") " pod="openstack/dnsmasq-dns-78644f9f4f-fwdcg" Nov 28 15:06:14 crc kubenswrapper[4857]: I1128 15:06:14.719259 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3c713cef-f071-4007-9b70-19a5867ff4d0-ovsdbserver-nb\") pod \"dnsmasq-dns-78644f9f4f-fwdcg\" (UID: \"3c713cef-f071-4007-9b70-19a5867ff4d0\") " pod="openstack/dnsmasq-dns-78644f9f4f-fwdcg" Nov 28 15:06:14 crc kubenswrapper[4857]: I1128 15:06:14.719339 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3c713cef-f071-4007-9b70-19a5867ff4d0-dns-svc\") pod \"dnsmasq-dns-78644f9f4f-fwdcg\" (UID: \"3c713cef-f071-4007-9b70-19a5867ff4d0\") " pod="openstack/dnsmasq-dns-78644f9f4f-fwdcg" Nov 28 15:06:14 crc kubenswrapper[4857]: I1128 15:06:14.719360 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3c713cef-f071-4007-9b70-19a5867ff4d0-config\") pod \"dnsmasq-dns-78644f9f4f-fwdcg\" (UID: \"3c713cef-f071-4007-9b70-19a5867ff4d0\") " pod="openstack/dnsmasq-dns-78644f9f4f-fwdcg" Nov 28 15:06:14 crc kubenswrapper[4857]: I1128 15:06:14.821128 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45hhc\" (UniqueName: \"kubernetes.io/projected/3c713cef-f071-4007-9b70-19a5867ff4d0-kube-api-access-45hhc\") pod \"dnsmasq-dns-78644f9f4f-fwdcg\" (UID: \"3c713cef-f071-4007-9b70-19a5867ff4d0\") " pod="openstack/dnsmasq-dns-78644f9f4f-fwdcg" Nov 28 15:06:14 crc kubenswrapper[4857]: I1128 15:06:14.821211 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3c713cef-f071-4007-9b70-19a5867ff4d0-ovsdbserver-nb\") pod \"dnsmasq-dns-78644f9f4f-fwdcg\" (UID: \"3c713cef-f071-4007-9b70-19a5867ff4d0\") " pod="openstack/dnsmasq-dns-78644f9f4f-fwdcg" Nov 28 15:06:14 crc kubenswrapper[4857]: I1128 15:06:14.821258 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3c713cef-f071-4007-9b70-19a5867ff4d0-ovsdbserver-sb\") pod \"dnsmasq-dns-78644f9f4f-fwdcg\" (UID: \"3c713cef-f071-4007-9b70-19a5867ff4d0\") " pod="openstack/dnsmasq-dns-78644f9f4f-fwdcg" Nov 28 15:06:14 crc kubenswrapper[4857]: I1128 15:06:14.821394 4857 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3c713cef-f071-4007-9b70-19a5867ff4d0-dns-svc\") pod \"dnsmasq-dns-78644f9f4f-fwdcg\" (UID: \"3c713cef-f071-4007-9b70-19a5867ff4d0\") " pod="openstack/dnsmasq-dns-78644f9f4f-fwdcg" Nov 28 15:06:14 crc kubenswrapper[4857]: I1128 15:06:14.821428 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3c713cef-f071-4007-9b70-19a5867ff4d0-config\") pod \"dnsmasq-dns-78644f9f4f-fwdcg\" (UID: \"3c713cef-f071-4007-9b70-19a5867ff4d0\") " pod="openstack/dnsmasq-dns-78644f9f4f-fwdcg" Nov 28 15:06:14 crc kubenswrapper[4857]: I1128 15:06:14.822386 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3c713cef-f071-4007-9b70-19a5867ff4d0-ovsdbserver-nb\") pod \"dnsmasq-dns-78644f9f4f-fwdcg\" (UID: \"3c713cef-f071-4007-9b70-19a5867ff4d0\") " pod="openstack/dnsmasq-dns-78644f9f4f-fwdcg" Nov 28 15:06:14 crc kubenswrapper[4857]: I1128 15:06:14.822401 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3c713cef-f071-4007-9b70-19a5867ff4d0-config\") pod \"dnsmasq-dns-78644f9f4f-fwdcg\" (UID: \"3c713cef-f071-4007-9b70-19a5867ff4d0\") " pod="openstack/dnsmasq-dns-78644f9f4f-fwdcg" Nov 28 15:06:14 crc kubenswrapper[4857]: I1128 15:06:14.822453 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3c713cef-f071-4007-9b70-19a5867ff4d0-ovsdbserver-sb\") pod \"dnsmasq-dns-78644f9f4f-fwdcg\" (UID: \"3c713cef-f071-4007-9b70-19a5867ff4d0\") " pod="openstack/dnsmasq-dns-78644f9f4f-fwdcg" Nov 28 15:06:14 crc kubenswrapper[4857]: I1128 15:06:14.823298 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3c713cef-f071-4007-9b70-19a5867ff4d0-dns-svc\") pod \"dnsmasq-dns-78644f9f4f-fwdcg\" (UID: \"3c713cef-f071-4007-9b70-19a5867ff4d0\") " pod="openstack/dnsmasq-dns-78644f9f4f-fwdcg" Nov 28 15:06:14 crc kubenswrapper[4857]: I1128 15:06:14.845649 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-45hhc\" (UniqueName: \"kubernetes.io/projected/3c713cef-f071-4007-9b70-19a5867ff4d0-kube-api-access-45hhc\") pod \"dnsmasq-dns-78644f9f4f-fwdcg\" (UID: \"3c713cef-f071-4007-9b70-19a5867ff4d0\") " pod="openstack/dnsmasq-dns-78644f9f4f-fwdcg" Nov 28 15:06:14 crc kubenswrapper[4857]: I1128 15:06:14.875504 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78644f9f4f-fwdcg" Nov 28 15:06:15 crc kubenswrapper[4857]: I1128 15:06:15.052217 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 28 15:06:15 crc kubenswrapper[4857]: I1128 15:06:15.402057 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78644f9f4f-fwdcg"] Nov 28 15:06:16 crc kubenswrapper[4857]: I1128 15:06:16.054936 4857 generic.go:334] "Generic (PLEG): container finished" podID="3c713cef-f071-4007-9b70-19a5867ff4d0" containerID="eeec77ada6797b557cf534f4f93678af4765bb3382fde066160d77d2f3710161" exitCode=0 Nov 28 15:06:16 crc kubenswrapper[4857]: I1128 15:06:16.054984 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78644f9f4f-fwdcg" event={"ID":"3c713cef-f071-4007-9b70-19a5867ff4d0","Type":"ContainerDied","Data":"eeec77ada6797b557cf534f4f93678af4765bb3382fde066160d77d2f3710161"} Nov 28 15:06:16 crc kubenswrapper[4857]: I1128 15:06:16.055506 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78644f9f4f-fwdcg" event={"ID":"3c713cef-f071-4007-9b70-19a5867ff4d0","Type":"ContainerStarted","Data":"30b3a10665e9b2622cf78580de123018067a92f12fbdd8520bd46f2c70f7356d"} Nov 28 15:06:17 crc kubenswrapper[4857]: I1128 15:06:17.076071 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78644f9f4f-fwdcg" event={"ID":"3c713cef-f071-4007-9b70-19a5867ff4d0","Type":"ContainerStarted","Data":"a338a944f0d2851bd1717018002460d53e9d4c2ce0ba7719fa168077315c391f"} Nov 28 15:06:17 crc kubenswrapper[4857]: I1128 15:06:17.076511 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-78644f9f4f-fwdcg" Nov 28 15:06:17 crc kubenswrapper[4857]: I1128 15:06:17.106277 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-78644f9f4f-fwdcg" podStartSLOduration=3.106253556 podStartE2EDuration="3.106253556s" podCreationTimestamp="2025-11-28 15:06:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:06:17.10041729 +0000 UTC m=+5827.224358757" watchObservedRunningTime="2025-11-28 15:06:17.106253556 +0000 UTC m=+5827.230194993" Nov 28 15:06:24 crc kubenswrapper[4857]: I1128 15:06:24.877135 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-78644f9f4f-fwdcg" Nov 28 15:06:24 crc kubenswrapper[4857]: I1128 15:06:24.973189 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-68986bcc97-7cg9g"] Nov 28 15:06:24 crc kubenswrapper[4857]: I1128 15:06:24.973529 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-68986bcc97-7cg9g" podUID="f446bab3-fb39-49ec-bc9b-e782aaf0cd5a" containerName="dnsmasq-dns" containerID="cri-o://31956e593877833bd9af85f7019922c6a345dd26222ba25cf15855f4102287d4" gracePeriod=10 Nov 28 15:06:25 crc kubenswrapper[4857]: I1128 15:06:25.170751 4857 generic.go:334] "Generic (PLEG): container finished" podID="f446bab3-fb39-49ec-bc9b-e782aaf0cd5a" containerID="31956e593877833bd9af85f7019922c6a345dd26222ba25cf15855f4102287d4" exitCode=0 Nov 28 15:06:25 crc kubenswrapper[4857]: I1128 15:06:25.171307 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68986bcc97-7cg9g" 
event={"ID":"f446bab3-fb39-49ec-bc9b-e782aaf0cd5a","Type":"ContainerDied","Data":"31956e593877833bd9af85f7019922c6a345dd26222ba25cf15855f4102287d4"} Nov 28 15:06:25 crc kubenswrapper[4857]: I1128 15:06:25.229478 4857 scope.go:117] "RemoveContainer" containerID="241be049ff6ed3f89c9bfd340177c2b5b57d6cca2ed8fa8917c7f312eb8c7ce1" Nov 28 15:06:25 crc kubenswrapper[4857]: E1128 15:06:25.229796 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:06:25 crc kubenswrapper[4857]: I1128 15:06:25.490573 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68986bcc97-7cg9g" Nov 28 15:06:25 crc kubenswrapper[4857]: I1128 15:06:25.590411 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f446bab3-fb39-49ec-bc9b-e782aaf0cd5a-dns-svc\") pod \"f446bab3-fb39-49ec-bc9b-e782aaf0cd5a\" (UID: \"f446bab3-fb39-49ec-bc9b-e782aaf0cd5a\") " Nov 28 15:06:25 crc kubenswrapper[4857]: I1128 15:06:25.590488 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f446bab3-fb39-49ec-bc9b-e782aaf0cd5a-ovsdbserver-nb\") pod \"f446bab3-fb39-49ec-bc9b-e782aaf0cd5a\" (UID: \"f446bab3-fb39-49ec-bc9b-e782aaf0cd5a\") " Nov 28 15:06:25 crc kubenswrapper[4857]: I1128 15:06:25.590586 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f446bab3-fb39-49ec-bc9b-e782aaf0cd5a-config\") pod \"f446bab3-fb39-49ec-bc9b-e782aaf0cd5a\" (UID: \"f446bab3-fb39-49ec-bc9b-e782aaf0cd5a\") " Nov 28 15:06:25 crc kubenswrapper[4857]: I1128 15:06:25.590644 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f446bab3-fb39-49ec-bc9b-e782aaf0cd5a-ovsdbserver-sb\") pod \"f446bab3-fb39-49ec-bc9b-e782aaf0cd5a\" (UID: \"f446bab3-fb39-49ec-bc9b-e782aaf0cd5a\") " Nov 28 15:06:25 crc kubenswrapper[4857]: I1128 15:06:25.590705 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ttsbh\" (UniqueName: \"kubernetes.io/projected/f446bab3-fb39-49ec-bc9b-e782aaf0cd5a-kube-api-access-ttsbh\") pod \"f446bab3-fb39-49ec-bc9b-e782aaf0cd5a\" (UID: \"f446bab3-fb39-49ec-bc9b-e782aaf0cd5a\") " Nov 28 15:06:25 crc kubenswrapper[4857]: I1128 15:06:25.609206 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f446bab3-fb39-49ec-bc9b-e782aaf0cd5a-kube-api-access-ttsbh" (OuterVolumeSpecName: "kube-api-access-ttsbh") pod "f446bab3-fb39-49ec-bc9b-e782aaf0cd5a" (UID: "f446bab3-fb39-49ec-bc9b-e782aaf0cd5a"). InnerVolumeSpecName "kube-api-access-ttsbh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:06:25 crc kubenswrapper[4857]: I1128 15:06:25.635551 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f446bab3-fb39-49ec-bc9b-e782aaf0cd5a-config" (OuterVolumeSpecName: "config") pod "f446bab3-fb39-49ec-bc9b-e782aaf0cd5a" (UID: "f446bab3-fb39-49ec-bc9b-e782aaf0cd5a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:06:25 crc kubenswrapper[4857]: I1128 15:06:25.640072 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f446bab3-fb39-49ec-bc9b-e782aaf0cd5a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f446bab3-fb39-49ec-bc9b-e782aaf0cd5a" (UID: "f446bab3-fb39-49ec-bc9b-e782aaf0cd5a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:06:25 crc kubenswrapper[4857]: I1128 15:06:25.641410 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f446bab3-fb39-49ec-bc9b-e782aaf0cd5a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f446bab3-fb39-49ec-bc9b-e782aaf0cd5a" (UID: "f446bab3-fb39-49ec-bc9b-e782aaf0cd5a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:06:25 crc kubenswrapper[4857]: I1128 15:06:25.651961 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f446bab3-fb39-49ec-bc9b-e782aaf0cd5a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f446bab3-fb39-49ec-bc9b-e782aaf0cd5a" (UID: "f446bab3-fb39-49ec-bc9b-e782aaf0cd5a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:06:25 crc kubenswrapper[4857]: I1128 15:06:25.693327 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f446bab3-fb39-49ec-bc9b-e782aaf0cd5a-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 15:06:25 crc kubenswrapper[4857]: I1128 15:06:25.693535 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f446bab3-fb39-49ec-bc9b-e782aaf0cd5a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 15:06:25 crc kubenswrapper[4857]: I1128 15:06:25.693664 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f446bab3-fb39-49ec-bc9b-e782aaf0cd5a-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:06:25 crc kubenswrapper[4857]: I1128 15:06:25.693719 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f446bab3-fb39-49ec-bc9b-e782aaf0cd5a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 15:06:25 crc kubenswrapper[4857]: I1128 15:06:25.693772 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ttsbh\" (UniqueName: \"kubernetes.io/projected/f446bab3-fb39-49ec-bc9b-e782aaf0cd5a-kube-api-access-ttsbh\") on node \"crc\" DevicePath \"\"" Nov 28 15:06:26 crc kubenswrapper[4857]: I1128 15:06:26.184333 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68986bcc97-7cg9g" event={"ID":"f446bab3-fb39-49ec-bc9b-e782aaf0cd5a","Type":"ContainerDied","Data":"2413bc0537cbd00aea2543e902e5114253165bab0149ae27fb60fcf9243672fc"} Nov 28 15:06:26 crc kubenswrapper[4857]: I1128 15:06:26.184878 4857 scope.go:117] "RemoveContainer" 
containerID="31956e593877833bd9af85f7019922c6a345dd26222ba25cf15855f4102287d4" Nov 28 15:06:26 crc kubenswrapper[4857]: I1128 15:06:26.184431 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68986bcc97-7cg9g" Nov 28 15:06:26 crc kubenswrapper[4857]: I1128 15:06:26.229452 4857 scope.go:117] "RemoveContainer" containerID="82618a00a08658c83726259d4c81250ccb1c1ccff53c5f84a30452d7d3416bcf" Nov 28 15:06:26 crc kubenswrapper[4857]: I1128 15:06:26.255080 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-68986bcc97-7cg9g"] Nov 28 15:06:26 crc kubenswrapper[4857]: I1128 15:06:26.265939 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-68986bcc97-7cg9g"] Nov 28 15:06:27 crc kubenswrapper[4857]: I1128 15:06:27.282726 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-7tw52"] Nov 28 15:06:27 crc kubenswrapper[4857]: E1128 15:06:27.283282 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f446bab3-fb39-49ec-bc9b-e782aaf0cd5a" containerName="dnsmasq-dns" Nov 28 15:06:27 crc kubenswrapper[4857]: I1128 15:06:27.283300 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f446bab3-fb39-49ec-bc9b-e782aaf0cd5a" containerName="dnsmasq-dns" Nov 28 15:06:27 crc kubenswrapper[4857]: E1128 15:06:27.283322 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f446bab3-fb39-49ec-bc9b-e782aaf0cd5a" containerName="init" Nov 28 15:06:27 crc kubenswrapper[4857]: I1128 15:06:27.283330 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f446bab3-fb39-49ec-bc9b-e782aaf0cd5a" containerName="init" Nov 28 15:06:27 crc kubenswrapper[4857]: I1128 15:06:27.283559 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f446bab3-fb39-49ec-bc9b-e782aaf0cd5a" containerName="dnsmasq-dns" Nov 28 15:06:27 crc kubenswrapper[4857]: I1128 15:06:27.284389 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-7tw52" Nov 28 15:06:27 crc kubenswrapper[4857]: I1128 15:06:27.312071 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-7tw52"] Nov 28 15:06:27 crc kubenswrapper[4857]: I1128 15:06:27.333649 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x9hhv\" (UniqueName: \"kubernetes.io/projected/a49de8a5-e449-4cd2-976a-015e5c8e4362-kube-api-access-x9hhv\") pod \"cinder-db-create-7tw52\" (UID: \"a49de8a5-e449-4cd2-976a-015e5c8e4362\") " pod="openstack/cinder-db-create-7tw52" Nov 28 15:06:27 crc kubenswrapper[4857]: I1128 15:06:27.333817 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a49de8a5-e449-4cd2-976a-015e5c8e4362-operator-scripts\") pod \"cinder-db-create-7tw52\" (UID: \"a49de8a5-e449-4cd2-976a-015e5c8e4362\") " pod="openstack/cinder-db-create-7tw52" Nov 28 15:06:27 crc kubenswrapper[4857]: I1128 15:06:27.381313 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-c454-account-create-update-9schb"] Nov 28 15:06:27 crc kubenswrapper[4857]: I1128 15:06:27.382640 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-c454-account-create-update-9schb" Nov 28 15:06:27 crc kubenswrapper[4857]: I1128 15:06:27.386854 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 28 15:06:27 crc kubenswrapper[4857]: I1128 15:06:27.390713 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-c454-account-create-update-9schb"] Nov 28 15:06:27 crc kubenswrapper[4857]: I1128 15:06:27.435212 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a49de8a5-e449-4cd2-976a-015e5c8e4362-operator-scripts\") pod \"cinder-db-create-7tw52\" (UID: \"a49de8a5-e449-4cd2-976a-015e5c8e4362\") " pod="openstack/cinder-db-create-7tw52" Nov 28 15:06:27 crc kubenswrapper[4857]: I1128 15:06:27.435300 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd3a275e-912f-42e7-85b2-7e0ed8f6c2a1-operator-scripts\") pod \"cinder-c454-account-create-update-9schb\" (UID: \"bd3a275e-912f-42e7-85b2-7e0ed8f6c2a1\") " pod="openstack/cinder-c454-account-create-update-9schb" Nov 28 15:06:27 crc kubenswrapper[4857]: I1128 15:06:27.435329 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x9hhv\" (UniqueName: \"kubernetes.io/projected/a49de8a5-e449-4cd2-976a-015e5c8e4362-kube-api-access-x9hhv\") pod \"cinder-db-create-7tw52\" (UID: \"a49de8a5-e449-4cd2-976a-015e5c8e4362\") " pod="openstack/cinder-db-create-7tw52" Nov 28 15:06:27 crc kubenswrapper[4857]: I1128 15:06:27.435350 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mnwjw\" (UniqueName: \"kubernetes.io/projected/bd3a275e-912f-42e7-85b2-7e0ed8f6c2a1-kube-api-access-mnwjw\") pod \"cinder-c454-account-create-update-9schb\" (UID: \"bd3a275e-912f-42e7-85b2-7e0ed8f6c2a1\") " pod="openstack/cinder-c454-account-create-update-9schb" Nov 28 15:06:27 crc kubenswrapper[4857]: I1128 15:06:27.436071 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a49de8a5-e449-4cd2-976a-015e5c8e4362-operator-scripts\") pod \"cinder-db-create-7tw52\" (UID: \"a49de8a5-e449-4cd2-976a-015e5c8e4362\") " pod="openstack/cinder-db-create-7tw52" Nov 28 15:06:27 crc kubenswrapper[4857]: I1128 15:06:27.461530 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x9hhv\" (UniqueName: \"kubernetes.io/projected/a49de8a5-e449-4cd2-976a-015e5c8e4362-kube-api-access-x9hhv\") pod \"cinder-db-create-7tw52\" (UID: \"a49de8a5-e449-4cd2-976a-015e5c8e4362\") " pod="openstack/cinder-db-create-7tw52" Nov 28 15:06:27 crc kubenswrapper[4857]: I1128 15:06:27.537664 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd3a275e-912f-42e7-85b2-7e0ed8f6c2a1-operator-scripts\") pod \"cinder-c454-account-create-update-9schb\" (UID: \"bd3a275e-912f-42e7-85b2-7e0ed8f6c2a1\") " pod="openstack/cinder-c454-account-create-update-9schb" Nov 28 15:06:27 crc kubenswrapper[4857]: I1128 15:06:27.538181 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mnwjw\" (UniqueName: \"kubernetes.io/projected/bd3a275e-912f-42e7-85b2-7e0ed8f6c2a1-kube-api-access-mnwjw\") pod 
\"cinder-c454-account-create-update-9schb\" (UID: \"bd3a275e-912f-42e7-85b2-7e0ed8f6c2a1\") " pod="openstack/cinder-c454-account-create-update-9schb" Nov 28 15:06:27 crc kubenswrapper[4857]: I1128 15:06:27.538455 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd3a275e-912f-42e7-85b2-7e0ed8f6c2a1-operator-scripts\") pod \"cinder-c454-account-create-update-9schb\" (UID: \"bd3a275e-912f-42e7-85b2-7e0ed8f6c2a1\") " pod="openstack/cinder-c454-account-create-update-9schb" Nov 28 15:06:27 crc kubenswrapper[4857]: I1128 15:06:27.562811 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mnwjw\" (UniqueName: \"kubernetes.io/projected/bd3a275e-912f-42e7-85b2-7e0ed8f6c2a1-kube-api-access-mnwjw\") pod \"cinder-c454-account-create-update-9schb\" (UID: \"bd3a275e-912f-42e7-85b2-7e0ed8f6c2a1\") " pod="openstack/cinder-c454-account-create-update-9schb" Nov 28 15:06:27 crc kubenswrapper[4857]: I1128 15:06:27.613635 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-7tw52" Nov 28 15:06:27 crc kubenswrapper[4857]: I1128 15:06:27.751086 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-c454-account-create-update-9schb" Nov 28 15:06:28 crc kubenswrapper[4857]: I1128 15:06:28.156182 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-7tw52"] Nov 28 15:06:28 crc kubenswrapper[4857]: I1128 15:06:28.207853 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-7tw52" event={"ID":"a49de8a5-e449-4cd2-976a-015e5c8e4362","Type":"ContainerStarted","Data":"84e1de0b4db1cbd5746c5900077d667fe8c026dc96196bdc3207e0fa95575a6f"} Nov 28 15:06:28 crc kubenswrapper[4857]: I1128 15:06:28.239764 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f446bab3-fb39-49ec-bc9b-e782aaf0cd5a" path="/var/lib/kubelet/pods/f446bab3-fb39-49ec-bc9b-e782aaf0cd5a/volumes" Nov 28 15:06:28 crc kubenswrapper[4857]: I1128 15:06:28.278464 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-c454-account-create-update-9schb"] Nov 28 15:06:28 crc kubenswrapper[4857]: W1128 15:06:28.288247 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbd3a275e_912f_42e7_85b2_7e0ed8f6c2a1.slice/crio-6ce6ca97e9b4a4ff5f7d7b5197de6fde49f111ef532fcc293c5d963e741475b1 WatchSource:0}: Error finding container 6ce6ca97e9b4a4ff5f7d7b5197de6fde49f111ef532fcc293c5d963e741475b1: Status 404 returned error can't find the container with id 6ce6ca97e9b4a4ff5f7d7b5197de6fde49f111ef532fcc293c5d963e741475b1 Nov 28 15:06:29 crc kubenswrapper[4857]: I1128 15:06:29.223478 4857 generic.go:334] "Generic (PLEG): container finished" podID="a49de8a5-e449-4cd2-976a-015e5c8e4362" containerID="ba82ca8cd5c155ae56c6776578e535ac2429a735058c25a210d2ce7ec80f1085" exitCode=0 Nov 28 15:06:29 crc kubenswrapper[4857]: I1128 15:06:29.224036 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-7tw52" event={"ID":"a49de8a5-e449-4cd2-976a-015e5c8e4362","Type":"ContainerDied","Data":"ba82ca8cd5c155ae56c6776578e535ac2429a735058c25a210d2ce7ec80f1085"} Nov 28 15:06:29 crc kubenswrapper[4857]: I1128 15:06:29.227842 4857 generic.go:334] "Generic (PLEG): container finished" podID="bd3a275e-912f-42e7-85b2-7e0ed8f6c2a1" 
containerID="0fb3206cba2f538ea8066fbc30080b541d1814237828588e02d1b37a903e5638" exitCode=0 Nov 28 15:06:29 crc kubenswrapper[4857]: I1128 15:06:29.227943 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-c454-account-create-update-9schb" event={"ID":"bd3a275e-912f-42e7-85b2-7e0ed8f6c2a1","Type":"ContainerDied","Data":"0fb3206cba2f538ea8066fbc30080b541d1814237828588e02d1b37a903e5638"} Nov 28 15:06:29 crc kubenswrapper[4857]: I1128 15:06:29.228041 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-c454-account-create-update-9schb" event={"ID":"bd3a275e-912f-42e7-85b2-7e0ed8f6c2a1","Type":"ContainerStarted","Data":"6ce6ca97e9b4a4ff5f7d7b5197de6fde49f111ef532fcc293c5d963e741475b1"} Nov 28 15:06:30 crc kubenswrapper[4857]: I1128 15:06:30.735520 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-c454-account-create-update-9schb" Nov 28 15:06:30 crc kubenswrapper[4857]: I1128 15:06:30.741156 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-7tw52" Nov 28 15:06:30 crc kubenswrapper[4857]: I1128 15:06:30.925784 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd3a275e-912f-42e7-85b2-7e0ed8f6c2a1-operator-scripts\") pod \"bd3a275e-912f-42e7-85b2-7e0ed8f6c2a1\" (UID: \"bd3a275e-912f-42e7-85b2-7e0ed8f6c2a1\") " Nov 28 15:06:30 crc kubenswrapper[4857]: I1128 15:06:30.925904 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a49de8a5-e449-4cd2-976a-015e5c8e4362-operator-scripts\") pod \"a49de8a5-e449-4cd2-976a-015e5c8e4362\" (UID: \"a49de8a5-e449-4cd2-976a-015e5c8e4362\") " Nov 28 15:06:30 crc kubenswrapper[4857]: I1128 15:06:30.926006 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnwjw\" (UniqueName: \"kubernetes.io/projected/bd3a275e-912f-42e7-85b2-7e0ed8f6c2a1-kube-api-access-mnwjw\") pod \"bd3a275e-912f-42e7-85b2-7e0ed8f6c2a1\" (UID: \"bd3a275e-912f-42e7-85b2-7e0ed8f6c2a1\") " Nov 28 15:06:30 crc kubenswrapper[4857]: I1128 15:06:30.926064 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x9hhv\" (UniqueName: \"kubernetes.io/projected/a49de8a5-e449-4cd2-976a-015e5c8e4362-kube-api-access-x9hhv\") pod \"a49de8a5-e449-4cd2-976a-015e5c8e4362\" (UID: \"a49de8a5-e449-4cd2-976a-015e5c8e4362\") " Nov 28 15:06:30 crc kubenswrapper[4857]: I1128 15:06:30.926346 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd3a275e-912f-42e7-85b2-7e0ed8f6c2a1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bd3a275e-912f-42e7-85b2-7e0ed8f6c2a1" (UID: "bd3a275e-912f-42e7-85b2-7e0ed8f6c2a1"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:06:30 crc kubenswrapper[4857]: I1128 15:06:30.927222 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bd3a275e-912f-42e7-85b2-7e0ed8f6c2a1-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:06:30 crc kubenswrapper[4857]: I1128 15:06:30.927239 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a49de8a5-e449-4cd2-976a-015e5c8e4362-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a49de8a5-e449-4cd2-976a-015e5c8e4362" (UID: "a49de8a5-e449-4cd2-976a-015e5c8e4362"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:06:30 crc kubenswrapper[4857]: I1128 15:06:30.933598 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd3a275e-912f-42e7-85b2-7e0ed8f6c2a1-kube-api-access-mnwjw" (OuterVolumeSpecName: "kube-api-access-mnwjw") pod "bd3a275e-912f-42e7-85b2-7e0ed8f6c2a1" (UID: "bd3a275e-912f-42e7-85b2-7e0ed8f6c2a1"). InnerVolumeSpecName "kube-api-access-mnwjw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:06:30 crc kubenswrapper[4857]: I1128 15:06:30.935243 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a49de8a5-e449-4cd2-976a-015e5c8e4362-kube-api-access-x9hhv" (OuterVolumeSpecName: "kube-api-access-x9hhv") pod "a49de8a5-e449-4cd2-976a-015e5c8e4362" (UID: "a49de8a5-e449-4cd2-976a-015e5c8e4362"). InnerVolumeSpecName "kube-api-access-x9hhv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:06:31 crc kubenswrapper[4857]: I1128 15:06:31.029616 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a49de8a5-e449-4cd2-976a-015e5c8e4362-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:06:31 crc kubenswrapper[4857]: I1128 15:06:31.029682 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnwjw\" (UniqueName: \"kubernetes.io/projected/bd3a275e-912f-42e7-85b2-7e0ed8f6c2a1-kube-api-access-mnwjw\") on node \"crc\" DevicePath \"\"" Nov 28 15:06:31 crc kubenswrapper[4857]: I1128 15:06:31.029706 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x9hhv\" (UniqueName: \"kubernetes.io/projected/a49de8a5-e449-4cd2-976a-015e5c8e4362-kube-api-access-x9hhv\") on node \"crc\" DevicePath \"\"" Nov 28 15:06:31 crc kubenswrapper[4857]: I1128 15:06:31.261649 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-7tw52" Nov 28 15:06:31 crc kubenswrapper[4857]: I1128 15:06:31.262053 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-7tw52" event={"ID":"a49de8a5-e449-4cd2-976a-015e5c8e4362","Type":"ContainerDied","Data":"84e1de0b4db1cbd5746c5900077d667fe8c026dc96196bdc3207e0fa95575a6f"} Nov 28 15:06:31 crc kubenswrapper[4857]: I1128 15:06:31.262664 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="84e1de0b4db1cbd5746c5900077d667fe8c026dc96196bdc3207e0fa95575a6f" Nov 28 15:06:31 crc kubenswrapper[4857]: I1128 15:06:31.264265 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-c454-account-create-update-9schb" event={"ID":"bd3a275e-912f-42e7-85b2-7e0ed8f6c2a1","Type":"ContainerDied","Data":"6ce6ca97e9b4a4ff5f7d7b5197de6fde49f111ef532fcc293c5d963e741475b1"} Nov 28 15:06:31 crc kubenswrapper[4857]: I1128 15:06:31.264321 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6ce6ca97e9b4a4ff5f7d7b5197de6fde49f111ef532fcc293c5d963e741475b1" Nov 28 15:06:31 crc kubenswrapper[4857]: I1128 15:06:31.264407 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-c454-account-create-update-9schb" Nov 28 15:06:32 crc kubenswrapper[4857]: I1128 15:06:32.651315 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-8xp9l"] Nov 28 15:06:32 crc kubenswrapper[4857]: E1128 15:06:32.652486 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd3a275e-912f-42e7-85b2-7e0ed8f6c2a1" containerName="mariadb-account-create-update" Nov 28 15:06:32 crc kubenswrapper[4857]: I1128 15:06:32.652506 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd3a275e-912f-42e7-85b2-7e0ed8f6c2a1" containerName="mariadb-account-create-update" Nov 28 15:06:32 crc kubenswrapper[4857]: E1128 15:06:32.652556 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a49de8a5-e449-4cd2-976a-015e5c8e4362" containerName="mariadb-database-create" Nov 28 15:06:32 crc kubenswrapper[4857]: I1128 15:06:32.652563 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a49de8a5-e449-4cd2-976a-015e5c8e4362" containerName="mariadb-database-create" Nov 28 15:06:32 crc kubenswrapper[4857]: I1128 15:06:32.652876 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd3a275e-912f-42e7-85b2-7e0ed8f6c2a1" containerName="mariadb-account-create-update" Nov 28 15:06:32 crc kubenswrapper[4857]: I1128 15:06:32.652902 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="a49de8a5-e449-4cd2-976a-015e5c8e4362" containerName="mariadb-database-create" Nov 28 15:06:32 crc kubenswrapper[4857]: I1128 15:06:32.653914 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-8xp9l" Nov 28 15:06:32 crc kubenswrapper[4857]: I1128 15:06:32.657285 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 28 15:06:32 crc kubenswrapper[4857]: I1128 15:06:32.657462 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 28 15:06:32 crc kubenswrapper[4857]: I1128 15:06:32.657874 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-k656c" Nov 28 15:06:32 crc kubenswrapper[4857]: I1128 15:06:32.684238 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-8xp9l"] Nov 28 15:06:32 crc kubenswrapper[4857]: I1128 15:06:32.776197 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vwd92\" (UniqueName: \"kubernetes.io/projected/883a5e33-4994-4674-84fe-14c2d13671e9-kube-api-access-vwd92\") pod \"cinder-db-sync-8xp9l\" (UID: \"883a5e33-4994-4674-84fe-14c2d13671e9\") " pod="openstack/cinder-db-sync-8xp9l" Nov 28 15:06:32 crc kubenswrapper[4857]: I1128 15:06:32.776759 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/883a5e33-4994-4674-84fe-14c2d13671e9-etc-machine-id\") pod \"cinder-db-sync-8xp9l\" (UID: \"883a5e33-4994-4674-84fe-14c2d13671e9\") " pod="openstack/cinder-db-sync-8xp9l" Nov 28 15:06:32 crc kubenswrapper[4857]: I1128 15:06:32.777511 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/883a5e33-4994-4674-84fe-14c2d13671e9-scripts\") pod \"cinder-db-sync-8xp9l\" (UID: \"883a5e33-4994-4674-84fe-14c2d13671e9\") " pod="openstack/cinder-db-sync-8xp9l" Nov 28 15:06:32 crc kubenswrapper[4857]: I1128 15:06:32.777905 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/883a5e33-4994-4674-84fe-14c2d13671e9-config-data\") pod \"cinder-db-sync-8xp9l\" (UID: \"883a5e33-4994-4674-84fe-14c2d13671e9\") " pod="openstack/cinder-db-sync-8xp9l" Nov 28 15:06:32 crc kubenswrapper[4857]: I1128 15:06:32.778158 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/883a5e33-4994-4674-84fe-14c2d13671e9-db-sync-config-data\") pod \"cinder-db-sync-8xp9l\" (UID: \"883a5e33-4994-4674-84fe-14c2d13671e9\") " pod="openstack/cinder-db-sync-8xp9l" Nov 28 15:06:32 crc kubenswrapper[4857]: I1128 15:06:32.778330 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/883a5e33-4994-4674-84fe-14c2d13671e9-combined-ca-bundle\") pod \"cinder-db-sync-8xp9l\" (UID: \"883a5e33-4994-4674-84fe-14c2d13671e9\") " pod="openstack/cinder-db-sync-8xp9l" Nov 28 15:06:32 crc kubenswrapper[4857]: I1128 15:06:32.901603 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/883a5e33-4994-4674-84fe-14c2d13671e9-config-data\") pod \"cinder-db-sync-8xp9l\" (UID: \"883a5e33-4994-4674-84fe-14c2d13671e9\") " pod="openstack/cinder-db-sync-8xp9l" Nov 28 15:06:32 crc kubenswrapper[4857]: I1128 15:06:32.901690 4857 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/883a5e33-4994-4674-84fe-14c2d13671e9-combined-ca-bundle\") pod \"cinder-db-sync-8xp9l\" (UID: \"883a5e33-4994-4674-84fe-14c2d13671e9\") " pod="openstack/cinder-db-sync-8xp9l" Nov 28 15:06:32 crc kubenswrapper[4857]: I1128 15:06:32.901715 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/883a5e33-4994-4674-84fe-14c2d13671e9-db-sync-config-data\") pod \"cinder-db-sync-8xp9l\" (UID: \"883a5e33-4994-4674-84fe-14c2d13671e9\") " pod="openstack/cinder-db-sync-8xp9l" Nov 28 15:06:32 crc kubenswrapper[4857]: I1128 15:06:32.902092 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vwd92\" (UniqueName: \"kubernetes.io/projected/883a5e33-4994-4674-84fe-14c2d13671e9-kube-api-access-vwd92\") pod \"cinder-db-sync-8xp9l\" (UID: \"883a5e33-4994-4674-84fe-14c2d13671e9\") " pod="openstack/cinder-db-sync-8xp9l" Nov 28 15:06:32 crc kubenswrapper[4857]: I1128 15:06:32.902305 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/883a5e33-4994-4674-84fe-14c2d13671e9-etc-machine-id\") pod \"cinder-db-sync-8xp9l\" (UID: \"883a5e33-4994-4674-84fe-14c2d13671e9\") " pod="openstack/cinder-db-sync-8xp9l" Nov 28 15:06:32 crc kubenswrapper[4857]: I1128 15:06:32.902932 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/883a5e33-4994-4674-84fe-14c2d13671e9-scripts\") pod \"cinder-db-sync-8xp9l\" (UID: \"883a5e33-4994-4674-84fe-14c2d13671e9\") " pod="openstack/cinder-db-sync-8xp9l" Nov 28 15:06:32 crc kubenswrapper[4857]: I1128 15:06:32.903253 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/883a5e33-4994-4674-84fe-14c2d13671e9-etc-machine-id\") pod \"cinder-db-sync-8xp9l\" (UID: \"883a5e33-4994-4674-84fe-14c2d13671e9\") " pod="openstack/cinder-db-sync-8xp9l" Nov 28 15:06:32 crc kubenswrapper[4857]: I1128 15:06:32.914385 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/883a5e33-4994-4674-84fe-14c2d13671e9-combined-ca-bundle\") pod \"cinder-db-sync-8xp9l\" (UID: \"883a5e33-4994-4674-84fe-14c2d13671e9\") " pod="openstack/cinder-db-sync-8xp9l" Nov 28 15:06:32 crc kubenswrapper[4857]: I1128 15:06:32.914839 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/883a5e33-4994-4674-84fe-14c2d13671e9-db-sync-config-data\") pod \"cinder-db-sync-8xp9l\" (UID: \"883a5e33-4994-4674-84fe-14c2d13671e9\") " pod="openstack/cinder-db-sync-8xp9l" Nov 28 15:06:32 crc kubenswrapper[4857]: I1128 15:06:32.918064 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/883a5e33-4994-4674-84fe-14c2d13671e9-config-data\") pod \"cinder-db-sync-8xp9l\" (UID: \"883a5e33-4994-4674-84fe-14c2d13671e9\") " pod="openstack/cinder-db-sync-8xp9l" Nov 28 15:06:32 crc kubenswrapper[4857]: I1128 15:06:32.918279 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/883a5e33-4994-4674-84fe-14c2d13671e9-scripts\") pod \"cinder-db-sync-8xp9l\" (UID: \"883a5e33-4994-4674-84fe-14c2d13671e9\") " 
pod="openstack/cinder-db-sync-8xp9l" Nov 28 15:06:32 crc kubenswrapper[4857]: I1128 15:06:32.941506 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vwd92\" (UniqueName: \"kubernetes.io/projected/883a5e33-4994-4674-84fe-14c2d13671e9-kube-api-access-vwd92\") pod \"cinder-db-sync-8xp9l\" (UID: \"883a5e33-4994-4674-84fe-14c2d13671e9\") " pod="openstack/cinder-db-sync-8xp9l" Nov 28 15:06:32 crc kubenswrapper[4857]: I1128 15:06:32.986670 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-8xp9l" Nov 28 15:06:33 crc kubenswrapper[4857]: I1128 15:06:33.484412 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-8xp9l"] Nov 28 15:06:34 crc kubenswrapper[4857]: I1128 15:06:34.304970 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-8xp9l" event={"ID":"883a5e33-4994-4674-84fe-14c2d13671e9","Type":"ContainerStarted","Data":"4b397ac0f11dd7b65a0bbbe748e754f917d38ad873f1cf01e1e6fe9515e5d2d1"} Nov 28 15:06:34 crc kubenswrapper[4857]: I1128 15:06:34.305556 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-8xp9l" event={"ID":"883a5e33-4994-4674-84fe-14c2d13671e9","Type":"ContainerStarted","Data":"04f8e7a29c1d4104d44441182d434b9a48194b39ae0a4d44e9ea3d71e29068da"} Nov 28 15:06:34 crc kubenswrapper[4857]: I1128 15:06:34.328772 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-8xp9l" podStartSLOduration=2.328748039 podStartE2EDuration="2.328748039s" podCreationTimestamp="2025-11-28 15:06:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:06:34.326203531 +0000 UTC m=+5844.450144998" watchObservedRunningTime="2025-11-28 15:06:34.328748039 +0000 UTC m=+5844.452689496" Nov 28 15:06:36 crc kubenswrapper[4857]: I1128 15:06:36.229837 4857 scope.go:117] "RemoveContainer" containerID="241be049ff6ed3f89c9bfd340177c2b5b57d6cca2ed8fa8917c7f312eb8c7ce1" Nov 28 15:06:36 crc kubenswrapper[4857]: E1128 15:06:36.231289 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:06:37 crc kubenswrapper[4857]: I1128 15:06:37.344325 4857 generic.go:334] "Generic (PLEG): container finished" podID="883a5e33-4994-4674-84fe-14c2d13671e9" containerID="4b397ac0f11dd7b65a0bbbe748e754f917d38ad873f1cf01e1e6fe9515e5d2d1" exitCode=0 Nov 28 15:06:37 crc kubenswrapper[4857]: I1128 15:06:37.344393 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-8xp9l" event={"ID":"883a5e33-4994-4674-84fe-14c2d13671e9","Type":"ContainerDied","Data":"4b397ac0f11dd7b65a0bbbe748e754f917d38ad873f1cf01e1e6fe9515e5d2d1"} Nov 28 15:06:38 crc kubenswrapper[4857]: I1128 15:06:38.777703 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-8xp9l" Nov 28 15:06:38 crc kubenswrapper[4857]: I1128 15:06:38.863358 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/883a5e33-4994-4674-84fe-14c2d13671e9-config-data\") pod \"883a5e33-4994-4674-84fe-14c2d13671e9\" (UID: \"883a5e33-4994-4674-84fe-14c2d13671e9\") " Nov 28 15:06:38 crc kubenswrapper[4857]: I1128 15:06:38.863727 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vwd92\" (UniqueName: \"kubernetes.io/projected/883a5e33-4994-4674-84fe-14c2d13671e9-kube-api-access-vwd92\") pod \"883a5e33-4994-4674-84fe-14c2d13671e9\" (UID: \"883a5e33-4994-4674-84fe-14c2d13671e9\") " Nov 28 15:06:38 crc kubenswrapper[4857]: I1128 15:06:38.864662 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/883a5e33-4994-4674-84fe-14c2d13671e9-scripts\") pod \"883a5e33-4994-4674-84fe-14c2d13671e9\" (UID: \"883a5e33-4994-4674-84fe-14c2d13671e9\") " Nov 28 15:06:38 crc kubenswrapper[4857]: I1128 15:06:38.864804 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/883a5e33-4994-4674-84fe-14c2d13671e9-combined-ca-bundle\") pod \"883a5e33-4994-4674-84fe-14c2d13671e9\" (UID: \"883a5e33-4994-4674-84fe-14c2d13671e9\") " Nov 28 15:06:38 crc kubenswrapper[4857]: I1128 15:06:38.864995 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/883a5e33-4994-4674-84fe-14c2d13671e9-db-sync-config-data\") pod \"883a5e33-4994-4674-84fe-14c2d13671e9\" (UID: \"883a5e33-4994-4674-84fe-14c2d13671e9\") " Nov 28 15:06:38 crc kubenswrapper[4857]: I1128 15:06:38.865280 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/883a5e33-4994-4674-84fe-14c2d13671e9-etc-machine-id\") pod \"883a5e33-4994-4674-84fe-14c2d13671e9\" (UID: \"883a5e33-4994-4674-84fe-14c2d13671e9\") " Nov 28 15:06:38 crc kubenswrapper[4857]: I1128 15:06:38.865367 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/883a5e33-4994-4674-84fe-14c2d13671e9-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "883a5e33-4994-4674-84fe-14c2d13671e9" (UID: "883a5e33-4994-4674-84fe-14c2d13671e9"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 28 15:06:38 crc kubenswrapper[4857]: I1128 15:06:38.865911 4857 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/883a5e33-4994-4674-84fe-14c2d13671e9-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 28 15:06:38 crc kubenswrapper[4857]: I1128 15:06:38.870270 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/883a5e33-4994-4674-84fe-14c2d13671e9-kube-api-access-vwd92" (OuterVolumeSpecName: "kube-api-access-vwd92") pod "883a5e33-4994-4674-84fe-14c2d13671e9" (UID: "883a5e33-4994-4674-84fe-14c2d13671e9"). InnerVolumeSpecName "kube-api-access-vwd92". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:06:38 crc kubenswrapper[4857]: I1128 15:06:38.870495 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/883a5e33-4994-4674-84fe-14c2d13671e9-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "883a5e33-4994-4674-84fe-14c2d13671e9" (UID: "883a5e33-4994-4674-84fe-14c2d13671e9"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:06:38 crc kubenswrapper[4857]: I1128 15:06:38.893473 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/883a5e33-4994-4674-84fe-14c2d13671e9-scripts" (OuterVolumeSpecName: "scripts") pod "883a5e33-4994-4674-84fe-14c2d13671e9" (UID: "883a5e33-4994-4674-84fe-14c2d13671e9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:06:38 crc kubenswrapper[4857]: I1128 15:06:38.906214 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/883a5e33-4994-4674-84fe-14c2d13671e9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "883a5e33-4994-4674-84fe-14c2d13671e9" (UID: "883a5e33-4994-4674-84fe-14c2d13671e9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:06:38 crc kubenswrapper[4857]: I1128 15:06:38.942403 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/883a5e33-4994-4674-84fe-14c2d13671e9-config-data" (OuterVolumeSpecName: "config-data") pod "883a5e33-4994-4674-84fe-14c2d13671e9" (UID: "883a5e33-4994-4674-84fe-14c2d13671e9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:06:38 crc kubenswrapper[4857]: I1128 15:06:38.968084 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/883a5e33-4994-4674-84fe-14c2d13671e9-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:06:38 crc kubenswrapper[4857]: I1128 15:06:38.968150 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vwd92\" (UniqueName: \"kubernetes.io/projected/883a5e33-4994-4674-84fe-14c2d13671e9-kube-api-access-vwd92\") on node \"crc\" DevicePath \"\"" Nov 28 15:06:38 crc kubenswrapper[4857]: I1128 15:06:38.968171 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/883a5e33-4994-4674-84fe-14c2d13671e9-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:06:38 crc kubenswrapper[4857]: I1128 15:06:38.968189 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/883a5e33-4994-4674-84fe-14c2d13671e9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:06:38 crc kubenswrapper[4857]: I1128 15:06:38.968210 4857 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/883a5e33-4994-4674-84fe-14c2d13671e9-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:06:39 crc kubenswrapper[4857]: I1128 15:06:39.371653 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-8xp9l" event={"ID":"883a5e33-4994-4674-84fe-14c2d13671e9","Type":"ContainerDied","Data":"04f8e7a29c1d4104d44441182d434b9a48194b39ae0a4d44e9ea3d71e29068da"} Nov 28 15:06:39 crc kubenswrapper[4857]: I1128 15:06:39.371718 4857 pod_container_deletor.go:80] "Container not 
found in pod's containers" containerID="04f8e7a29c1d4104d44441182d434b9a48194b39ae0a4d44e9ea3d71e29068da" Nov 28 15:06:39 crc kubenswrapper[4857]: I1128 15:06:39.371803 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-8xp9l" Nov 28 15:06:39 crc kubenswrapper[4857]: I1128 15:06:39.714021 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-77946ff95c-6lx4x"] Nov 28 15:06:39 crc kubenswrapper[4857]: E1128 15:06:39.715384 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="883a5e33-4994-4674-84fe-14c2d13671e9" containerName="cinder-db-sync" Nov 28 15:06:39 crc kubenswrapper[4857]: I1128 15:06:39.715416 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="883a5e33-4994-4674-84fe-14c2d13671e9" containerName="cinder-db-sync" Nov 28 15:06:39 crc kubenswrapper[4857]: I1128 15:06:39.715699 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="883a5e33-4994-4674-84fe-14c2d13671e9" containerName="cinder-db-sync" Nov 28 15:06:39 crc kubenswrapper[4857]: I1128 15:06:39.718108 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77946ff95c-6lx4x" Nov 28 15:06:39 crc kubenswrapper[4857]: I1128 15:06:39.727005 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77946ff95c-6lx4x"] Nov 28 15:06:39 crc kubenswrapper[4857]: I1128 15:06:39.790134 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a-ovsdbserver-sb\") pod \"dnsmasq-dns-77946ff95c-6lx4x\" (UID: \"38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a\") " pod="openstack/dnsmasq-dns-77946ff95c-6lx4x" Nov 28 15:06:39 crc kubenswrapper[4857]: I1128 15:06:39.790620 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a-dns-svc\") pod \"dnsmasq-dns-77946ff95c-6lx4x\" (UID: \"38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a\") " pod="openstack/dnsmasq-dns-77946ff95c-6lx4x" Nov 28 15:06:39 crc kubenswrapper[4857]: I1128 15:06:39.790673 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a-config\") pod \"dnsmasq-dns-77946ff95c-6lx4x\" (UID: \"38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a\") " pod="openstack/dnsmasq-dns-77946ff95c-6lx4x" Nov 28 15:06:39 crc kubenswrapper[4857]: I1128 15:06:39.790729 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a-ovsdbserver-nb\") pod \"dnsmasq-dns-77946ff95c-6lx4x\" (UID: \"38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a\") " pod="openstack/dnsmasq-dns-77946ff95c-6lx4x" Nov 28 15:06:39 crc kubenswrapper[4857]: I1128 15:06:39.790783 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n58cl\" (UniqueName: \"kubernetes.io/projected/38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a-kube-api-access-n58cl\") pod \"dnsmasq-dns-77946ff95c-6lx4x\" (UID: \"38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a\") " pod="openstack/dnsmasq-dns-77946ff95c-6lx4x" Nov 28 15:06:39 crc kubenswrapper[4857]: I1128 15:06:39.892603 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a-dns-svc\") pod \"dnsmasq-dns-77946ff95c-6lx4x\" (UID: \"38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a\") " pod="openstack/dnsmasq-dns-77946ff95c-6lx4x" Nov 28 15:06:39 crc kubenswrapper[4857]: I1128 15:06:39.892648 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a-config\") pod \"dnsmasq-dns-77946ff95c-6lx4x\" (UID: \"38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a\") " pod="openstack/dnsmasq-dns-77946ff95c-6lx4x" Nov 28 15:06:39 crc kubenswrapper[4857]: I1128 15:06:39.892668 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a-ovsdbserver-nb\") pod \"dnsmasq-dns-77946ff95c-6lx4x\" (UID: \"38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a\") " pod="openstack/dnsmasq-dns-77946ff95c-6lx4x" Nov 28 15:06:39 crc kubenswrapper[4857]: I1128 15:06:39.892694 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n58cl\" (UniqueName: \"kubernetes.io/projected/38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a-kube-api-access-n58cl\") pod \"dnsmasq-dns-77946ff95c-6lx4x\" (UID: \"38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a\") " pod="openstack/dnsmasq-dns-77946ff95c-6lx4x" Nov 28 15:06:39 crc kubenswrapper[4857]: I1128 15:06:39.892737 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a-ovsdbserver-sb\") pod \"dnsmasq-dns-77946ff95c-6lx4x\" (UID: \"38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a\") " pod="openstack/dnsmasq-dns-77946ff95c-6lx4x" Nov 28 15:06:39 crc kubenswrapper[4857]: I1128 15:06:39.893791 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a-config\") pod \"dnsmasq-dns-77946ff95c-6lx4x\" (UID: \"38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a\") " pod="openstack/dnsmasq-dns-77946ff95c-6lx4x" Nov 28 15:06:39 crc kubenswrapper[4857]: I1128 15:06:39.893814 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a-ovsdbserver-sb\") pod \"dnsmasq-dns-77946ff95c-6lx4x\" (UID: \"38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a\") " pod="openstack/dnsmasq-dns-77946ff95c-6lx4x" Nov 28 15:06:39 crc kubenswrapper[4857]: I1128 15:06:39.893832 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a-ovsdbserver-nb\") pod \"dnsmasq-dns-77946ff95c-6lx4x\" (UID: \"38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a\") " pod="openstack/dnsmasq-dns-77946ff95c-6lx4x" Nov 28 15:06:39 crc kubenswrapper[4857]: I1128 15:06:39.894099 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a-dns-svc\") pod \"dnsmasq-dns-77946ff95c-6lx4x\" (UID: \"38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a\") " pod="openstack/dnsmasq-dns-77946ff95c-6lx4x" Nov 28 15:06:39 crc kubenswrapper[4857]: I1128 15:06:39.913839 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n58cl\" (UniqueName: \"kubernetes.io/projected/38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a-kube-api-access-n58cl\") pod 
\"dnsmasq-dns-77946ff95c-6lx4x\" (UID: \"38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a\") " pod="openstack/dnsmasq-dns-77946ff95c-6lx4x" Nov 28 15:06:39 crc kubenswrapper[4857]: I1128 15:06:39.931631 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 28 15:06:39 crc kubenswrapper[4857]: I1128 15:06:39.935258 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 28 15:06:39 crc kubenswrapper[4857]: I1128 15:06:39.939204 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 28 15:06:39 crc kubenswrapper[4857]: I1128 15:06:39.939373 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 28 15:06:39 crc kubenswrapper[4857]: I1128 15:06:39.939521 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-k656c" Nov 28 15:06:39 crc kubenswrapper[4857]: I1128 15:06:39.939690 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 28 15:06:39 crc kubenswrapper[4857]: I1128 15:06:39.949859 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 28 15:06:39 crc kubenswrapper[4857]: I1128 15:06:39.995069 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6fa8fc93-7ca7-489a-a24c-fd617f50b178-etc-machine-id\") pod \"cinder-api-0\" (UID: \"6fa8fc93-7ca7-489a-a24c-fd617f50b178\") " pod="openstack/cinder-api-0" Nov 28 15:06:39 crc kubenswrapper[4857]: I1128 15:06:39.995125 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6fa8fc93-7ca7-489a-a24c-fd617f50b178-scripts\") pod \"cinder-api-0\" (UID: \"6fa8fc93-7ca7-489a-a24c-fd617f50b178\") " pod="openstack/cinder-api-0" Nov 28 15:06:39 crc kubenswrapper[4857]: I1128 15:06:39.995159 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7mxfw\" (UniqueName: \"kubernetes.io/projected/6fa8fc93-7ca7-489a-a24c-fd617f50b178-kube-api-access-7mxfw\") pod \"cinder-api-0\" (UID: \"6fa8fc93-7ca7-489a-a24c-fd617f50b178\") " pod="openstack/cinder-api-0" Nov 28 15:06:39 crc kubenswrapper[4857]: I1128 15:06:39.995179 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fa8fc93-7ca7-489a-a24c-fd617f50b178-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"6fa8fc93-7ca7-489a-a24c-fd617f50b178\") " pod="openstack/cinder-api-0" Nov 28 15:06:39 crc kubenswrapper[4857]: I1128 15:06:39.995193 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6fa8fc93-7ca7-489a-a24c-fd617f50b178-logs\") pod \"cinder-api-0\" (UID: \"6fa8fc93-7ca7-489a-a24c-fd617f50b178\") " pod="openstack/cinder-api-0" Nov 28 15:06:39 crc kubenswrapper[4857]: I1128 15:06:39.995389 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6fa8fc93-7ca7-489a-a24c-fd617f50b178-config-data\") pod \"cinder-api-0\" (UID: \"6fa8fc93-7ca7-489a-a24c-fd617f50b178\") " pod="openstack/cinder-api-0" Nov 28 15:06:39 crc kubenswrapper[4857]: I1128 15:06:39.995549 4857 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6fa8fc93-7ca7-489a-a24c-fd617f50b178-config-data-custom\") pod \"cinder-api-0\" (UID: \"6fa8fc93-7ca7-489a-a24c-fd617f50b178\") " pod="openstack/cinder-api-0" Nov 28 15:06:40 crc kubenswrapper[4857]: I1128 15:06:40.057295 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77946ff95c-6lx4x" Nov 28 15:06:40 crc kubenswrapper[4857]: I1128 15:06:40.097709 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6fa8fc93-7ca7-489a-a24c-fd617f50b178-config-data-custom\") pod \"cinder-api-0\" (UID: \"6fa8fc93-7ca7-489a-a24c-fd617f50b178\") " pod="openstack/cinder-api-0" Nov 28 15:06:40 crc kubenswrapper[4857]: I1128 15:06:40.097826 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6fa8fc93-7ca7-489a-a24c-fd617f50b178-etc-machine-id\") pod \"cinder-api-0\" (UID: \"6fa8fc93-7ca7-489a-a24c-fd617f50b178\") " pod="openstack/cinder-api-0" Nov 28 15:06:40 crc kubenswrapper[4857]: I1128 15:06:40.097872 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6fa8fc93-7ca7-489a-a24c-fd617f50b178-scripts\") pod \"cinder-api-0\" (UID: \"6fa8fc93-7ca7-489a-a24c-fd617f50b178\") " pod="openstack/cinder-api-0" Nov 28 15:06:40 crc kubenswrapper[4857]: I1128 15:06:40.097908 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7mxfw\" (UniqueName: \"kubernetes.io/projected/6fa8fc93-7ca7-489a-a24c-fd617f50b178-kube-api-access-7mxfw\") pod \"cinder-api-0\" (UID: \"6fa8fc93-7ca7-489a-a24c-fd617f50b178\") " pod="openstack/cinder-api-0" Nov 28 15:06:40 crc kubenswrapper[4857]: I1128 15:06:40.097928 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fa8fc93-7ca7-489a-a24c-fd617f50b178-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"6fa8fc93-7ca7-489a-a24c-fd617f50b178\") " pod="openstack/cinder-api-0" Nov 28 15:06:40 crc kubenswrapper[4857]: I1128 15:06:40.097961 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6fa8fc93-7ca7-489a-a24c-fd617f50b178-logs\") pod \"cinder-api-0\" (UID: \"6fa8fc93-7ca7-489a-a24c-fd617f50b178\") " pod="openstack/cinder-api-0" Nov 28 15:06:40 crc kubenswrapper[4857]: I1128 15:06:40.098000 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6fa8fc93-7ca7-489a-a24c-fd617f50b178-config-data\") pod \"cinder-api-0\" (UID: \"6fa8fc93-7ca7-489a-a24c-fd617f50b178\") " pod="openstack/cinder-api-0" Nov 28 15:06:40 crc kubenswrapper[4857]: I1128 15:06:40.098097 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6fa8fc93-7ca7-489a-a24c-fd617f50b178-etc-machine-id\") pod \"cinder-api-0\" (UID: \"6fa8fc93-7ca7-489a-a24c-fd617f50b178\") " pod="openstack/cinder-api-0" Nov 28 15:06:40 crc kubenswrapper[4857]: I1128 15:06:40.100511 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6fa8fc93-7ca7-489a-a24c-fd617f50b178-logs\") 
pod \"cinder-api-0\" (UID: \"6fa8fc93-7ca7-489a-a24c-fd617f50b178\") " pod="openstack/cinder-api-0" Nov 28 15:06:40 crc kubenswrapper[4857]: I1128 15:06:40.105828 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6fa8fc93-7ca7-489a-a24c-fd617f50b178-config-data\") pod \"cinder-api-0\" (UID: \"6fa8fc93-7ca7-489a-a24c-fd617f50b178\") " pod="openstack/cinder-api-0" Nov 28 15:06:40 crc kubenswrapper[4857]: I1128 15:06:40.107237 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6fa8fc93-7ca7-489a-a24c-fd617f50b178-config-data-custom\") pod \"cinder-api-0\" (UID: \"6fa8fc93-7ca7-489a-a24c-fd617f50b178\") " pod="openstack/cinder-api-0" Nov 28 15:06:40 crc kubenswrapper[4857]: I1128 15:06:40.110463 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fa8fc93-7ca7-489a-a24c-fd617f50b178-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"6fa8fc93-7ca7-489a-a24c-fd617f50b178\") " pod="openstack/cinder-api-0" Nov 28 15:06:40 crc kubenswrapper[4857]: I1128 15:06:40.117544 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6fa8fc93-7ca7-489a-a24c-fd617f50b178-scripts\") pod \"cinder-api-0\" (UID: \"6fa8fc93-7ca7-489a-a24c-fd617f50b178\") " pod="openstack/cinder-api-0" Nov 28 15:06:40 crc kubenswrapper[4857]: I1128 15:06:40.121585 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7mxfw\" (UniqueName: \"kubernetes.io/projected/6fa8fc93-7ca7-489a-a24c-fd617f50b178-kube-api-access-7mxfw\") pod \"cinder-api-0\" (UID: \"6fa8fc93-7ca7-489a-a24c-fd617f50b178\") " pod="openstack/cinder-api-0" Nov 28 15:06:40 crc kubenswrapper[4857]: I1128 15:06:40.298657 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 28 15:06:40 crc kubenswrapper[4857]: I1128 15:06:40.701472 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77946ff95c-6lx4x"] Nov 28 15:06:40 crc kubenswrapper[4857]: I1128 15:06:40.937303 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 28 15:06:40 crc kubenswrapper[4857]: W1128 15:06:40.937469 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6fa8fc93_7ca7_489a_a24c_fd617f50b178.slice/crio-81c53b761761358988fe165149142aee5b141fbf8e8d2187c1b55e53d38aa8eb WatchSource:0}: Error finding container 81c53b761761358988fe165149142aee5b141fbf8e8d2187c1b55e53d38aa8eb: Status 404 returned error can't find the container with id 81c53b761761358988fe165149142aee5b141fbf8e8d2187c1b55e53d38aa8eb Nov 28 15:06:41 crc kubenswrapper[4857]: I1128 15:06:41.425334 4857 generic.go:334] "Generic (PLEG): container finished" podID="38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a" containerID="4731ce721e51c0bb599a4c795b3ecf341e61c98d1105fb770d63642092a40f6a" exitCode=0 Nov 28 15:06:41 crc kubenswrapper[4857]: I1128 15:06:41.427097 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77946ff95c-6lx4x" event={"ID":"38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a","Type":"ContainerDied","Data":"4731ce721e51c0bb599a4c795b3ecf341e61c98d1105fb770d63642092a40f6a"} Nov 28 15:06:41 crc kubenswrapper[4857]: I1128 15:06:41.427193 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77946ff95c-6lx4x" event={"ID":"38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a","Type":"ContainerStarted","Data":"c44de8e4a0f26f7e7bdde860bc83dc296ccdfa6ab4ca47ef9292708c46a5f1ac"} Nov 28 15:06:41 crc kubenswrapper[4857]: I1128 15:06:41.431211 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"6fa8fc93-7ca7-489a-a24c-fd617f50b178","Type":"ContainerStarted","Data":"81c53b761761358988fe165149142aee5b141fbf8e8d2187c1b55e53d38aa8eb"} Nov 28 15:06:42 crc kubenswrapper[4857]: I1128 15:06:42.442723 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77946ff95c-6lx4x" event={"ID":"38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a","Type":"ContainerStarted","Data":"7ffd7115e9102ff97fdbe30e09d8be2f64b6b2cdb7486adf533e31b153b31e34"} Nov 28 15:06:42 crc kubenswrapper[4857]: I1128 15:06:42.443416 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-77946ff95c-6lx4x" Nov 28 15:06:42 crc kubenswrapper[4857]: I1128 15:06:42.445143 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"6fa8fc93-7ca7-489a-a24c-fd617f50b178","Type":"ContainerStarted","Data":"b5bb3f0ba3711311e335f51955048b067b26a96feff2b8fc060dd9a597cb8cf9"} Nov 28 15:06:42 crc kubenswrapper[4857]: I1128 15:06:42.465153 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-77946ff95c-6lx4x" podStartSLOduration=3.465133328 podStartE2EDuration="3.465133328s" podCreationTimestamp="2025-11-28 15:06:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:06:42.459137638 +0000 UTC m=+5852.583079085" watchObservedRunningTime="2025-11-28 15:06:42.465133328 +0000 UTC m=+5852.589074785" Nov 28 15:06:43 crc kubenswrapper[4857]: I1128 15:06:43.456547 4857 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/cinder-api-0" event={"ID":"6fa8fc93-7ca7-489a-a24c-fd617f50b178","Type":"ContainerStarted","Data":"f44057963a725c18ea4d3c556814f3b500d4d64c8ade56468c52456b86043809"} Nov 28 15:06:43 crc kubenswrapper[4857]: I1128 15:06:43.457135 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 28 15:06:43 crc kubenswrapper[4857]: I1128 15:06:43.490711 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.49069075 podStartE2EDuration="4.49069075s" podCreationTimestamp="2025-11-28 15:06:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:06:43.479580254 +0000 UTC m=+5853.603521711" watchObservedRunningTime="2025-11-28 15:06:43.49069075 +0000 UTC m=+5853.614632187" Nov 28 15:06:50 crc kubenswrapper[4857]: I1128 15:06:50.059212 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-77946ff95c-6lx4x" Nov 28 15:06:50 crc kubenswrapper[4857]: I1128 15:06:50.174460 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78644f9f4f-fwdcg"] Nov 28 15:06:50 crc kubenswrapper[4857]: I1128 15:06:50.174738 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-78644f9f4f-fwdcg" podUID="3c713cef-f071-4007-9b70-19a5867ff4d0" containerName="dnsmasq-dns" containerID="cri-o://a338a944f0d2851bd1717018002460d53e9d4c2ce0ba7719fa168077315c391f" gracePeriod=10 Nov 28 15:06:50 crc kubenswrapper[4857]: I1128 15:06:50.242675 4857 scope.go:117] "RemoveContainer" containerID="241be049ff6ed3f89c9bfd340177c2b5b57d6cca2ed8fa8917c7f312eb8c7ce1" Nov 28 15:06:50 crc kubenswrapper[4857]: E1128 15:06:50.243038 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:06:50 crc kubenswrapper[4857]: I1128 15:06:50.563674 4857 generic.go:334] "Generic (PLEG): container finished" podID="3c713cef-f071-4007-9b70-19a5867ff4d0" containerID="a338a944f0d2851bd1717018002460d53e9d4c2ce0ba7719fa168077315c391f" exitCode=0 Nov 28 15:06:50 crc kubenswrapper[4857]: I1128 15:06:50.563730 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78644f9f4f-fwdcg" event={"ID":"3c713cef-f071-4007-9b70-19a5867ff4d0","Type":"ContainerDied","Data":"a338a944f0d2851bd1717018002460d53e9d4c2ce0ba7719fa168077315c391f"} Nov 28 15:06:50 crc kubenswrapper[4857]: I1128 15:06:50.678255 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78644f9f4f-fwdcg" Nov 28 15:06:50 crc kubenswrapper[4857]: I1128 15:06:50.766304 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3c713cef-f071-4007-9b70-19a5867ff4d0-ovsdbserver-nb\") pod \"3c713cef-f071-4007-9b70-19a5867ff4d0\" (UID: \"3c713cef-f071-4007-9b70-19a5867ff4d0\") " Nov 28 15:06:50 crc kubenswrapper[4857]: I1128 15:06:50.766931 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3c713cef-f071-4007-9b70-19a5867ff4d0-dns-svc\") pod \"3c713cef-f071-4007-9b70-19a5867ff4d0\" (UID: \"3c713cef-f071-4007-9b70-19a5867ff4d0\") " Nov 28 15:06:50 crc kubenswrapper[4857]: I1128 15:06:50.767055 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3c713cef-f071-4007-9b70-19a5867ff4d0-ovsdbserver-sb\") pod \"3c713cef-f071-4007-9b70-19a5867ff4d0\" (UID: \"3c713cef-f071-4007-9b70-19a5867ff4d0\") " Nov 28 15:06:50 crc kubenswrapper[4857]: I1128 15:06:50.767083 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3c713cef-f071-4007-9b70-19a5867ff4d0-config\") pod \"3c713cef-f071-4007-9b70-19a5867ff4d0\" (UID: \"3c713cef-f071-4007-9b70-19a5867ff4d0\") " Nov 28 15:06:50 crc kubenswrapper[4857]: I1128 15:06:50.767197 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-45hhc\" (UniqueName: \"kubernetes.io/projected/3c713cef-f071-4007-9b70-19a5867ff4d0-kube-api-access-45hhc\") pod \"3c713cef-f071-4007-9b70-19a5867ff4d0\" (UID: \"3c713cef-f071-4007-9b70-19a5867ff4d0\") " Nov 28 15:06:50 crc kubenswrapper[4857]: I1128 15:06:50.794576 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c713cef-f071-4007-9b70-19a5867ff4d0-kube-api-access-45hhc" (OuterVolumeSpecName: "kube-api-access-45hhc") pod "3c713cef-f071-4007-9b70-19a5867ff4d0" (UID: "3c713cef-f071-4007-9b70-19a5867ff4d0"). InnerVolumeSpecName "kube-api-access-45hhc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:06:50 crc kubenswrapper[4857]: I1128 15:06:50.821655 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c713cef-f071-4007-9b70-19a5867ff4d0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3c713cef-f071-4007-9b70-19a5867ff4d0" (UID: "3c713cef-f071-4007-9b70-19a5867ff4d0"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:06:50 crc kubenswrapper[4857]: I1128 15:06:50.825875 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c713cef-f071-4007-9b70-19a5867ff4d0-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "3c713cef-f071-4007-9b70-19a5867ff4d0" (UID: "3c713cef-f071-4007-9b70-19a5867ff4d0"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:06:50 crc kubenswrapper[4857]: I1128 15:06:50.825914 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c713cef-f071-4007-9b70-19a5867ff4d0-config" (OuterVolumeSpecName: "config") pod "3c713cef-f071-4007-9b70-19a5867ff4d0" (UID: "3c713cef-f071-4007-9b70-19a5867ff4d0"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:06:50 crc kubenswrapper[4857]: I1128 15:06:50.828906 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c713cef-f071-4007-9b70-19a5867ff4d0-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "3c713cef-f071-4007-9b70-19a5867ff4d0" (UID: "3c713cef-f071-4007-9b70-19a5867ff4d0"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:06:50 crc kubenswrapper[4857]: I1128 15:06:50.868966 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3c713cef-f071-4007-9b70-19a5867ff4d0-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 15:06:50 crc kubenswrapper[4857]: I1128 15:06:50.869007 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3c713cef-f071-4007-9b70-19a5867ff4d0-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 15:06:50 crc kubenswrapper[4857]: I1128 15:06:50.869019 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3c713cef-f071-4007-9b70-19a5867ff4d0-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:06:50 crc kubenswrapper[4857]: I1128 15:06:50.869030 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-45hhc\" (UniqueName: \"kubernetes.io/projected/3c713cef-f071-4007-9b70-19a5867ff4d0-kube-api-access-45hhc\") on node \"crc\" DevicePath \"\"" Nov 28 15:06:50 crc kubenswrapper[4857]: I1128 15:06:50.869042 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3c713cef-f071-4007-9b70-19a5867ff4d0-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 15:06:51 crc kubenswrapper[4857]: I1128 15:06:51.321373 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 15:06:51 crc kubenswrapper[4857]: I1128 15:06:51.321732 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="10c85eb2-d5c9-47c5-b7e9-b24e96ae607a" containerName="nova-scheduler-scheduler" containerID="cri-o://b7291b5a642a8e065649d7b60e5faa703b2648928c39c13de5610ea3fc51b0f4" gracePeriod=30 Nov 28 15:06:51 crc kubenswrapper[4857]: I1128 15:06:51.346001 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:06:51 crc kubenswrapper[4857]: I1128 15:06:51.346252 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="d4926cdd-f4bc-4e14-8a05-d61260bb2649" containerName="nova-metadata-log" containerID="cri-o://efc47ae08a731d27ec108edff3adc7aa74baff19d905dfb91c1371f98a8fc9e7" gracePeriod=30 Nov 28 15:06:51 crc kubenswrapper[4857]: I1128 15:06:51.346383 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="d4926cdd-f4bc-4e14-8a05-d61260bb2649" containerName="nova-metadata-metadata" containerID="cri-o://f3a52c1f8a02ba8347f3dac283a1ecd7d65d2bcccd2c8682630fada1fffca893" gracePeriod=30 Nov 28 15:06:51 crc kubenswrapper[4857]: I1128 15:06:51.360811 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:06:51 crc kubenswrapper[4857]: I1128 15:06:51.361064 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="abddfbcf-fdbc-4fae-acf2-29943c5e6732" 
containerName="nova-api-log" containerID="cri-o://b3e8eb7cf2f9075a147ce977c85d7b0eb30cc3667c8da9de79d169734da27a50" gracePeriod=30 Nov 28 15:06:51 crc kubenswrapper[4857]: I1128 15:06:51.361501 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="abddfbcf-fdbc-4fae-acf2-29943c5e6732" containerName="nova-api-api" containerID="cri-o://4a41bfb5eb3dfee23ccc5e5b15a4fdf64fc6618299adaea8e4cd98fe8f0bcb07" gracePeriod=30 Nov 28 15:06:51 crc kubenswrapper[4857]: I1128 15:06:51.373918 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 15:06:51 crc kubenswrapper[4857]: I1128 15:06:51.374343 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="1478276e-09fe-4ff5-bf7e-235cc6c59ad2" containerName="nova-cell0-conductor-conductor" containerID="cri-o://62c31a6b99d612a7e0661d6e4a83c9853d264fe7c311bc62814b86ea7f4f7dea" gracePeriod=30 Nov 28 15:06:51 crc kubenswrapper[4857]: I1128 15:06:51.385562 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 28 15:06:51 crc kubenswrapper[4857]: I1128 15:06:51.385895 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="3f559063-f4d6-4d59-988c-a1838761d3af" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://026d78da318ee802039d1a5c6d58bb3d08a3e6cf298afcd69ba8d95e48aadff7" gracePeriod=30 Nov 28 15:06:51 crc kubenswrapper[4857]: I1128 15:06:51.433762 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 15:06:51 crc kubenswrapper[4857]: I1128 15:06:51.434097 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-conductor-0" podUID="e106e94c-ae77-44e4-a276-4c01117a17f3" containerName="nova-cell1-conductor-conductor" containerID="cri-o://b8721ba56e2d3a81771b58046949919bd90dae1726d2b78d838e70335e0663cd" gracePeriod=30 Nov 28 15:06:51 crc kubenswrapper[4857]: I1128 15:06:51.575194 4857 generic.go:334] "Generic (PLEG): container finished" podID="d4926cdd-f4bc-4e14-8a05-d61260bb2649" containerID="efc47ae08a731d27ec108edff3adc7aa74baff19d905dfb91c1371f98a8fc9e7" exitCode=143 Nov 28 15:06:51 crc kubenswrapper[4857]: I1128 15:06:51.575301 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d4926cdd-f4bc-4e14-8a05-d61260bb2649","Type":"ContainerDied","Data":"efc47ae08a731d27ec108edff3adc7aa74baff19d905dfb91c1371f98a8fc9e7"} Nov 28 15:06:51 crc kubenswrapper[4857]: I1128 15:06:51.577817 4857 generic.go:334] "Generic (PLEG): container finished" podID="abddfbcf-fdbc-4fae-acf2-29943c5e6732" containerID="b3e8eb7cf2f9075a147ce977c85d7b0eb30cc3667c8da9de79d169734da27a50" exitCode=143 Nov 28 15:06:51 crc kubenswrapper[4857]: I1128 15:06:51.577887 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"abddfbcf-fdbc-4fae-acf2-29943c5e6732","Type":"ContainerDied","Data":"b3e8eb7cf2f9075a147ce977c85d7b0eb30cc3667c8da9de79d169734da27a50"} Nov 28 15:06:51 crc kubenswrapper[4857]: I1128 15:06:51.579876 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78644f9f4f-fwdcg" event={"ID":"3c713cef-f071-4007-9b70-19a5867ff4d0","Type":"ContainerDied","Data":"30b3a10665e9b2622cf78580de123018067a92f12fbdd8520bd46f2c70f7356d"} Nov 28 15:06:51 crc kubenswrapper[4857]: I1128 15:06:51.579932 
4857 scope.go:117] "RemoveContainer" containerID="a338a944f0d2851bd1717018002460d53e9d4c2ce0ba7719fa168077315c391f"
Nov 28 15:06:51 crc kubenswrapper[4857]: I1128 15:06:51.580118 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78644f9f4f-fwdcg"
Nov 28 15:06:51 crc kubenswrapper[4857]: I1128 15:06:51.619731 4857 scope.go:117] "RemoveContainer" containerID="eeec77ada6797b557cf534f4f93678af4765bb3382fde066160d77d2f3710161"
Nov 28 15:06:51 crc kubenswrapper[4857]: I1128 15:06:51.623669 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78644f9f4f-fwdcg"]
Nov 28 15:06:51 crc kubenswrapper[4857]: I1128 15:06:51.634701 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78644f9f4f-fwdcg"]
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.109657 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.200784 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7t66g\" (UniqueName: \"kubernetes.io/projected/3f559063-f4d6-4d59-988c-a1838761d3af-kube-api-access-7t66g\") pod \"3f559063-f4d6-4d59-988c-a1838761d3af\" (UID: \"3f559063-f4d6-4d59-988c-a1838761d3af\") "
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.200910 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f559063-f4d6-4d59-988c-a1838761d3af-combined-ca-bundle\") pod \"3f559063-f4d6-4d59-988c-a1838761d3af\" (UID: \"3f559063-f4d6-4d59-988c-a1838761d3af\") "
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.200967 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f559063-f4d6-4d59-988c-a1838761d3af-config-data\") pod \"3f559063-f4d6-4d59-988c-a1838761d3af\" (UID: \"3f559063-f4d6-4d59-988c-a1838761d3af\") "
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.209787 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f559063-f4d6-4d59-988c-a1838761d3af-kube-api-access-7t66g" (OuterVolumeSpecName: "kube-api-access-7t66g") pod "3f559063-f4d6-4d59-988c-a1838761d3af" (UID: "3f559063-f4d6-4d59-988c-a1838761d3af"). InnerVolumeSpecName "kube-api-access-7t66g". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.237096 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f559063-f4d6-4d59-988c-a1838761d3af-config-data" (OuterVolumeSpecName: "config-data") pod "3f559063-f4d6-4d59-988c-a1838761d3af" (UID: "3f559063-f4d6-4d59-988c-a1838761d3af"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.240509 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f559063-f4d6-4d59-988c-a1838761d3af-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3f559063-f4d6-4d59-988c-a1838761d3af" (UID: "3f559063-f4d6-4d59-988c-a1838761d3af"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.249042 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c713cef-f071-4007-9b70-19a5867ff4d0" path="/var/lib/kubelet/pods/3c713cef-f071-4007-9b70-19a5867ff4d0/volumes"
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.302912 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7t66g\" (UniqueName: \"kubernetes.io/projected/3f559063-f4d6-4d59-988c-a1838761d3af-kube-api-access-7t66g\") on node \"crc\" DevicePath \"\""
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.303019 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3f559063-f4d6-4d59-988c-a1838761d3af-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.303033 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3f559063-f4d6-4d59-988c-a1838761d3af-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.445762 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0"
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.589073 4857 generic.go:334] "Generic (PLEG): container finished" podID="3f559063-f4d6-4d59-988c-a1838761d3af" containerID="026d78da318ee802039d1a5c6d58bb3d08a3e6cf298afcd69ba8d95e48aadff7" exitCode=0
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.589111 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.589161 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"3f559063-f4d6-4d59-988c-a1838761d3af","Type":"ContainerDied","Data":"026d78da318ee802039d1a5c6d58bb3d08a3e6cf298afcd69ba8d95e48aadff7"}
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.589189 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"3f559063-f4d6-4d59-988c-a1838761d3af","Type":"ContainerDied","Data":"c709ea2d8abc922b29c60039f4d18e79936c1157eeb6b0ddb9426494b264f09c"}
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.589206 4857 scope.go:117] "RemoveContainer" containerID="026d78da318ee802039d1a5c6d58bb3d08a3e6cf298afcd69ba8d95e48aadff7"
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.617303 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.620716 4857 scope.go:117] "RemoveContainer" containerID="026d78da318ee802039d1a5c6d58bb3d08a3e6cf298afcd69ba8d95e48aadff7"
Nov 28 15:06:52 crc kubenswrapper[4857]: E1128 15:06:52.621065 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"026d78da318ee802039d1a5c6d58bb3d08a3e6cf298afcd69ba8d95e48aadff7\": container with ID starting with 026d78da318ee802039d1a5c6d58bb3d08a3e6cf298afcd69ba8d95e48aadff7 not found: ID does not exist" containerID="026d78da318ee802039d1a5c6d58bb3d08a3e6cf298afcd69ba8d95e48aadff7"
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.621089 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"026d78da318ee802039d1a5c6d58bb3d08a3e6cf298afcd69ba8d95e48aadff7"} err="failed to get container status \"026d78da318ee802039d1a5c6d58bb3d08a3e6cf298afcd69ba8d95e48aadff7\": rpc error: code = NotFound desc = could not find container \"026d78da318ee802039d1a5c6d58bb3d08a3e6cf298afcd69ba8d95e48aadff7\": container with ID starting with 026d78da318ee802039d1a5c6d58bb3d08a3e6cf298afcd69ba8d95e48aadff7 not found: ID does not exist"
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.643990 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.661989 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 28 15:06:52 crc kubenswrapper[4857]: E1128 15:06:52.662374 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c713cef-f071-4007-9b70-19a5867ff4d0" containerName="dnsmasq-dns"
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.662395 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c713cef-f071-4007-9b70-19a5867ff4d0" containerName="dnsmasq-dns"
Nov 28 15:06:52 crc kubenswrapper[4857]: E1128 15:06:52.662415 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c713cef-f071-4007-9b70-19a5867ff4d0" containerName="init"
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.662423 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c713cef-f071-4007-9b70-19a5867ff4d0" containerName="init"
Nov 28 15:06:52 crc kubenswrapper[4857]: E1128 15:06:52.662435 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f559063-f4d6-4d59-988c-a1838761d3af" containerName="nova-cell1-novncproxy-novncproxy"
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.662442 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f559063-f4d6-4d59-988c-a1838761d3af" containerName="nova-cell1-novncproxy-novncproxy"
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.662704 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c713cef-f071-4007-9b70-19a5867ff4d0" containerName="dnsmasq-dns"
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.662724 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f559063-f4d6-4d59-988c-a1838761d3af" containerName="nova-cell1-novncproxy-novncproxy"
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.663311 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.670010 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data"
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.676636 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.812320 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5abb31a-459c-48f7-bb30-eaee2b73da5a-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"d5abb31a-459c-48f7-bb30-eaee2b73da5a\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.812715 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nwhvc\" (UniqueName: \"kubernetes.io/projected/d5abb31a-459c-48f7-bb30-eaee2b73da5a-kube-api-access-nwhvc\") pod \"nova-cell1-novncproxy-0\" (UID: \"d5abb31a-459c-48f7-bb30-eaee2b73da5a\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.812993 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5abb31a-459c-48f7-bb30-eaee2b73da5a-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"d5abb31a-459c-48f7-bb30-eaee2b73da5a\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.915499 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nwhvc\" (UniqueName: \"kubernetes.io/projected/d5abb31a-459c-48f7-bb30-eaee2b73da5a-kube-api-access-nwhvc\") pod \"nova-cell1-novncproxy-0\" (UID: \"d5abb31a-459c-48f7-bb30-eaee2b73da5a\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.915565 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5abb31a-459c-48f7-bb30-eaee2b73da5a-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"d5abb31a-459c-48f7-bb30-eaee2b73da5a\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.915662 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5abb31a-459c-48f7-bb30-eaee2b73da5a-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"d5abb31a-459c-48f7-bb30-eaee2b73da5a\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.922661 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d5abb31a-459c-48f7-bb30-eaee2b73da5a-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"d5abb31a-459c-48f7-bb30-eaee2b73da5a\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.923164 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d5abb31a-459c-48f7-bb30-eaee2b73da5a-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"d5abb31a-459c-48f7-bb30-eaee2b73da5a\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.934089 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nwhvc\" (UniqueName: \"kubernetes.io/projected/d5abb31a-459c-48f7-bb30-eaee2b73da5a-kube-api-access-nwhvc\") pod \"nova-cell1-novncproxy-0\" (UID: \"d5abb31a-459c-48f7-bb30-eaee2b73da5a\") " pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:06:52 crc kubenswrapper[4857]: I1128 15:06:52.982988 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.245721 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.326419 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10c85eb2-d5c9-47c5-b7e9-b24e96ae607a-config-data\") pod \"10c85eb2-d5c9-47c5-b7e9-b24e96ae607a\" (UID: \"10c85eb2-d5c9-47c5-b7e9-b24e96ae607a\") "
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.326503 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2gf7v\" (UniqueName: \"kubernetes.io/projected/10c85eb2-d5c9-47c5-b7e9-b24e96ae607a-kube-api-access-2gf7v\") pod \"10c85eb2-d5c9-47c5-b7e9-b24e96ae607a\" (UID: \"10c85eb2-d5c9-47c5-b7e9-b24e96ae607a\") "
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.326626 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10c85eb2-d5c9-47c5-b7e9-b24e96ae607a-combined-ca-bundle\") pod \"10c85eb2-d5c9-47c5-b7e9-b24e96ae607a\" (UID: \"10c85eb2-d5c9-47c5-b7e9-b24e96ae607a\") "
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.333193 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10c85eb2-d5c9-47c5-b7e9-b24e96ae607a-kube-api-access-2gf7v" (OuterVolumeSpecName: "kube-api-access-2gf7v") pod "10c85eb2-d5c9-47c5-b7e9-b24e96ae607a" (UID: "10c85eb2-d5c9-47c5-b7e9-b24e96ae607a"). InnerVolumeSpecName "kube-api-access-2gf7v". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.349842 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.362304 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10c85eb2-d5c9-47c5-b7e9-b24e96ae607a-config-data" (OuterVolumeSpecName: "config-data") pod "10c85eb2-d5c9-47c5-b7e9-b24e96ae607a" (UID: "10c85eb2-d5c9-47c5-b7e9-b24e96ae607a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.376050 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10c85eb2-d5c9-47c5-b7e9-b24e96ae607a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "10c85eb2-d5c9-47c5-b7e9-b24e96ae607a" (UID: "10c85eb2-d5c9-47c5-b7e9-b24e96ae607a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.429597 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e106e94c-ae77-44e4-a276-4c01117a17f3-config-data\") pod \"e106e94c-ae77-44e4-a276-4c01117a17f3\" (UID: \"e106e94c-ae77-44e4-a276-4c01117a17f3\") "
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.429713 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2ps9b\" (UniqueName: \"kubernetes.io/projected/e106e94c-ae77-44e4-a276-4c01117a17f3-kube-api-access-2ps9b\") pod \"e106e94c-ae77-44e4-a276-4c01117a17f3\" (UID: \"e106e94c-ae77-44e4-a276-4c01117a17f3\") "
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.429819 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e106e94c-ae77-44e4-a276-4c01117a17f3-combined-ca-bundle\") pod \"e106e94c-ae77-44e4-a276-4c01117a17f3\" (UID: \"e106e94c-ae77-44e4-a276-4c01117a17f3\") "
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.430209 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10c85eb2-d5c9-47c5-b7e9-b24e96ae607a-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.430226 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10c85eb2-d5c9-47c5-b7e9-b24e96ae607a-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.430238 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2gf7v\" (UniqueName: \"kubernetes.io/projected/10c85eb2-d5c9-47c5-b7e9-b24e96ae607a-kube-api-access-2gf7v\") on node \"crc\" DevicePath \"\""
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.436670 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e106e94c-ae77-44e4-a276-4c01117a17f3-kube-api-access-2ps9b" (OuterVolumeSpecName: "kube-api-access-2ps9b") pod "e106e94c-ae77-44e4-a276-4c01117a17f3" (UID: "e106e94c-ae77-44e4-a276-4c01117a17f3"). InnerVolumeSpecName "kube-api-access-2ps9b". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.454778 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e106e94c-ae77-44e4-a276-4c01117a17f3-config-data" (OuterVolumeSpecName: "config-data") pod "e106e94c-ae77-44e4-a276-4c01117a17f3" (UID: "e106e94c-ae77-44e4-a276-4c01117a17f3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.458790 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e106e94c-ae77-44e4-a276-4c01117a17f3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e106e94c-ae77-44e4-a276-4c01117a17f3" (UID: "e106e94c-ae77-44e4-a276-4c01117a17f3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.516550 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.532506 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e106e94c-ae77-44e4-a276-4c01117a17f3-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.534154 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e106e94c-ae77-44e4-a276-4c01117a17f3-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.534228 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2ps9b\" (UniqueName: \"kubernetes.io/projected/e106e94c-ae77-44e4-a276-4c01117a17f3-kube-api-access-2ps9b\") on node \"crc\" DevicePath \"\""
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.609926 4857 generic.go:334] "Generic (PLEG): container finished" podID="e106e94c-ae77-44e4-a276-4c01117a17f3" containerID="b8721ba56e2d3a81771b58046949919bd90dae1726d2b78d838e70335e0663cd" exitCode=0
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.610124 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.612541 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"e106e94c-ae77-44e4-a276-4c01117a17f3","Type":"ContainerDied","Data":"b8721ba56e2d3a81771b58046949919bd90dae1726d2b78d838e70335e0663cd"}
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.612587 4857 generic.go:334] "Generic (PLEG): container finished" podID="10c85eb2-d5c9-47c5-b7e9-b24e96ae607a" containerID="b7291b5a642a8e065649d7b60e5faa703b2648928c39c13de5610ea3fc51b0f4" exitCode=0
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.612624 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"e106e94c-ae77-44e4-a276-4c01117a17f3","Type":"ContainerDied","Data":"1dc6a5c0a863de65e7778d5f873266e9b1cfb9b09c420a8c2692a5170a9810d0"}
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.612664 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"10c85eb2-d5c9-47c5-b7e9-b24e96ae607a","Type":"ContainerDied","Data":"b7291b5a642a8e065649d7b60e5faa703b2648928c39c13de5610ea3fc51b0f4"}
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.612689 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"10c85eb2-d5c9-47c5-b7e9-b24e96ae607a","Type":"ContainerDied","Data":"283dffd64472da77db56efaafa231b967a556a71fc52197fefbf1af779258327"}
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.612767 4857 scope.go:117] "RemoveContainer" containerID="b8721ba56e2d3a81771b58046949919bd90dae1726d2b78d838e70335e0663cd"
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.612855 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.618236 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"d5abb31a-459c-48f7-bb30-eaee2b73da5a","Type":"ContainerStarted","Data":"34fd758612d167a53f4394513969501525f1593a5899e5feb596e443804240dc"}
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.651311 4857 scope.go:117] "RemoveContainer" containerID="b8721ba56e2d3a81771b58046949919bd90dae1726d2b78d838e70335e0663cd"
Nov 28 15:06:53 crc kubenswrapper[4857]: E1128 15:06:53.651801 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b8721ba56e2d3a81771b58046949919bd90dae1726d2b78d838e70335e0663cd\": container with ID starting with b8721ba56e2d3a81771b58046949919bd90dae1726d2b78d838e70335e0663cd not found: ID does not exist" containerID="b8721ba56e2d3a81771b58046949919bd90dae1726d2b78d838e70335e0663cd"
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.651837 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b8721ba56e2d3a81771b58046949919bd90dae1726d2b78d838e70335e0663cd"} err="failed to get container status \"b8721ba56e2d3a81771b58046949919bd90dae1726d2b78d838e70335e0663cd\": rpc error: code = NotFound desc = could not find container \"b8721ba56e2d3a81771b58046949919bd90dae1726d2b78d838e70335e0663cd\": container with ID starting with b8721ba56e2d3a81771b58046949919bd90dae1726d2b78d838e70335e0663cd not found: ID does not exist"
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.651861 4857 scope.go:117] "RemoveContainer" containerID="b7291b5a642a8e065649d7b60e5faa703b2648928c39c13de5610ea3fc51b0f4"
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.675546 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.692047 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.699628 4857 scope.go:117] "RemoveContainer" containerID="b7291b5a642a8e065649d7b60e5faa703b2648928c39c13de5610ea3fc51b0f4"
Nov 28 15:06:53 crc kubenswrapper[4857]: E1128 15:06:53.700310 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b7291b5a642a8e065649d7b60e5faa703b2648928c39c13de5610ea3fc51b0f4\": container with ID starting with b7291b5a642a8e065649d7b60e5faa703b2648928c39c13de5610ea3fc51b0f4 not found: ID does not exist" containerID="b7291b5a642a8e065649d7b60e5faa703b2648928c39c13de5610ea3fc51b0f4"
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.700352 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7291b5a642a8e065649d7b60e5faa703b2648928c39c13de5610ea3fc51b0f4"} err="failed to get container status \"b7291b5a642a8e065649d7b60e5faa703b2648928c39c13de5610ea3fc51b0f4\": rpc error: code = NotFound desc = could not find container \"b7291b5a642a8e065649d7b60e5faa703b2648928c39c13de5610ea3fc51b0f4\": container with ID starting with b7291b5a642a8e065649d7b60e5faa703b2648928c39c13de5610ea3fc51b0f4 not found: ID does not exist"
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.712159 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.723244 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.729100 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 28 15:06:53 crc kubenswrapper[4857]: E1128 15:06:53.729774 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10c85eb2-d5c9-47c5-b7e9-b24e96ae607a" containerName="nova-scheduler-scheduler"
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.729797 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="10c85eb2-d5c9-47c5-b7e9-b24e96ae607a" containerName="nova-scheduler-scheduler"
Nov 28 15:06:53 crc kubenswrapper[4857]: E1128 15:06:53.729854 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e106e94c-ae77-44e4-a276-4c01117a17f3" containerName="nova-cell1-conductor-conductor"
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.729866 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="e106e94c-ae77-44e4-a276-4c01117a17f3" containerName="nova-cell1-conductor-conductor"
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.730137 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="e106e94c-ae77-44e4-a276-4c01117a17f3" containerName="nova-cell1-conductor-conductor"
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.730170 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="10c85eb2-d5c9-47c5-b7e9-b24e96ae607a" containerName="nova-scheduler-scheduler"
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.731228 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.733838 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data"
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.738538 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.740682 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.742226 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.751770 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.759076 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.841035 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jtvkh\" (UniqueName: \"kubernetes.io/projected/41e24999-304b-407e-bc06-e9b21de89249-kube-api-access-jtvkh\") pod \"nova-cell1-conductor-0\" (UID: \"41e24999-304b-407e-bc06-e9b21de89249\") " pod="openstack/nova-cell1-conductor-0"
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.841123 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41e24999-304b-407e-bc06-e9b21de89249-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"41e24999-304b-407e-bc06-e9b21de89249\") " pod="openstack/nova-cell1-conductor-0"
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.841184 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dwfhs\" (UniqueName: \"kubernetes.io/projected/4d58ec9e-5301-4a49-9c34-2704d8db30e1-kube-api-access-dwfhs\") pod \"nova-scheduler-0\" (UID: \"4d58ec9e-5301-4a49-9c34-2704d8db30e1\") " pod="openstack/nova-scheduler-0"
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.841225 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d58ec9e-5301-4a49-9c34-2704d8db30e1-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"4d58ec9e-5301-4a49-9c34-2704d8db30e1\") " pod="openstack/nova-scheduler-0"
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.841295 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41e24999-304b-407e-bc06-e9b21de89249-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"41e24999-304b-407e-bc06-e9b21de89249\") " pod="openstack/nova-cell1-conductor-0"
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.841452 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d58ec9e-5301-4a49-9c34-2704d8db30e1-config-data\") pod \"nova-scheduler-0\" (UID: \"4d58ec9e-5301-4a49-9c34-2704d8db30e1\") " pod="openstack/nova-scheduler-0"
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.944842 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dwfhs\" (UniqueName: \"kubernetes.io/projected/4d58ec9e-5301-4a49-9c34-2704d8db30e1-kube-api-access-dwfhs\") pod \"nova-scheduler-0\" (UID: \"4d58ec9e-5301-4a49-9c34-2704d8db30e1\") " pod="openstack/nova-scheduler-0"
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.944902 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d58ec9e-5301-4a49-9c34-2704d8db30e1-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"4d58ec9e-5301-4a49-9c34-2704d8db30e1\") " pod="openstack/nova-scheduler-0"
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.944981 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41e24999-304b-407e-bc06-e9b21de89249-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"41e24999-304b-407e-bc06-e9b21de89249\") " pod="openstack/nova-cell1-conductor-0"
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.945024 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d58ec9e-5301-4a49-9c34-2704d8db30e1-config-data\") pod \"nova-scheduler-0\" (UID: \"4d58ec9e-5301-4a49-9c34-2704d8db30e1\") " pod="openstack/nova-scheduler-0"
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.945123 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtvkh\" (UniqueName: \"kubernetes.io/projected/41e24999-304b-407e-bc06-e9b21de89249-kube-api-access-jtvkh\") pod \"nova-cell1-conductor-0\" (UID: \"41e24999-304b-407e-bc06-e9b21de89249\") " pod="openstack/nova-cell1-conductor-0"
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.945184 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41e24999-304b-407e-bc06-e9b21de89249-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"41e24999-304b-407e-bc06-e9b21de89249\") " pod="openstack/nova-cell1-conductor-0"
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.951140 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d58ec9e-5301-4a49-9c34-2704d8db30e1-config-data\") pod \"nova-scheduler-0\" (UID: \"4d58ec9e-5301-4a49-9c34-2704d8db30e1\") " pod="openstack/nova-scheduler-0"
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.951688 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d58ec9e-5301-4a49-9c34-2704d8db30e1-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"4d58ec9e-5301-4a49-9c34-2704d8db30e1\") " pod="openstack/nova-scheduler-0"
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.952863 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41e24999-304b-407e-bc06-e9b21de89249-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"41e24999-304b-407e-bc06-e9b21de89249\") " pod="openstack/nova-cell1-conductor-0"
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.953254 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41e24999-304b-407e-bc06-e9b21de89249-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"41e24999-304b-407e-bc06-e9b21de89249\") " pod="openstack/nova-cell1-conductor-0"
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.964078 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dwfhs\" (UniqueName: \"kubernetes.io/projected/4d58ec9e-5301-4a49-9c34-2704d8db30e1-kube-api-access-dwfhs\") pod \"nova-scheduler-0\" (UID: \"4d58ec9e-5301-4a49-9c34-2704d8db30e1\") " pod="openstack/nova-scheduler-0"
Nov 28 15:06:53 crc kubenswrapper[4857]: I1128 15:06:53.964260 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jtvkh\" (UniqueName: \"kubernetes.io/projected/41e24999-304b-407e-bc06-e9b21de89249-kube-api-access-jtvkh\") pod \"nova-cell1-conductor-0\" (UID: \"41e24999-304b-407e-bc06-e9b21de89249\") " pod="openstack/nova-cell1-conductor-0"
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.070159 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.078063 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.273771 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10c85eb2-d5c9-47c5-b7e9-b24e96ae607a" path="/var/lib/kubelet/pods/10c85eb2-d5c9-47c5-b7e9-b24e96ae607a/volumes"
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.275367 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f559063-f4d6-4d59-988c-a1838761d3af" path="/var/lib/kubelet/pods/3f559063-f4d6-4d59-988c-a1838761d3af/volumes"
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.276218 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e106e94c-ae77-44e4-a276-4c01117a17f3" path="/var/lib/kubelet/pods/e106e94c-ae77-44e4-a276-4c01117a17f3/volumes"
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.411114 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.485556 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="d4926cdd-f4bc-4e14-8a05-d61260bb2649" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"http://10.217.1.75:8775/\": read tcp 10.217.0.2:57770->10.217.1.75:8775: read: connection reset by peer"
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.485793 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="d4926cdd-f4bc-4e14-8a05-d61260bb2649" containerName="nova-metadata-log" probeResult="failure" output="Get \"http://10.217.1.75:8775/\": read tcp 10.217.0.2:57784->10.217.1.75:8775: read: connection reset by peer"
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.524465 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-api-0" podUID="abddfbcf-fdbc-4fae-acf2-29943c5e6732" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.77:8774/\": read tcp 10.217.0.2:32790->10.217.1.77:8774: read: connection reset by peer"
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.524477 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-api-0" podUID="abddfbcf-fdbc-4fae-acf2-29943c5e6732" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.77:8774/\": read tcp 10.217.0.2:32776->10.217.1.77:8774: read: connection reset by peer"
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.553453 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1478276e-09fe-4ff5-bf7e-235cc6c59ad2-config-data\") pod \"1478276e-09fe-4ff5-bf7e-235cc6c59ad2\" (UID: \"1478276e-09fe-4ff5-bf7e-235cc6c59ad2\") "
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.553630 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1478276e-09fe-4ff5-bf7e-235cc6c59ad2-combined-ca-bundle\") pod \"1478276e-09fe-4ff5-bf7e-235cc6c59ad2\" (UID: \"1478276e-09fe-4ff5-bf7e-235cc6c59ad2\") "
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.553678 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z66k7\" (UniqueName: \"kubernetes.io/projected/1478276e-09fe-4ff5-bf7e-235cc6c59ad2-kube-api-access-z66k7\") pod \"1478276e-09fe-4ff5-bf7e-235cc6c59ad2\" (UID: \"1478276e-09fe-4ff5-bf7e-235cc6c59ad2\") "
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.578746 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1478276e-09fe-4ff5-bf7e-235cc6c59ad2-kube-api-access-z66k7" (OuterVolumeSpecName: "kube-api-access-z66k7") pod "1478276e-09fe-4ff5-bf7e-235cc6c59ad2" (UID: "1478276e-09fe-4ff5-bf7e-235cc6c59ad2"). InnerVolumeSpecName "kube-api-access-z66k7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.607993 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1478276e-09fe-4ff5-bf7e-235cc6c59ad2-config-data" (OuterVolumeSpecName: "config-data") pod "1478276e-09fe-4ff5-bf7e-235cc6c59ad2" (UID: "1478276e-09fe-4ff5-bf7e-235cc6c59ad2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.623850 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1478276e-09fe-4ff5-bf7e-235cc6c59ad2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1478276e-09fe-4ff5-bf7e-235cc6c59ad2" (UID: "1478276e-09fe-4ff5-bf7e-235cc6c59ad2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.652195 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.655938 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1478276e-09fe-4ff5-bf7e-235cc6c59ad2-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.656170 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1478276e-09fe-4ff5-bf7e-235cc6c59ad2-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.656186 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z66k7\" (UniqueName: \"kubernetes.io/projected/1478276e-09fe-4ff5-bf7e-235cc6c59ad2-kube-api-access-z66k7\") on node \"crc\" DevicePath \"\""
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.659831 4857 generic.go:334] "Generic (PLEG): container finished" podID="1478276e-09fe-4ff5-bf7e-235cc6c59ad2" containerID="62c31a6b99d612a7e0661d6e4a83c9853d264fe7c311bc62814b86ea7f4f7dea" exitCode=0
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.659913 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"1478276e-09fe-4ff5-bf7e-235cc6c59ad2","Type":"ContainerDied","Data":"62c31a6b99d612a7e0661d6e4a83c9853d264fe7c311bc62814b86ea7f4f7dea"}
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.659922 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.659942 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"1478276e-09fe-4ff5-bf7e-235cc6c59ad2","Type":"ContainerDied","Data":"8869393c16267847214bc2de4321089854ec7402e3a20c6412fc3614abfbf1d4"}
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.659976 4857 scope.go:117] "RemoveContainer" containerID="62c31a6b99d612a7e0661d6e4a83c9853d264fe7c311bc62814b86ea7f4f7dea"
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.671927 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"d5abb31a-459c-48f7-bb30-eaee2b73da5a","Type":"ContainerStarted","Data":"2c06c9af2a80a5e4c7b2d1633e72ba4f780d3fb548466fa7cf447c6a70e95d69"}
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.674999 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.716737 4857 generic.go:334] "Generic (PLEG): container finished" podID="d4926cdd-f4bc-4e14-8a05-d61260bb2649" containerID="f3a52c1f8a02ba8347f3dac283a1ecd7d65d2bcccd2c8682630fada1fffca893" exitCode=0
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.716820 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d4926cdd-f4bc-4e14-8a05-d61260bb2649","Type":"ContainerDied","Data":"f3a52c1f8a02ba8347f3dac283a1ecd7d65d2bcccd2c8682630fada1fffca893"}
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.717885 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.717872786 podStartE2EDuration="2.717872786s" podCreationTimestamp="2025-11-28 15:06:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:06:54.692708984 +0000 UTC m=+5864.816650421" watchObservedRunningTime="2025-11-28 15:06:54.717872786 +0000 UTC m=+5864.841814223"
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.728569 4857 generic.go:334] "Generic (PLEG): container finished" podID="abddfbcf-fdbc-4fae-acf2-29943c5e6732" containerID="4a41bfb5eb3dfee23ccc5e5b15a4fdf64fc6618299adaea8e4cd98fe8f0bcb07" exitCode=0
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.728634 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"abddfbcf-fdbc-4fae-acf2-29943c5e6732","Type":"ContainerDied","Data":"4a41bfb5eb3dfee23ccc5e5b15a4fdf64fc6618299adaea8e4cd98fe8f0bcb07"}
Nov 28 15:06:54 crc kubenswrapper[4857]: W1128 15:06:54.762936 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4d58ec9e_5301_4a49_9c34_2704d8db30e1.slice/crio-a246239882f35af31953de7f0e268f4db2aed2f0db3e3c0c9df10652720daf5c WatchSource:0}: Error finding container a246239882f35af31953de7f0e268f4db2aed2f0db3e3c0c9df10652720daf5c: Status 404 returned error can't find the container with id a246239882f35af31953de7f0e268f4db2aed2f0db3e3c0c9df10652720daf5c
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.806917 4857 scope.go:117] "RemoveContainer" containerID="62c31a6b99d612a7e0661d6e4a83c9853d264fe7c311bc62814b86ea7f4f7dea"
Nov 28 15:06:54 crc kubenswrapper[4857]: E1128 15:06:54.809976 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"62c31a6b99d612a7e0661d6e4a83c9853d264fe7c311bc62814b86ea7f4f7dea\": container with ID starting with 62c31a6b99d612a7e0661d6e4a83c9853d264fe7c311bc62814b86ea7f4f7dea not found: ID does not exist" containerID="62c31a6b99d612a7e0661d6e4a83c9853d264fe7c311bc62814b86ea7f4f7dea"
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.810021 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"62c31a6b99d612a7e0661d6e4a83c9853d264fe7c311bc62814b86ea7f4f7dea"} err="failed to get container status \"62c31a6b99d612a7e0661d6e4a83c9853d264fe7c311bc62814b86ea7f4f7dea\": rpc error: code = NotFound desc = could not find container \"62c31a6b99d612a7e0661d6e4a83c9853d264fe7c311bc62814b86ea7f4f7dea\": container with ID starting with 62c31a6b99d612a7e0661d6e4a83c9853d264fe7c311bc62814b86ea7f4f7dea not found: ID does not exist"
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.819376 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.836245 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.845569 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 28 15:06:54 crc kubenswrapper[4857]: E1128 15:06:54.846179 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1478276e-09fe-4ff5-bf7e-235cc6c59ad2" containerName="nova-cell0-conductor-conductor"
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.846203 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="1478276e-09fe-4ff5-bf7e-235cc6c59ad2" containerName="nova-cell0-conductor-conductor"
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.846466 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="1478276e-09fe-4ff5-bf7e-235cc6c59ad2" containerName="nova-cell0-conductor-conductor"
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.847479 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.854031 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.857176 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.962384 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfd140a1-03fc-4dc7-9017-ab03325863e3-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"bfd140a1-03fc-4dc7-9017-ab03325863e3\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.962505 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-96z2c\" (UniqueName: \"kubernetes.io/projected/bfd140a1-03fc-4dc7-9017-ab03325863e3-kube-api-access-96z2c\") pod \"nova-cell0-conductor-0\" (UID: \"bfd140a1-03fc-4dc7-9017-ab03325863e3\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 15:06:54 crc kubenswrapper[4857]: I1128 15:06:54.962567 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfd140a1-03fc-4dc7-9017-ab03325863e3-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"bfd140a1-03fc-4dc7-9017-ab03325863e3\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.006447 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.065803 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfd140a1-03fc-4dc7-9017-ab03325863e3-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"bfd140a1-03fc-4dc7-9017-ab03325863e3\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.066634 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.066736 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-96z2c\" (UniqueName: \"kubernetes.io/projected/bfd140a1-03fc-4dc7-9017-ab03325863e3-kube-api-access-96z2c\") pod \"nova-cell0-conductor-0\" (UID: \"bfd140a1-03fc-4dc7-9017-ab03325863e3\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.067162 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfd140a1-03fc-4dc7-9017-ab03325863e3-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"bfd140a1-03fc-4dc7-9017-ab03325863e3\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.071653 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfd140a1-03fc-4dc7-9017-ab03325863e3-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"bfd140a1-03fc-4dc7-9017-ab03325863e3\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.086555 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfd140a1-03fc-4dc7-9017-ab03325863e3-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"bfd140a1-03fc-4dc7-9017-ab03325863e3\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.104972 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-96z2c\" (UniqueName: \"kubernetes.io/projected/bfd140a1-03fc-4dc7-9017-ab03325863e3-kube-api-access-96z2c\") pod \"nova-cell0-conductor-0\" (UID: \"bfd140a1-03fc-4dc7-9017-ab03325863e3\") " pod="openstack/nova-cell0-conductor-0"
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.166125 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.168446 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9t8xc\" (UniqueName: \"kubernetes.io/projected/d4926cdd-f4bc-4e14-8a05-d61260bb2649-kube-api-access-9t8xc\") pod \"d4926cdd-f4bc-4e14-8a05-d61260bb2649\" (UID: \"d4926cdd-f4bc-4e14-8a05-d61260bb2649\") "
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.168504 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-86nsw\" (UniqueName: \"kubernetes.io/projected/abddfbcf-fdbc-4fae-acf2-29943c5e6732-kube-api-access-86nsw\") pod \"abddfbcf-fdbc-4fae-acf2-29943c5e6732\" (UID: \"abddfbcf-fdbc-4fae-acf2-29943c5e6732\") "
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.168539 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d4926cdd-f4bc-4e14-8a05-d61260bb2649-logs\") pod \"d4926cdd-f4bc-4e14-8a05-d61260bb2649\" (UID: \"d4926cdd-f4bc-4e14-8a05-d61260bb2649\") "
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.168654 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/abddfbcf-fdbc-4fae-acf2-29943c5e6732-logs\") pod \"abddfbcf-fdbc-4fae-acf2-29943c5e6732\" (UID: \"abddfbcf-fdbc-4fae-acf2-29943c5e6732\") "
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.168702 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/abddfbcf-fdbc-4fae-acf2-29943c5e6732-combined-ca-bundle\") pod \"abddfbcf-fdbc-4fae-acf2-29943c5e6732\" (UID: \"abddfbcf-fdbc-4fae-acf2-29943c5e6732\") "
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.168786 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4926cdd-f4bc-4e14-8a05-d61260bb2649-combined-ca-bundle\") pod \"d4926cdd-f4bc-4e14-8a05-d61260bb2649\" (UID: \"d4926cdd-f4bc-4e14-8a05-d61260bb2649\") "
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.168822 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/abddfbcf-fdbc-4fae-acf2-29943c5e6732-config-data\") pod \"abddfbcf-fdbc-4fae-acf2-29943c5e6732\" (UID: \"abddfbcf-fdbc-4fae-acf2-29943c5e6732\") "
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.168867 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4926cdd-f4bc-4e14-8a05-d61260bb2649-config-data\") pod \"d4926cdd-f4bc-4e14-8a05-d61260bb2649\" (UID: \"d4926cdd-f4bc-4e14-8a05-d61260bb2649\") "
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.169316 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d4926cdd-f4bc-4e14-8a05-d61260bb2649-logs" (OuterVolumeSpecName: "logs") pod "d4926cdd-f4bc-4e14-8a05-d61260bb2649" (UID: "d4926cdd-f4bc-4e14-8a05-d61260bb2649"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.169561 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d4926cdd-f4bc-4e14-8a05-d61260bb2649-logs\") on node \"crc\" DevicePath \"\""
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.172328 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/abddfbcf-fdbc-4fae-acf2-29943c5e6732-logs" (OuterVolumeSpecName: "logs") pod "abddfbcf-fdbc-4fae-acf2-29943c5e6732" (UID: "abddfbcf-fdbc-4fae-acf2-29943c5e6732"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.180125 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d4926cdd-f4bc-4e14-8a05-d61260bb2649-kube-api-access-9t8xc" (OuterVolumeSpecName: "kube-api-access-9t8xc") pod "d4926cdd-f4bc-4e14-8a05-d61260bb2649" (UID: "d4926cdd-f4bc-4e14-8a05-d61260bb2649"). InnerVolumeSpecName "kube-api-access-9t8xc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.185030 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/abddfbcf-fdbc-4fae-acf2-29943c5e6732-kube-api-access-86nsw" (OuterVolumeSpecName: "kube-api-access-86nsw") pod "abddfbcf-fdbc-4fae-acf2-29943c5e6732" (UID: "abddfbcf-fdbc-4fae-acf2-29943c5e6732"). InnerVolumeSpecName "kube-api-access-86nsw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.218934 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/abddfbcf-fdbc-4fae-acf2-29943c5e6732-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "abddfbcf-fdbc-4fae-acf2-29943c5e6732" (UID: "abddfbcf-fdbc-4fae-acf2-29943c5e6732"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.226049 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/abddfbcf-fdbc-4fae-acf2-29943c5e6732-config-data" (OuterVolumeSpecName: "config-data") pod "abddfbcf-fdbc-4fae-acf2-29943c5e6732" (UID: "abddfbcf-fdbc-4fae-acf2-29943c5e6732"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.237031 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4926cdd-f4bc-4e14-8a05-d61260bb2649-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d4926cdd-f4bc-4e14-8a05-d61260bb2649" (UID: "d4926cdd-f4bc-4e14-8a05-d61260bb2649"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.243093 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4926cdd-f4bc-4e14-8a05-d61260bb2649-config-data" (OuterVolumeSpecName: "config-data") pod "d4926cdd-f4bc-4e14-8a05-d61260bb2649" (UID: "d4926cdd-f4bc-4e14-8a05-d61260bb2649"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.271243 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/abddfbcf-fdbc-4fae-acf2-29943c5e6732-logs\") on node \"crc\" DevicePath \"\""
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.271304 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/abddfbcf-fdbc-4fae-acf2-29943c5e6732-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.271318 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4926cdd-f4bc-4e14-8a05-d61260bb2649-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.271327 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/abddfbcf-fdbc-4fae-acf2-29943c5e6732-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.271337 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4926cdd-f4bc-4e14-8a05-d61260bb2649-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.271345 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9t8xc\" (UniqueName: \"kubernetes.io/projected/d4926cdd-f4bc-4e14-8a05-d61260bb2649-kube-api-access-9t8xc\") on node \"crc\" DevicePath \"\""
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.271354 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-86nsw\" (UniqueName: \"kubernetes.io/projected/abddfbcf-fdbc-4fae-acf2-29943c5e6732-kube-api-access-86nsw\") on node \"crc\" DevicePath \"\""
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.660281 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.750785 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"41e24999-304b-407e-bc06-e9b21de89249","Type":"ContainerStarted","Data":"aa9181b1906020a1865b3e399b7707ec0c9ead604e46c0e06904c9fbf81c7e76"}
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.750835 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"41e24999-304b-407e-bc06-e9b21de89249","Type":"ContainerStarted","Data":"21a55d84d2fdfae0795d4ab847143bfcca93fa09a4230db68de7d5e7688c4a8a"}
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.750925 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0"
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.756477 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"4d58ec9e-5301-4a49-9c34-2704d8db30e1","Type":"ContainerStarted","Data":"4c379d2e02f9f431f0d148a286cc6beb61a233eed5a36f9bda1732d27131d12d"}
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.756516 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"4d58ec9e-5301-4a49-9c34-2704d8db30e1","Type":"ContainerStarted","Data":"a246239882f35af31953de7f0e268f4db2aed2f0db3e3c0c9df10652720daf5c"}
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.770819 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"bfd140a1-03fc-4dc7-9017-ab03325863e3","Type":"ContainerStarted","Data":"27eece7c6f76a69638d5dacd410790b626b3124e85affa30966dd9cc03dfde21"}
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.776758 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.77674408 podStartE2EDuration="2.77674408s" podCreationTimestamp="2025-11-28 15:06:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:06:55.768025227 +0000 UTC m=+5865.891966724" watchObservedRunningTime="2025-11-28 15:06:55.77674408 +0000 UTC m=+5865.900685517"
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.797985 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.797970407 podStartE2EDuration="2.797970407s" podCreationTimestamp="2025-11-28 15:06:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:06:55.795248014 +0000 UTC m=+5865.919189461" watchObservedRunningTime="2025-11-28 15:06:55.797970407 +0000 UTC m=+5865.921911844"
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.798647 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"d4926cdd-f4bc-4e14-8a05-d61260bb2649","Type":"ContainerDied","Data":"eac2e809e7958698f3756c2e7e1f5d1e9c7cc1324add79c32d21bc9a8795edd1"}
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.798717 4857 scope.go:117] "RemoveContainer" containerID="f3a52c1f8a02ba8347f3dac283a1ecd7d65d2bcccd2c8682630fada1fffca893"
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.798934 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.803821 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.803975 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"abddfbcf-fdbc-4fae-acf2-29943c5e6732","Type":"ContainerDied","Data":"d96844a51f54d99eff726fcddfbf445451146c7aabbbde373979b25bc2070ad0"}
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.858077 4857 scope.go:117] "RemoveContainer" containerID="efc47ae08a731d27ec108edff3adc7aa74baff19d905dfb91c1371f98a8fc9e7"
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.887585 4857 scope.go:117] "RemoveContainer" containerID="4a41bfb5eb3dfee23ccc5e5b15a4fdf64fc6618299adaea8e4cd98fe8f0bcb07"
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.896274 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.921316 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.933608 4857 scope.go:117] "RemoveContainer" containerID="b3e8eb7cf2f9075a147ce977c85d7b0eb30cc3667c8da9de79d169734da27a50"
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.968662 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 15:06:55 crc kubenswrapper[4857]: I1128 15:06:55.995137 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.005740 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Nov 28 15:06:56 crc kubenswrapper[4857]: E1128 15:06:56.006361 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="abddfbcf-fdbc-4fae-acf2-29943c5e6732" containerName="nova-api-log"
Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.006384 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="abddfbcf-fdbc-4fae-acf2-29943c5e6732" containerName="nova-api-log"
Nov 28 15:06:56 crc kubenswrapper[4857]: E1128 15:06:56.006398 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4926cdd-f4bc-4e14-8a05-d61260bb2649" containerName="nova-metadata-log"
Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.006409 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4926cdd-f4bc-4e14-8a05-d61260bb2649" containerName="nova-metadata-log"
Nov 28 15:06:56 crc kubenswrapper[4857]: E1128 15:06:56.006432 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4926cdd-f4bc-4e14-8a05-d61260bb2649" containerName="nova-metadata-metadata"
Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.006440 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4926cdd-f4bc-4e14-8a05-d61260bb2649" containerName="nova-metadata-metadata"
Nov 28 15:06:56 crc kubenswrapper[4857]: E1128 15:06:56.006467 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="abddfbcf-fdbc-4fae-acf2-29943c5e6732" containerName="nova-api-api"
Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.006474 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="abddfbcf-fdbc-4fae-acf2-29943c5e6732" containerName="nova-api-api"
Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.006672 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="abddfbcf-fdbc-4fae-acf2-29943c5e6732" containerName="nova-api-log"
Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.006693 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="abddfbcf-fdbc-4fae-acf2-29943c5e6732" containerName="nova-api-api"
Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.006717 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4926cdd-f4bc-4e14-8a05-d61260bb2649" containerName="nova-metadata-metadata"
Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.006735 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4926cdd-f4bc-4e14-8a05-d61260bb2649" containerName="nova-metadata-log"
Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.008693 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.014105 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.014258 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.021550 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.024701 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.028369 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.029870 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.091400 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2eff5643-12a8-4549-a2f3-19aee5ea63b4-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2eff5643-12a8-4549-a2f3-19aee5ea63b4\") " pod="openstack/nova-metadata-0"
Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.091733 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2eff5643-12a8-4549-a2f3-19aee5ea63b4-config-data\") pod \"nova-metadata-0\" (UID: \"2eff5643-12a8-4549-a2f3-19aee5ea63b4\") " pod="openstack/nova-metadata-0"
Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.091856 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c9c9b78-b11f-4e58-a502-3c1f4c3e837b-config-data\") pod \"nova-api-0\" (UID: \"2c9c9b78-b11f-4e58-a502-3c1f4c3e837b\") " pod="openstack/nova-api-0"
Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.092001 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c9c9b78-b11f-4e58-a502-3c1f4c3e837b-logs\") pod \"nova-api-0\" (UID: \"2c9c9b78-b11f-4e58-a502-3c1f4c3e837b\") " pod="openstack/nova-api-0"
Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.092340 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2eff5643-12a8-4549-a2f3-19aee5ea63b4-logs\") pod \"nova-metadata-0\" (UID: \"2eff5643-12a8-4549-a2f3-19aee5ea63b4\") " pod="openstack/nova-metadata-0"
Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.092530 4857 reconciler_common.go:245]
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-64rk6\" (UniqueName: \"kubernetes.io/projected/2eff5643-12a8-4549-a2f3-19aee5ea63b4-kube-api-access-64rk6\") pod \"nova-metadata-0\" (UID: \"2eff5643-12a8-4549-a2f3-19aee5ea63b4\") " pod="openstack/nova-metadata-0" Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.092929 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c9c9b78-b11f-4e58-a502-3c1f4c3e837b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2c9c9b78-b11f-4e58-a502-3c1f4c3e837b\") " pod="openstack/nova-api-0" Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.093140 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vp6kv\" (UniqueName: \"kubernetes.io/projected/2c9c9b78-b11f-4e58-a502-3c1f4c3e837b-kube-api-access-vp6kv\") pod \"nova-api-0\" (UID: \"2c9c9b78-b11f-4e58-a502-3c1f4c3e837b\") " pod="openstack/nova-api-0" Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.195259 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2eff5643-12a8-4549-a2f3-19aee5ea63b4-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2eff5643-12a8-4549-a2f3-19aee5ea63b4\") " pod="openstack/nova-metadata-0" Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.195328 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2eff5643-12a8-4549-a2f3-19aee5ea63b4-config-data\") pod \"nova-metadata-0\" (UID: \"2eff5643-12a8-4549-a2f3-19aee5ea63b4\") " pod="openstack/nova-metadata-0" Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.195372 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c9c9b78-b11f-4e58-a502-3c1f4c3e837b-config-data\") pod \"nova-api-0\" (UID: \"2c9c9b78-b11f-4e58-a502-3c1f4c3e837b\") " pod="openstack/nova-api-0" Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.195398 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c9c9b78-b11f-4e58-a502-3c1f4c3e837b-logs\") pod \"nova-api-0\" (UID: \"2c9c9b78-b11f-4e58-a502-3c1f4c3e837b\") " pod="openstack/nova-api-0" Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.195446 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2eff5643-12a8-4549-a2f3-19aee5ea63b4-logs\") pod \"nova-metadata-0\" (UID: \"2eff5643-12a8-4549-a2f3-19aee5ea63b4\") " pod="openstack/nova-metadata-0" Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.195492 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-64rk6\" (UniqueName: \"kubernetes.io/projected/2eff5643-12a8-4549-a2f3-19aee5ea63b4-kube-api-access-64rk6\") pod \"nova-metadata-0\" (UID: \"2eff5643-12a8-4549-a2f3-19aee5ea63b4\") " pod="openstack/nova-metadata-0" Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.195526 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c9c9b78-b11f-4e58-a502-3c1f4c3e837b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2c9c9b78-b11f-4e58-a502-3c1f4c3e837b\") " pod="openstack/nova-api-0" Nov 28 
15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.195592 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vp6kv\" (UniqueName: \"kubernetes.io/projected/2c9c9b78-b11f-4e58-a502-3c1f4c3e837b-kube-api-access-vp6kv\") pod \"nova-api-0\" (UID: \"2c9c9b78-b11f-4e58-a502-3c1f4c3e837b\") " pod="openstack/nova-api-0" Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.196381 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c9c9b78-b11f-4e58-a502-3c1f4c3e837b-logs\") pod \"nova-api-0\" (UID: \"2c9c9b78-b11f-4e58-a502-3c1f4c3e837b\") " pod="openstack/nova-api-0" Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.196516 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2eff5643-12a8-4549-a2f3-19aee5ea63b4-logs\") pod \"nova-metadata-0\" (UID: \"2eff5643-12a8-4549-a2f3-19aee5ea63b4\") " pod="openstack/nova-metadata-0" Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.200991 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c9c9b78-b11f-4e58-a502-3c1f4c3e837b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2c9c9b78-b11f-4e58-a502-3c1f4c3e837b\") " pod="openstack/nova-api-0" Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.201037 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c9c9b78-b11f-4e58-a502-3c1f4c3e837b-config-data\") pod \"nova-api-0\" (UID: \"2c9c9b78-b11f-4e58-a502-3c1f4c3e837b\") " pod="openstack/nova-api-0" Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.201125 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2eff5643-12a8-4549-a2f3-19aee5ea63b4-config-data\") pod \"nova-metadata-0\" (UID: \"2eff5643-12a8-4549-a2f3-19aee5ea63b4\") " pod="openstack/nova-metadata-0" Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.202097 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2eff5643-12a8-4549-a2f3-19aee5ea63b4-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2eff5643-12a8-4549-a2f3-19aee5ea63b4\") " pod="openstack/nova-metadata-0" Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.214011 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-64rk6\" (UniqueName: \"kubernetes.io/projected/2eff5643-12a8-4549-a2f3-19aee5ea63b4-kube-api-access-64rk6\") pod \"nova-metadata-0\" (UID: \"2eff5643-12a8-4549-a2f3-19aee5ea63b4\") " pod="openstack/nova-metadata-0" Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.215801 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vp6kv\" (UniqueName: \"kubernetes.io/projected/2c9c9b78-b11f-4e58-a502-3c1f4c3e837b-kube-api-access-vp6kv\") pod \"nova-api-0\" (UID: \"2c9c9b78-b11f-4e58-a502-3c1f4c3e837b\") " pod="openstack/nova-api-0" Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.245768 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1478276e-09fe-4ff5-bf7e-235cc6c59ad2" path="/var/lib/kubelet/pods/1478276e-09fe-4ff5-bf7e-235cc6c59ad2/volumes" Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.247374 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="abddfbcf-fdbc-4fae-acf2-29943c5e6732" path="/var/lib/kubelet/pods/abddfbcf-fdbc-4fae-acf2-29943c5e6732/volumes" Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.248670 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d4926cdd-f4bc-4e14-8a05-d61260bb2649" path="/var/lib/kubelet/pods/d4926cdd-f4bc-4e14-8a05-d61260bb2649/volumes" Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.332834 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.346628 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.824481 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"bfd140a1-03fc-4dc7-9017-ab03325863e3","Type":"ContainerStarted","Data":"f54fca0d7aec2bc6af751f06180b10539c38e37e676af579284c9e9184f71ae2"} Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.825054 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.850849 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.850828039 podStartE2EDuration="2.850828039s" podCreationTimestamp="2025-11-28 15:06:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:06:56.843210146 +0000 UTC m=+5866.967151603" watchObservedRunningTime="2025-11-28 15:06:56.850828039 +0000 UTC m=+5866.974769476" Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.876112 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 28 15:06:56 crc kubenswrapper[4857]: W1128 15:06:56.883889 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2c9c9b78_b11f_4e58_a502_3c1f4c3e837b.slice/crio-acf914881e963d4d39f552e6f129cbe6c82960bb8a72b49c7f4b74f028de32f9 WatchSource:0}: Error finding container acf914881e963d4d39f552e6f129cbe6c82960bb8a72b49c7f4b74f028de32f9: Status 404 returned error can't find the container with id acf914881e963d4d39f552e6f129cbe6c82960bb8a72b49c7f4b74f028de32f9 Nov 28 15:06:56 crc kubenswrapper[4857]: I1128 15:06:56.930124 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 15:06:56 crc kubenswrapper[4857]: W1128 15:06:56.934833 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2eff5643_12a8_4549_a2f3_19aee5ea63b4.slice/crio-7f50e01d4adba1aed9bd2dcd6a5b660a397bb0c5622c27b71a521c7cf47cef74 WatchSource:0}: Error finding container 7f50e01d4adba1aed9bd2dcd6a5b660a397bb0c5622c27b71a521c7cf47cef74: Status 404 returned error can't find the container with id 7f50e01d4adba1aed9bd2dcd6a5b660a397bb0c5622c27b71a521c7cf47cef74 Nov 28 15:06:57 crc kubenswrapper[4857]: I1128 15:06:57.836146 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2eff5643-12a8-4549-a2f3-19aee5ea63b4","Type":"ContainerStarted","Data":"5acc0626a52bad7524203ca02e10c4c0fffa2729264edbdfe63e3c5d3e7aa2b9"} Nov 28 15:06:57 crc kubenswrapper[4857]: I1128 15:06:57.837040 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-metadata-0" event={"ID":"2eff5643-12a8-4549-a2f3-19aee5ea63b4","Type":"ContainerStarted","Data":"bcfcd63b811032d62e3e2e24a1e7da6a701d8fd597f92bc88db9e950d0a5004c"} Nov 28 15:06:57 crc kubenswrapper[4857]: I1128 15:06:57.837056 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2eff5643-12a8-4549-a2f3-19aee5ea63b4","Type":"ContainerStarted","Data":"7f50e01d4adba1aed9bd2dcd6a5b660a397bb0c5622c27b71a521c7cf47cef74"} Nov 28 15:06:57 crc kubenswrapper[4857]: I1128 15:06:57.838589 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2c9c9b78-b11f-4e58-a502-3c1f4c3e837b","Type":"ContainerStarted","Data":"b5d696117642bc1f98c754b7864abc086a755e8fb92918f8591d2e1ce6393d45"} Nov 28 15:06:57 crc kubenswrapper[4857]: I1128 15:06:57.838670 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2c9c9b78-b11f-4e58-a502-3c1f4c3e837b","Type":"ContainerStarted","Data":"5420d7647e4cd92b8d2b3546873c0b0d3ce467e1452887f1299f47651d3525bc"} Nov 28 15:06:57 crc kubenswrapper[4857]: I1128 15:06:57.838695 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2c9c9b78-b11f-4e58-a502-3c1f4c3e837b","Type":"ContainerStarted","Data":"acf914881e963d4d39f552e6f129cbe6c82960bb8a72b49c7f4b74f028de32f9"} Nov 28 15:06:57 crc kubenswrapper[4857]: I1128 15:06:57.855983 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.855963556 podStartE2EDuration="2.855963556s" podCreationTimestamp="2025-11-28 15:06:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:06:57.851851776 +0000 UTC m=+5867.975793213" watchObservedRunningTime="2025-11-28 15:06:57.855963556 +0000 UTC m=+5867.979905003" Nov 28 15:06:57 crc kubenswrapper[4857]: I1128 15:06:57.885464 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.885446714 podStartE2EDuration="2.885446714s" podCreationTimestamp="2025-11-28 15:06:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:06:57.876456064 +0000 UTC m=+5868.000397521" watchObservedRunningTime="2025-11-28 15:06:57.885446714 +0000 UTC m=+5868.009388151" Nov 28 15:06:57 crc kubenswrapper[4857]: I1128 15:06:57.983682 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:06:59 crc kubenswrapper[4857]: I1128 15:06:59.078099 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 28 15:07:00 crc kubenswrapper[4857]: I1128 15:07:00.201659 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 28 15:07:01 crc kubenswrapper[4857]: I1128 15:07:01.347017 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 15:07:01 crc kubenswrapper[4857]: I1128 15:07:01.347367 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 28 15:07:02 crc kubenswrapper[4857]: I1128 15:07:02.984031 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:07:02 crc kubenswrapper[4857]: I1128 
15:07:02.997229 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:07:03 crc kubenswrapper[4857]: I1128 15:07:03.229913 4857 scope.go:117] "RemoveContainer" containerID="241be049ff6ed3f89c9bfd340177c2b5b57d6cca2ed8fa8917c7f312eb8c7ce1" Nov 28 15:07:03 crc kubenswrapper[4857]: E1128 15:07:03.231010 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:07:03 crc kubenswrapper[4857]: I1128 15:07:03.920257 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 28 15:07:04 crc kubenswrapper[4857]: I1128 15:07:04.080992 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 28 15:07:04 crc kubenswrapper[4857]: I1128 15:07:04.106296 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 28 15:07:04 crc kubenswrapper[4857]: I1128 15:07:04.107835 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 28 15:07:04 crc kubenswrapper[4857]: I1128 15:07:04.951740 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 28 15:07:06 crc kubenswrapper[4857]: I1128 15:07:06.333407 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 15:07:06 crc kubenswrapper[4857]: I1128 15:07:06.333917 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 28 15:07:06 crc kubenswrapper[4857]: I1128 15:07:06.350415 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 15:07:06 crc kubenswrapper[4857]: I1128 15:07:06.350671 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 28 15:07:07 crc kubenswrapper[4857]: I1128 15:07:07.500269 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="2eff5643-12a8-4549-a2f3-19aee5ea63b4" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"http://10.217.1.89:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 15:07:07 crc kubenswrapper[4857]: I1128 15:07:07.500327 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="2eff5643-12a8-4549-a2f3-19aee5ea63b4" containerName="nova-metadata-log" probeResult="failure" output="Get \"http://10.217.1.89:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 15:07:07 crc kubenswrapper[4857]: I1128 15:07:07.500262 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="2c9c9b78-b11f-4e58-a502-3c1f4c3e837b" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.88:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 15:07:07 crc kubenswrapper[4857]: I1128 15:07:07.500269 4857 prober.go:107] "Probe failed" 
probeType="Startup" pod="openstack/nova-api-0" podUID="2c9c9b78-b11f-4e58-a502-3c1f4c3e837b" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.88:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 15:07:10 crc kubenswrapper[4857]: I1128 15:07:10.359292 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 15:07:10 crc kubenswrapper[4857]: I1128 15:07:10.362342 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 15:07:10 crc kubenswrapper[4857]: I1128 15:07:10.364808 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 28 15:07:10 crc kubenswrapper[4857]: I1128 15:07:10.370357 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 15:07:10 crc kubenswrapper[4857]: I1128 15:07:10.455554 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2357330a-f9bf-4702-a862-8fafa3f224ba-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"2357330a-f9bf-4702-a862-8fafa3f224ba\") " pod="openstack/cinder-scheduler-0" Nov 28 15:07:10 crc kubenswrapper[4857]: I1128 15:07:10.455624 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2357330a-f9bf-4702-a862-8fafa3f224ba-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"2357330a-f9bf-4702-a862-8fafa3f224ba\") " pod="openstack/cinder-scheduler-0" Nov 28 15:07:10 crc kubenswrapper[4857]: I1128 15:07:10.455654 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wmhxf\" (UniqueName: \"kubernetes.io/projected/2357330a-f9bf-4702-a862-8fafa3f224ba-kube-api-access-wmhxf\") pod \"cinder-scheduler-0\" (UID: \"2357330a-f9bf-4702-a862-8fafa3f224ba\") " pod="openstack/cinder-scheduler-0" Nov 28 15:07:10 crc kubenswrapper[4857]: I1128 15:07:10.455723 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2357330a-f9bf-4702-a862-8fafa3f224ba-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"2357330a-f9bf-4702-a862-8fafa3f224ba\") " pod="openstack/cinder-scheduler-0" Nov 28 15:07:10 crc kubenswrapper[4857]: I1128 15:07:10.455746 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2357330a-f9bf-4702-a862-8fafa3f224ba-scripts\") pod \"cinder-scheduler-0\" (UID: \"2357330a-f9bf-4702-a862-8fafa3f224ba\") " pod="openstack/cinder-scheduler-0" Nov 28 15:07:10 crc kubenswrapper[4857]: I1128 15:07:10.455765 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2357330a-f9bf-4702-a862-8fafa3f224ba-config-data\") pod \"cinder-scheduler-0\" (UID: \"2357330a-f9bf-4702-a862-8fafa3f224ba\") " pod="openstack/cinder-scheduler-0" Nov 28 15:07:10 crc kubenswrapper[4857]: I1128 15:07:10.557047 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2357330a-f9bf-4702-a862-8fafa3f224ba-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: 
\"2357330a-f9bf-4702-a862-8fafa3f224ba\") " pod="openstack/cinder-scheduler-0" Nov 28 15:07:10 crc kubenswrapper[4857]: I1128 15:07:10.557334 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2357330a-f9bf-4702-a862-8fafa3f224ba-scripts\") pod \"cinder-scheduler-0\" (UID: \"2357330a-f9bf-4702-a862-8fafa3f224ba\") " pod="openstack/cinder-scheduler-0" Nov 28 15:07:10 crc kubenswrapper[4857]: I1128 15:07:10.557419 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2357330a-f9bf-4702-a862-8fafa3f224ba-config-data\") pod \"cinder-scheduler-0\" (UID: \"2357330a-f9bf-4702-a862-8fafa3f224ba\") " pod="openstack/cinder-scheduler-0" Nov 28 15:07:10 crc kubenswrapper[4857]: I1128 15:07:10.557541 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2357330a-f9bf-4702-a862-8fafa3f224ba-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"2357330a-f9bf-4702-a862-8fafa3f224ba\") " pod="openstack/cinder-scheduler-0" Nov 28 15:07:10 crc kubenswrapper[4857]: I1128 15:07:10.557670 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2357330a-f9bf-4702-a862-8fafa3f224ba-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"2357330a-f9bf-4702-a862-8fafa3f224ba\") " pod="openstack/cinder-scheduler-0" Nov 28 15:07:10 crc kubenswrapper[4857]: I1128 15:07:10.557776 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wmhxf\" (UniqueName: \"kubernetes.io/projected/2357330a-f9bf-4702-a862-8fafa3f224ba-kube-api-access-wmhxf\") pod \"cinder-scheduler-0\" (UID: \"2357330a-f9bf-4702-a862-8fafa3f224ba\") " pod="openstack/cinder-scheduler-0" Nov 28 15:07:10 crc kubenswrapper[4857]: I1128 15:07:10.558044 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2357330a-f9bf-4702-a862-8fafa3f224ba-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"2357330a-f9bf-4702-a862-8fafa3f224ba\") " pod="openstack/cinder-scheduler-0" Nov 28 15:07:10 crc kubenswrapper[4857]: I1128 15:07:10.563683 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2357330a-f9bf-4702-a862-8fafa3f224ba-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"2357330a-f9bf-4702-a862-8fafa3f224ba\") " pod="openstack/cinder-scheduler-0" Nov 28 15:07:10 crc kubenswrapper[4857]: I1128 15:07:10.563780 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2357330a-f9bf-4702-a862-8fafa3f224ba-scripts\") pod \"cinder-scheduler-0\" (UID: \"2357330a-f9bf-4702-a862-8fafa3f224ba\") " pod="openstack/cinder-scheduler-0" Nov 28 15:07:10 crc kubenswrapper[4857]: I1128 15:07:10.564490 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2357330a-f9bf-4702-a862-8fafa3f224ba-config-data\") pod \"cinder-scheduler-0\" (UID: \"2357330a-f9bf-4702-a862-8fafa3f224ba\") " pod="openstack/cinder-scheduler-0" Nov 28 15:07:10 crc kubenswrapper[4857]: I1128 15:07:10.565762 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/2357330a-f9bf-4702-a862-8fafa3f224ba-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"2357330a-f9bf-4702-a862-8fafa3f224ba\") " pod="openstack/cinder-scheduler-0" Nov 28 15:07:10 crc kubenswrapper[4857]: I1128 15:07:10.572588 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wmhxf\" (UniqueName: \"kubernetes.io/projected/2357330a-f9bf-4702-a862-8fafa3f224ba-kube-api-access-wmhxf\") pod \"cinder-scheduler-0\" (UID: \"2357330a-f9bf-4702-a862-8fafa3f224ba\") " pod="openstack/cinder-scheduler-0" Nov 28 15:07:10 crc kubenswrapper[4857]: I1128 15:07:10.694112 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 15:07:11 crc kubenswrapper[4857]: I1128 15:07:11.199782 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.017025 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"2357330a-f9bf-4702-a862-8fafa3f224ba","Type":"ContainerStarted","Data":"6cc181dc9bffbdff0a1a3dff3c456a7760eeb83adda5d243be560e84d4f4156f"} Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.017736 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"2357330a-f9bf-4702-a862-8fafa3f224ba","Type":"ContainerStarted","Data":"afc14c4d6c94a24543884df0b18b643097f5aa194820eed93c026efbe2ef1338"} Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.059586 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.059833 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="6fa8fc93-7ca7-489a-a24c-fd617f50b178" containerName="cinder-api-log" containerID="cri-o://b5bb3f0ba3711311e335f51955048b067b26a96feff2b8fc060dd9a597cb8cf9" gracePeriod=30 Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.059887 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="6fa8fc93-7ca7-489a-a24c-fd617f50b178" containerName="cinder-api" containerID="cri-o://f44057963a725c18ea4d3c556814f3b500d4d64c8ade56468c52456b86043809" gracePeriod=30 Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.686821 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-volume-volume1-0"] Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.689305 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.692408 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-volume-volume1-config-data" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.713874 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-volume1-0"] Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.805041 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/77a9c727-7461-4b83-b446-b9958a4de940-sys\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.805313 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/77a9c727-7461-4b83-b446-b9958a4de940-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.805501 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/77a9c727-7461-4b83-b446-b9958a4de940-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.805605 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/77a9c727-7461-4b83-b446-b9958a4de940-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.805732 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77a9c727-7461-4b83-b446-b9958a4de940-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.805807 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/77a9c727-7461-4b83-b446-b9958a4de940-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.805875 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/77a9c727-7461-4b83-b446-b9958a4de940-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.805946 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/77a9c727-7461-4b83-b446-b9958a4de940-run\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc 
kubenswrapper[4857]: I1128 15:07:12.806053 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/77a9c727-7461-4b83-b446-b9958a4de940-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.806118 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/77a9c727-7461-4b83-b446-b9958a4de940-dev\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.806208 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/77a9c727-7461-4b83-b446-b9958a4de940-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.806279 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/77a9c727-7461-4b83-b446-b9958a4de940-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.806347 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77a9c727-7461-4b83-b446-b9958a4de940-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.806426 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/77a9c727-7461-4b83-b446-b9958a4de940-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.806498 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8jmgr\" (UniqueName: \"kubernetes.io/projected/77a9c727-7461-4b83-b446-b9958a4de940-kube-api-access-8jmgr\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.806566 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/77a9c727-7461-4b83-b446-b9958a4de940-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.908440 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/77a9c727-7461-4b83-b446-b9958a4de940-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.908568 
4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77a9c727-7461-4b83-b446-b9958a4de940-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.908599 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/77a9c727-7461-4b83-b446-b9958a4de940-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.908628 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/77a9c727-7461-4b83-b446-b9958a4de940-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.908655 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/77a9c727-7461-4b83-b446-b9958a4de940-run\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.908686 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/77a9c727-7461-4b83-b446-b9958a4de940-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.908709 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/77a9c727-7461-4b83-b446-b9958a4de940-dev\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.908739 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/77a9c727-7461-4b83-b446-b9958a4de940-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.908769 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/77a9c727-7461-4b83-b446-b9958a4de940-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.908772 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/77a9c727-7461-4b83-b446-b9958a4de940-run\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.908792 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77a9c727-7461-4b83-b446-b9958a4de940-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " 
pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.908820 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/77a9c727-7461-4b83-b446-b9958a4de940-etc-machine-id\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.908834 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/77a9c727-7461-4b83-b446-b9958a4de940-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.908847 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/77a9c727-7461-4b83-b446-b9958a4de940-lib-modules\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.908871 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8jmgr\" (UniqueName: \"kubernetes.io/projected/77a9c727-7461-4b83-b446-b9958a4de940-kube-api-access-8jmgr\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.908900 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/77a9c727-7461-4b83-b446-b9958a4de940-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.908968 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/77a9c727-7461-4b83-b446-b9958a4de940-sys\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.908992 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/77a9c727-7461-4b83-b446-b9958a4de940-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.909020 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/77a9c727-7461-4b83-b446-b9958a4de940-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.909119 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/77a9c727-7461-4b83-b446-b9958a4de940-etc-iscsi\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.908739 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/77a9c727-7461-4b83-b446-b9958a4de940-var-locks-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.908827 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/77a9c727-7461-4b83-b446-b9958a4de940-etc-nvme\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.909444 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/77a9c727-7461-4b83-b446-b9958a4de940-dev\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.909475 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/77a9c727-7461-4b83-b446-b9958a4de940-sys\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.909554 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/77a9c727-7461-4b83-b446-b9958a4de940-var-lib-cinder\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.909632 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/77a9c727-7461-4b83-b446-b9958a4de940-var-locks-brick\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.915227 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/77a9c727-7461-4b83-b446-b9958a4de940-ceph\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.917398 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/77a9c727-7461-4b83-b446-b9958a4de940-scripts\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.918556 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/77a9c727-7461-4b83-b446-b9958a4de940-config-data-custom\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.919729 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/77a9c727-7461-4b83-b446-b9958a4de940-config-data\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0" Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.927587 4857 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8jmgr\" (UniqueName: \"kubernetes.io/projected/77a9c727-7461-4b83-b446-b9958a4de940-kube-api-access-8jmgr\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0"
Nov 28 15:07:12 crc kubenswrapper[4857]: I1128 15:07:12.931932 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77a9c727-7461-4b83-b446-b9958a4de940-combined-ca-bundle\") pod \"cinder-volume-volume1-0\" (UID: \"77a9c727-7461-4b83-b446-b9958a4de940\") " pod="openstack/cinder-volume-volume1-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.010257 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-volume-volume1-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.026832 4857 generic.go:334] "Generic (PLEG): container finished" podID="6fa8fc93-7ca7-489a-a24c-fd617f50b178" containerID="b5bb3f0ba3711311e335f51955048b067b26a96feff2b8fc060dd9a597cb8cf9" exitCode=143
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.026909 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"6fa8fc93-7ca7-489a-a24c-fd617f50b178","Type":"ContainerDied","Data":"b5bb3f0ba3711311e335f51955048b067b26a96feff2b8fc060dd9a597cb8cf9"}
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.028897 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"2357330a-f9bf-4702-a862-8fafa3f224ba","Type":"ContainerStarted","Data":"3ea011c45260042cdb2468cc4a561a32a25c054b4a3d01608605d41cedda77df"}
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.054091 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.054074358 podStartE2EDuration="3.054074358s" podCreationTimestamp="2025-11-28 15:07:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:07:13.052636219 +0000 UTC m=+5883.176577676" watchObservedRunningTime="2025-11-28 15:07:13.054074358 +0000 UTC m=+5883.178015795"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.346832 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-backup-0"]
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.349343 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.351533 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-backup-config-data"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.356083 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"]
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.425961 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2bzxf\" (UniqueName: \"kubernetes.io/projected/fd47b944-e376-44ce-888f-f5f75474d0d9-kube-api-access-2bzxf\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.426020 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/fd47b944-e376-44ce-888f-f5f75474d0d9-lib-modules\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.426044 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/fd47b944-e376-44ce-888f-f5f75474d0d9-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.426060 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/fd47b944-e376-44ce-888f-f5f75474d0d9-sys\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.426081 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/fd47b944-e376-44ce-888f-f5f75474d0d9-ceph\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.426116 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/fd47b944-e376-44ce-888f-f5f75474d0d9-run\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.426147 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fd47b944-e376-44ce-888f-f5f75474d0d9-config-data-custom\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.426163 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/fd47b944-e376-44ce-888f-f5f75474d0d9-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.426182 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/fd47b944-e376-44ce-888f-f5f75474d0d9-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.426200 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fd47b944-e376-44ce-888f-f5f75474d0d9-scripts\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.426231 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/fd47b944-e376-44ce-888f-f5f75474d0d9-etc-nvme\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.426257 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/fd47b944-e376-44ce-888f-f5f75474d0d9-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.426297 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/fd47b944-e376-44ce-888f-f5f75474d0d9-dev\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.426315 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd47b944-e376-44ce-888f-f5f75474d0d9-config-data\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.426330 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/fd47b944-e376-44ce-888f-f5f75474d0d9-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.426358 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd47b944-e376-44ce-888f-f5f75474d0d9-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.528355 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd47b944-e376-44ce-888f-f5f75474d0d9-config-data\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.528421 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/fd47b944-e376-44ce-888f-f5f75474d0d9-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.528467 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd47b944-e376-44ce-888f-f5f75474d0d9-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.528519 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2bzxf\" (UniqueName: \"kubernetes.io/projected/fd47b944-e376-44ce-888f-f5f75474d0d9-kube-api-access-2bzxf\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.528547 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/fd47b944-e376-44ce-888f-f5f75474d0d9-lib-modules\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.528574 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/fd47b944-e376-44ce-888f-f5f75474d0d9-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.528598 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/fd47b944-e376-44ce-888f-f5f75474d0d9-sys\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.528622 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/fd47b944-e376-44ce-888f-f5f75474d0d9-ceph\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.528667 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/host-path/fd47b944-e376-44ce-888f-f5f75474d0d9-run\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.528710 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fd47b944-e376-44ce-888f-f5f75474d0d9-config-data-custom\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.528730 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/fd47b944-e376-44ce-888f-f5f75474d0d9-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.528760 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/fd47b944-e376-44ce-888f-f5f75474d0d9-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.528782 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fd47b944-e376-44ce-888f-f5f75474d0d9-scripts\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.528824 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/fd47b944-e376-44ce-888f-f5f75474d0d9-etc-nvme\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.528859 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/fd47b944-e376-44ce-888f-f5f75474d0d9-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.528909 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/fd47b944-e376-44ce-888f-f5f75474d0d9-dev\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.529027 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dev\" (UniqueName: \"kubernetes.io/host-path/fd47b944-e376-44ce-888f-f5f75474d0d9-dev\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.529888 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-brick\" (UniqueName: \"kubernetes.io/host-path/fd47b944-e376-44ce-888f-f5f75474d0d9-var-locks-brick\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.529924 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/host-path/fd47b944-e376-44ce-888f-f5f75474d0d9-run\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.529980 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-locks-cinder\" (UniqueName: \"kubernetes.io/host-path/fd47b944-e376-44ce-888f-f5f75474d0d9-var-locks-cinder\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.530006 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-iscsi\" (UniqueName: \"kubernetes.io/host-path/fd47b944-e376-44ce-888f-f5f75474d0d9-etc-iscsi\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.530069 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-nvme\" (UniqueName: \"kubernetes.io/host-path/fd47b944-e376-44ce-888f-f5f75474d0d9-etc-nvme\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.530148 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sys\" (UniqueName: \"kubernetes.io/host-path/fd47b944-e376-44ce-888f-f5f75474d0d9-sys\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.530169 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/fd47b944-e376-44ce-888f-f5f75474d0d9-etc-machine-id\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.530215 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/fd47b944-e376-44ce-888f-f5f75474d0d9-lib-modules\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.530261 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-cinder\" (UniqueName: \"kubernetes.io/host-path/fd47b944-e376-44ce-888f-f5f75474d0d9-var-lib-cinder\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.538021 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fd47b944-e376-44ce-888f-f5f75474d0d9-scripts\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.538086 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fd47b944-e376-44ce-888f-f5f75474d0d9-config-data\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.539582 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/fd47b944-e376-44ce-888f-f5f75474d0d9-ceph\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.542567 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fd47b944-e376-44ce-888f-f5f75474d0d9-config-data-custom\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.553655 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fd47b944-e376-44ce-888f-f5f75474d0d9-combined-ca-bundle\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.563845 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2bzxf\" (UniqueName: \"kubernetes.io/projected/fd47b944-e376-44ce-888f-f5f75474d0d9-kube-api-access-2bzxf\") pod \"cinder-backup-0\" (UID: \"fd47b944-e376-44ce-888f-f5f75474d0d9\") " pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.677897 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-backup-0"
Nov 28 15:07:13 crc kubenswrapper[4857]: I1128 15:07:13.760475 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-volume-volume1-0"]
Nov 28 15:07:13 crc kubenswrapper[4857]: W1128 15:07:13.766920 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod77a9c727_7461_4b83_b446_b9958a4de940.slice/crio-162461c0c829fb5b43a23c0d6525b7e0a4a69776ec62e4f573714421dc0b9b66 WatchSource:0}: Error finding container 162461c0c829fb5b43a23c0d6525b7e0a4a69776ec62e4f573714421dc0b9b66: Status 404 returned error can't find the container with id 162461c0c829fb5b43a23c0d6525b7e0a4a69776ec62e4f573714421dc0b9b66
Nov 28 15:07:14 crc kubenswrapper[4857]: I1128 15:07:14.040936 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"77a9c727-7461-4b83-b446-b9958a4de940","Type":"ContainerStarted","Data":"162461c0c829fb5b43a23c0d6525b7e0a4a69776ec62e4f573714421dc0b9b66"}
Nov 28 15:07:14 crc kubenswrapper[4857]: I1128 15:07:14.244672 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-backup-0"]
Nov 28 15:07:15 crc kubenswrapper[4857]: I1128 15:07:15.052024 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"fd47b944-e376-44ce-888f-f5f75474d0d9","Type":"ContainerStarted","Data":"8e8963287057ca3fbfac84c62188d11294d4078bf3d53f6b65f33a71c86effae"}
Nov 28 15:07:15 crc kubenswrapper[4857]: I1128 15:07:15.054328 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"77a9c727-7461-4b83-b446-b9958a4de940","Type":"ContainerStarted","Data":"9f2f458a260252c24b69d2914e58deb243709c0a45fe0e5884b38c07a9da6090"}
Nov 28 15:07:15 crc kubenswrapper[4857]: I1128 15:07:15.694217 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0"
Nov 28 15:07:16 crc kubenswrapper[4857]: I1128 15:07:16.070340 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"fd47b944-e376-44ce-888f-f5f75474d0d9","Type":"ContainerStarted","Data":"b0a5b1f3fa6efd84753e11221dcbc043b2d8e86bbdf8270f4ae70752a2d12a5f"}
Nov 28 15:07:16 crc kubenswrapper[4857]: I1128 15:07:16.074199 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-backup-0" event={"ID":"fd47b944-e376-44ce-888f-f5f75474d0d9","Type":"ContainerStarted","Data":"11041c81741672ae6e317ca72f4a3427428b2cb6135fb307afa2038fc63f39e8"}
Nov 28 15:07:16 crc kubenswrapper[4857]: I1128 15:07:16.076706 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-volume-volume1-0" event={"ID":"77a9c727-7461-4b83-b446-b9958a4de940","Type":"ContainerStarted","Data":"69b379d9572bfdf6f681df120420822e072186d9ccd3518c9dcf6108336854cd"}
Nov 28 15:07:16 crc kubenswrapper[4857]: I1128 15:07:16.104561 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-backup-0" podStartSLOduration=1.9835282250000001 podStartE2EDuration="3.104533677s" podCreationTimestamp="2025-11-28 15:07:13 +0000 UTC" firstStartedPulling="2025-11-28 15:07:14.243475138 +0000 UTC m=+5884.367416575" lastFinishedPulling="2025-11-28 15:07:15.36448059 +0000 UTC m=+5885.488422027" observedRunningTime="2025-11-28 15:07:16.102787981 +0000 UTC m=+5886.226729418" watchObservedRunningTime="2025-11-28 15:07:16.104533677 +0000 UTC m=+5886.228475144"
Nov 28 15:07:16 crc kubenswrapper[4857]: I1128 15:07:16.146347 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-volume-volume1-0" podStartSLOduration=3.251411379 podStartE2EDuration="4.146314833s" podCreationTimestamp="2025-11-28 15:07:12 +0000 UTC" firstStartedPulling="2025-11-28 15:07:13.771762698 +0000 UTC m=+5883.895704145" lastFinishedPulling="2025-11-28 15:07:14.666666162 +0000 UTC m=+5884.790607599" observedRunningTime="2025-11-28 15:07:16.136728657 +0000 UTC m=+5886.260670094" watchObservedRunningTime="2025-11-28 15:07:16.146314833 +0000 UTC m=+5886.270256300"
Nov 28 15:07:16 crc kubenswrapper[4857]: I1128 15:07:16.337446 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 28 15:07:16 crc kubenswrapper[4857]: I1128 15:07:16.338524 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 28 15:07:16 crc kubenswrapper[4857]: I1128 15:07:16.342010 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 28 15:07:16 crc kubenswrapper[4857]: I1128 15:07:16.342376 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 28 15:07:16 crc kubenswrapper[4857]: I1128 15:07:16.350932 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 28 15:07:16 crc kubenswrapper[4857]: I1128 15:07:16.356281 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 28 15:07:16 crc kubenswrapper[4857]: I1128 15:07:16.358690 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 28 15:07:16 crc kubenswrapper[4857]: I1128 15:07:16.520470 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="6fa8fc93-7ca7-489a-a24c-fd617f50b178" containerName="cinder-api" probeResult="failure" output="Get \"http://10.217.1.83:8776/healthcheck\": read tcp 10.217.0.2:52642->10.217.1.83:8776: read: connection reset by peer"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.069397 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.106529 4857 generic.go:334] "Generic (PLEG): container finished" podID="6fa8fc93-7ca7-489a-a24c-fd617f50b178" containerID="f44057963a725c18ea4d3c556814f3b500d4d64c8ade56468c52456b86043809" exitCode=0
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.107547 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"6fa8fc93-7ca7-489a-a24c-fd617f50b178","Type":"ContainerDied","Data":"f44057963a725c18ea4d3c556814f3b500d4d64c8ade56468c52456b86043809"}
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.107622 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"6fa8fc93-7ca7-489a-a24c-fd617f50b178","Type":"ContainerDied","Data":"81c53b761761358988fe165149142aee5b141fbf8e8d2187c1b55e53d38aa8eb"}
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.107651 4857 scope.go:117] "RemoveContainer" containerID="f44057963a725c18ea4d3c556814f3b500d4d64c8ade56468c52456b86043809"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.107965 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.108938 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.111799 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.113710 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6fa8fc93-7ca7-489a-a24c-fd617f50b178-config-data-custom\") pod \"6fa8fc93-7ca7-489a-a24c-fd617f50b178\" (UID: \"6fa8fc93-7ca7-489a-a24c-fd617f50b178\") "
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.113764 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6fa8fc93-7ca7-489a-a24c-fd617f50b178-etc-machine-id\") pod \"6fa8fc93-7ca7-489a-a24c-fd617f50b178\" (UID: \"6fa8fc93-7ca7-489a-a24c-fd617f50b178\") "
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.113999 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7mxfw\" (UniqueName: \"kubernetes.io/projected/6fa8fc93-7ca7-489a-a24c-fd617f50b178-kube-api-access-7mxfw\") pod \"6fa8fc93-7ca7-489a-a24c-fd617f50b178\" (UID: \"6fa8fc93-7ca7-489a-a24c-fd617f50b178\") "
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.114074 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6fa8fc93-7ca7-489a-a24c-fd617f50b178-logs\") pod \"6fa8fc93-7ca7-489a-a24c-fd617f50b178\" (UID: \"6fa8fc93-7ca7-489a-a24c-fd617f50b178\") "
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.114117 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fa8fc93-7ca7-489a-a24c-fd617f50b178-combined-ca-bundle\") pod \"6fa8fc93-7ca7-489a-a24c-fd617f50b178\" (UID: \"6fa8fc93-7ca7-489a-a24c-fd617f50b178\") "
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.114157 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6fa8fc93-7ca7-489a-a24c-fd617f50b178-config-data\") pod \"6fa8fc93-7ca7-489a-a24c-fd617f50b178\" (UID: \"6fa8fc93-7ca7-489a-a24c-fd617f50b178\") "
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.114380 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6fa8fc93-7ca7-489a-a24c-fd617f50b178-scripts\") pod \"6fa8fc93-7ca7-489a-a24c-fd617f50b178\" (UID: \"6fa8fc93-7ca7-489a-a24c-fd617f50b178\") "
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.117284 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6fa8fc93-7ca7-489a-a24c-fd617f50b178-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "6fa8fc93-7ca7-489a-a24c-fd617f50b178" (UID: "6fa8fc93-7ca7-489a-a24c-fd617f50b178"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.118585 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6fa8fc93-7ca7-489a-a24c-fd617f50b178-logs" (OuterVolumeSpecName: "logs") pod "6fa8fc93-7ca7-489a-a24c-fd617f50b178" (UID: "6fa8fc93-7ca7-489a-a24c-fd617f50b178"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.119122 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.128053 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6fa8fc93-7ca7-489a-a24c-fd617f50b178-kube-api-access-7mxfw" (OuterVolumeSpecName: "kube-api-access-7mxfw") pod "6fa8fc93-7ca7-489a-a24c-fd617f50b178" (UID: "6fa8fc93-7ca7-489a-a24c-fd617f50b178"). InnerVolumeSpecName "kube-api-access-7mxfw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.139404 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6fa8fc93-7ca7-489a-a24c-fd617f50b178-scripts" (OuterVolumeSpecName: "scripts") pod "6fa8fc93-7ca7-489a-a24c-fd617f50b178" (UID: "6fa8fc93-7ca7-489a-a24c-fd617f50b178"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.142548 4857 scope.go:117] "RemoveContainer" containerID="b5bb3f0ba3711311e335f51955048b067b26a96feff2b8fc060dd9a597cb8cf9"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.143600 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6fa8fc93-7ca7-489a-a24c-fd617f50b178-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "6fa8fc93-7ca7-489a-a24c-fd617f50b178" (UID: "6fa8fc93-7ca7-489a-a24c-fd617f50b178"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.151849 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6fa8fc93-7ca7-489a-a24c-fd617f50b178-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6fa8fc93-7ca7-489a-a24c-fd617f50b178" (UID: "6fa8fc93-7ca7-489a-a24c-fd617f50b178"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.218484 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6fa8fc93-7ca7-489a-a24c-fd617f50b178-logs\") on node \"crc\" DevicePath \"\""
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.218806 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fa8fc93-7ca7-489a-a24c-fd617f50b178-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.218924 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6fa8fc93-7ca7-489a-a24c-fd617f50b178-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.228356 4857 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6fa8fc93-7ca7-489a-a24c-fd617f50b178-config-data-custom\") on node \"crc\" DevicePath \"\""
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.228446 4857 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/6fa8fc93-7ca7-489a-a24c-fd617f50b178-etc-machine-id\") on node \"crc\" DevicePath \"\""
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.228519 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7mxfw\" (UniqueName: \"kubernetes.io/projected/6fa8fc93-7ca7-489a-a24c-fd617f50b178-kube-api-access-7mxfw\") on node \"crc\" DevicePath \"\""
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.229903 4857 scope.go:117] "RemoveContainer" containerID="241be049ff6ed3f89c9bfd340177c2b5b57d6cca2ed8fa8917c7f312eb8c7ce1"
Nov 28 15:07:17 crc kubenswrapper[4857]: E1128 15:07:17.230195 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.251156 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6fa8fc93-7ca7-489a-a24c-fd617f50b178-config-data" (OuterVolumeSpecName: "config-data") pod "6fa8fc93-7ca7-489a-a24c-fd617f50b178" (UID: "6fa8fc93-7ca7-489a-a24c-fd617f50b178"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.288620 4857 scope.go:117] "RemoveContainer" containerID="f44057963a725c18ea4d3c556814f3b500d4d64c8ade56468c52456b86043809"
Nov 28 15:07:17 crc kubenswrapper[4857]: E1128 15:07:17.289704 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f44057963a725c18ea4d3c556814f3b500d4d64c8ade56468c52456b86043809\": container with ID starting with f44057963a725c18ea4d3c556814f3b500d4d64c8ade56468c52456b86043809 not found: ID does not exist" containerID="f44057963a725c18ea4d3c556814f3b500d4d64c8ade56468c52456b86043809"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.289750 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f44057963a725c18ea4d3c556814f3b500d4d64c8ade56468c52456b86043809"} err="failed to get container status \"f44057963a725c18ea4d3c556814f3b500d4d64c8ade56468c52456b86043809\": rpc error: code = NotFound desc = could not find container \"f44057963a725c18ea4d3c556814f3b500d4d64c8ade56468c52456b86043809\": container with ID starting with f44057963a725c18ea4d3c556814f3b500d4d64c8ade56468c52456b86043809 not found: ID does not exist"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.289783 4857 scope.go:117] "RemoveContainer" containerID="b5bb3f0ba3711311e335f51955048b067b26a96feff2b8fc060dd9a597cb8cf9"
Nov 28 15:07:17 crc kubenswrapper[4857]: E1128 15:07:17.290216 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b5bb3f0ba3711311e335f51955048b067b26a96feff2b8fc060dd9a597cb8cf9\": container with ID starting with b5bb3f0ba3711311e335f51955048b067b26a96feff2b8fc060dd9a597cb8cf9 not found: ID does not exist" containerID="b5bb3f0ba3711311e335f51955048b067b26a96feff2b8fc060dd9a597cb8cf9"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.290238 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b5bb3f0ba3711311e335f51955048b067b26a96feff2b8fc060dd9a597cb8cf9"} err="failed to get container status \"b5bb3f0ba3711311e335f51955048b067b26a96feff2b8fc060dd9a597cb8cf9\": rpc error: code = NotFound desc = could not find container \"b5bb3f0ba3711311e335f51955048b067b26a96feff2b8fc060dd9a597cb8cf9\": container with ID starting with b5bb3f0ba3711311e335f51955048b067b26a96feff2b8fc060dd9a597cb8cf9 not found: ID does not exist"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.330839 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6fa8fc93-7ca7-489a-a24c-fd617f50b178-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.445548 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"]
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.462681 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"]
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.475232 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"]
Nov 28 15:07:17 crc kubenswrapper[4857]: E1128 15:07:17.475601 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fa8fc93-7ca7-489a-a24c-fd617f50b178" containerName="cinder-api"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.475617 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fa8fc93-7ca7-489a-a24c-fd617f50b178" containerName="cinder-api"
Nov 28 15:07:17 crc kubenswrapper[4857]: E1128 15:07:17.475634 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fa8fc93-7ca7-489a-a24c-fd617f50b178" containerName="cinder-api-log"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.475640 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fa8fc93-7ca7-489a-a24c-fd617f50b178" containerName="cinder-api-log"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.475832 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fa8fc93-7ca7-489a-a24c-fd617f50b178" containerName="cinder-api"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.475853 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fa8fc93-7ca7-489a-a24c-fd617f50b178" containerName="cinder-api-log"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.476835 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.482338 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.493582 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.535218 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f4667b41-d1a9-4a7b-9756-1a3d1e8e5170-etc-machine-id\") pod \"cinder-api-0\" (UID: \"f4667b41-d1a9-4a7b-9756-1a3d1e8e5170\") " pod="openstack/cinder-api-0"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.535502 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f4667b41-d1a9-4a7b-9756-1a3d1e8e5170-config-data\") pod \"cinder-api-0\" (UID: \"f4667b41-d1a9-4a7b-9756-1a3d1e8e5170\") " pod="openstack/cinder-api-0"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.535706 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4667b41-d1a9-4a7b-9756-1a3d1e8e5170-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"f4667b41-d1a9-4a7b-9756-1a3d1e8e5170\") " pod="openstack/cinder-api-0"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.535975 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f4667b41-d1a9-4a7b-9756-1a3d1e8e5170-scripts\") pod \"cinder-api-0\" (UID: \"f4667b41-d1a9-4a7b-9756-1a3d1e8e5170\") " pod="openstack/cinder-api-0"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.536304 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f4667b41-d1a9-4a7b-9756-1a3d1e8e5170-logs\") pod \"cinder-api-0\" (UID: \"f4667b41-d1a9-4a7b-9756-1a3d1e8e5170\") " pod="openstack/cinder-api-0"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.536338 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-swdsr\" (UniqueName: \"kubernetes.io/projected/f4667b41-d1a9-4a7b-9756-1a3d1e8e5170-kube-api-access-swdsr\") pod \"cinder-api-0\" (UID: \"f4667b41-d1a9-4a7b-9756-1a3d1e8e5170\") " pod="openstack/cinder-api-0"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.536373 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f4667b41-d1a9-4a7b-9756-1a3d1e8e5170-config-data-custom\") pod \"cinder-api-0\" (UID: \"f4667b41-d1a9-4a7b-9756-1a3d1e8e5170\") " pod="openstack/cinder-api-0"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.637476 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f4667b41-d1a9-4a7b-9756-1a3d1e8e5170-scripts\") pod \"cinder-api-0\" (UID: \"f4667b41-d1a9-4a7b-9756-1a3d1e8e5170\") " pod="openstack/cinder-api-0"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.637637 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f4667b41-d1a9-4a7b-9756-1a3d1e8e5170-logs\") pod \"cinder-api-0\" (UID: \"f4667b41-d1a9-4a7b-9756-1a3d1e8e5170\") " pod="openstack/cinder-api-0"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.637659 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-swdsr\" (UniqueName: \"kubernetes.io/projected/f4667b41-d1a9-4a7b-9756-1a3d1e8e5170-kube-api-access-swdsr\") pod \"cinder-api-0\" (UID: \"f4667b41-d1a9-4a7b-9756-1a3d1e8e5170\") " pod="openstack/cinder-api-0"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.637680 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f4667b41-d1a9-4a7b-9756-1a3d1e8e5170-config-data-custom\") pod \"cinder-api-0\" (UID: \"f4667b41-d1a9-4a7b-9756-1a3d1e8e5170\") " pod="openstack/cinder-api-0"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.637711 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f4667b41-d1a9-4a7b-9756-1a3d1e8e5170-etc-machine-id\") pod \"cinder-api-0\" (UID: \"f4667b41-d1a9-4a7b-9756-1a3d1e8e5170\") " pod="openstack/cinder-api-0"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.637728 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f4667b41-d1a9-4a7b-9756-1a3d1e8e5170-config-data\") pod \"cinder-api-0\" (UID: \"f4667b41-d1a9-4a7b-9756-1a3d1e8e5170\") " pod="openstack/cinder-api-0"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.637779 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4667b41-d1a9-4a7b-9756-1a3d1e8e5170-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"f4667b41-d1a9-4a7b-9756-1a3d1e8e5170\") " pod="openstack/cinder-api-0"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.638225 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f4667b41-d1a9-4a7b-9756-1a3d1e8e5170-etc-machine-id\") pod \"cinder-api-0\" (UID: \"f4667b41-d1a9-4a7b-9756-1a3d1e8e5170\") " pod="openstack/cinder-api-0"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.638293 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f4667b41-d1a9-4a7b-9756-1a3d1e8e5170-logs\") pod \"cinder-api-0\" (UID: \"f4667b41-d1a9-4a7b-9756-1a3d1e8e5170\") " pod="openstack/cinder-api-0"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.641128 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4667b41-d1a9-4a7b-9756-1a3d1e8e5170-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"f4667b41-d1a9-4a7b-9756-1a3d1e8e5170\") " pod="openstack/cinder-api-0"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.641786 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f4667b41-d1a9-4a7b-9756-1a3d1e8e5170-scripts\") pod \"cinder-api-0\" (UID: \"f4667b41-d1a9-4a7b-9756-1a3d1e8e5170\") " pod="openstack/cinder-api-0"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.642341 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f4667b41-d1a9-4a7b-9756-1a3d1e8e5170-config-data-custom\") pod \"cinder-api-0\" (UID: \"f4667b41-d1a9-4a7b-9756-1a3d1e8e5170\") " pod="openstack/cinder-api-0"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.653311 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f4667b41-d1a9-4a7b-9756-1a3d1e8e5170-config-data\") pod \"cinder-api-0\" (UID: \"f4667b41-d1a9-4a7b-9756-1a3d1e8e5170\") " pod="openstack/cinder-api-0"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.664991 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-swdsr\" (UniqueName: \"kubernetes.io/projected/f4667b41-d1a9-4a7b-9756-1a3d1e8e5170-kube-api-access-swdsr\") pod \"cinder-api-0\" (UID: \"f4667b41-d1a9-4a7b-9756-1a3d1e8e5170\") " pod="openstack/cinder-api-0"
Nov 28 15:07:17 crc kubenswrapper[4857]: I1128 15:07:17.802656 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Nov 28 15:07:18 crc kubenswrapper[4857]: I1128 15:07:18.014127 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-volume-volume1-0"
Nov 28 15:07:18 crc kubenswrapper[4857]: I1128 15:07:18.244561 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6fa8fc93-7ca7-489a-a24c-fd617f50b178" path="/var/lib/kubelet/pods/6fa8fc93-7ca7-489a-a24c-fd617f50b178/volumes"
Nov 28 15:07:18 crc kubenswrapper[4857]: I1128 15:07:18.361093 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Nov 28 15:07:18 crc kubenswrapper[4857]: I1128 15:07:18.679102 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-backup-0"
Nov 28 15:07:19 crc kubenswrapper[4857]: I1128 15:07:19.137990 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"f4667b41-d1a9-4a7b-9756-1a3d1e8e5170","Type":"ContainerStarted","Data":"5acd4181e77425b2377c03c835d968d354627e49701a9a54752149f0a611f447"}
Nov 28 15:07:19 crc kubenswrapper[4857]: I1128 15:07:19.138406 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"f4667b41-d1a9-4a7b-9756-1a3d1e8e5170","Type":"ContainerStarted","Data":"cb9a2899469a9fe9bb5c8f8f54f766a0676003c90957584ef5d887f10ccfea28"}
Nov 28 15:07:20 crc kubenswrapper[4857]: I1128 15:07:20.154796 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"f4667b41-d1a9-4a7b-9756-1a3d1e8e5170","Type":"ContainerStarted","Data":"bc63bc75e7a37e5ea85d35cf7927983331ac20593fd8baf48c12f86a0a2f41fa"}
Nov 28 15:07:20 crc kubenswrapper[4857]: I1128 15:07:20.156364 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0"
Nov 28 15:07:20 crc kubenswrapper[4857]: I1128 15:07:20.197460 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.197428611 podStartE2EDuration="3.197428611s" podCreationTimestamp="2025-11-28 15:07:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:07:20.179143573 +0000 UTC m=+5890.303085040" watchObservedRunningTime="2025-11-28 15:07:20.197428611 +0000 UTC m=+5890.321370038"
Nov 28 15:07:20 crc kubenswrapper[4857]: I1128 15:07:20.894558 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0"
Nov 28 15:07:20 crc kubenswrapper[4857]: I1128 15:07:20.943248 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 15:07:21 crc kubenswrapper[4857]: I1128 15:07:21.163291 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="2357330a-f9bf-4702-a862-8fafa3f224ba" containerName="cinder-scheduler" containerID="cri-o://6cc181dc9bffbdff0a1a3dff3c456a7760eeb83adda5d243be560e84d4f4156f" gracePeriod=30
Nov 28 15:07:21 crc kubenswrapper[4857]: I1128 15:07:21.163410 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="2357330a-f9bf-4702-a862-8fafa3f224ba" containerName="probe" containerID="cri-o://3ea011c45260042cdb2468cc4a561a32a25c054b4a3d01608605d41cedda77df" gracePeriod=30
Nov 28 15:07:22 crc kubenswrapper[4857]: I1128 15:07:22.200018 4857 generic.go:334] "Generic (PLEG): container finished" podID="2357330a-f9bf-4702-a862-8fafa3f224ba" containerID="3ea011c45260042cdb2468cc4a561a32a25c054b4a3d01608605d41cedda77df" exitCode=0
Nov 28 15:07:22 crc kubenswrapper[4857]: I1128 15:07:22.203217 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"2357330a-f9bf-4702-a862-8fafa3f224ba","Type":"ContainerDied","Data":"3ea011c45260042cdb2468cc4a561a32a25c054b4a3d01608605d41cedda77df"}
Nov 28 15:07:23 crc kubenswrapper[4857]: I1128 15:07:23.286250 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-volume-volume1-0"
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.021131 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-backup-0"
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.151405 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.210370 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2357330a-f9bf-4702-a862-8fafa3f224ba-scripts\") pod \"2357330a-f9bf-4702-a862-8fafa3f224ba\" (UID: \"2357330a-f9bf-4702-a862-8fafa3f224ba\") "
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.210506 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2357330a-f9bf-4702-a862-8fafa3f224ba-config-data-custom\") pod \"2357330a-f9bf-4702-a862-8fafa3f224ba\" (UID: \"2357330a-f9bf-4702-a862-8fafa3f224ba\") "
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.210539 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2357330a-f9bf-4702-a862-8fafa3f224ba-etc-machine-id\") pod \"2357330a-f9bf-4702-a862-8fafa3f224ba\" (UID: \"2357330a-f9bf-4702-a862-8fafa3f224ba\") "
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.210575 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2357330a-f9bf-4702-a862-8fafa3f224ba-config-data\") pod \"2357330a-f9bf-4702-a862-8fafa3f224ba\" (UID: \"2357330a-f9bf-4702-a862-8fafa3f224ba\") "
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.210604 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wmhxf\" (UniqueName: \"kubernetes.io/projected/2357330a-f9bf-4702-a862-8fafa3f224ba-kube-api-access-wmhxf\") pod \"2357330a-f9bf-4702-a862-8fafa3f224ba\" (UID: \"2357330a-f9bf-4702-a862-8fafa3f224ba\") "
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.210664 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2357330a-f9bf-4702-a862-8fafa3f224ba-combined-ca-bundle\") pod \"2357330a-f9bf-4702-a862-8fafa3f224ba\" (UID: \"2357330a-f9bf-4702-a862-8fafa3f224ba\") "
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.211015 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2357330a-f9bf-4702-a862-8fafa3f224ba-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "2357330a-f9bf-4702-a862-8fafa3f224ba" (UID: "2357330a-f9bf-4702-a862-8fafa3f224ba"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.221414 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2357330a-f9bf-4702-a862-8fafa3f224ba-scripts" (OuterVolumeSpecName: "scripts") pod "2357330a-f9bf-4702-a862-8fafa3f224ba" (UID: "2357330a-f9bf-4702-a862-8fafa3f224ba"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.221456 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2357330a-f9bf-4702-a862-8fafa3f224ba-kube-api-access-wmhxf" (OuterVolumeSpecName: "kube-api-access-wmhxf") pod "2357330a-f9bf-4702-a862-8fafa3f224ba" (UID: "2357330a-f9bf-4702-a862-8fafa3f224ba"). InnerVolumeSpecName "kube-api-access-wmhxf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.222065 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2357330a-f9bf-4702-a862-8fafa3f224ba-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "2357330a-f9bf-4702-a862-8fafa3f224ba" (UID: "2357330a-f9bf-4702-a862-8fafa3f224ba"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.233904 4857 generic.go:334] "Generic (PLEG): container finished" podID="2357330a-f9bf-4702-a862-8fafa3f224ba" containerID="6cc181dc9bffbdff0a1a3dff3c456a7760eeb83adda5d243be560e84d4f4156f" exitCode=0
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.234061 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.285165 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2357330a-f9bf-4702-a862-8fafa3f224ba-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2357330a-f9bf-4702-a862-8fafa3f224ba" (UID: "2357330a-f9bf-4702-a862-8fafa3f224ba"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.303288 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"2357330a-f9bf-4702-a862-8fafa3f224ba","Type":"ContainerDied","Data":"6cc181dc9bffbdff0a1a3dff3c456a7760eeb83adda5d243be560e84d4f4156f"}
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.303360 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"2357330a-f9bf-4702-a862-8fafa3f224ba","Type":"ContainerDied","Data":"afc14c4d6c94a24543884df0b18b643097f5aa194820eed93c026efbe2ef1338"}
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.303389 4857 scope.go:117] "RemoveContainer" containerID="3ea011c45260042cdb2468cc4a561a32a25c054b4a3d01608605d41cedda77df"
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.312444 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2357330a-f9bf-4702-a862-8fafa3f224ba-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.312469 4857 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2357330a-f9bf-4702-a862-8fafa3f224ba-config-data-custom\") on node \"crc\" DevicePath \"\""
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.312479 4857 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2357330a-f9bf-4702-a862-8fafa3f224ba-etc-machine-id\") on node \"crc\" DevicePath \"\""
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.312489 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wmhxf\" (UniqueName: \"kubernetes.io/projected/2357330a-f9bf-4702-a862-8fafa3f224ba-kube-api-access-wmhxf\") on node \"crc\" DevicePath \"\""
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.312498 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2357330a-f9bf-4702-a862-8fafa3f224ba-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.330107 4857 scope.go:117] "RemoveContainer" containerID="6cc181dc9bffbdff0a1a3dff3c456a7760eeb83adda5d243be560e84d4f4156f"
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.330201 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2357330a-f9bf-4702-a862-8fafa3f224ba-config-data" (OuterVolumeSpecName: "config-data") pod "2357330a-f9bf-4702-a862-8fafa3f224ba" (UID: "2357330a-f9bf-4702-a862-8fafa3f224ba"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.356632 4857 scope.go:117] "RemoveContainer" containerID="3ea011c45260042cdb2468cc4a561a32a25c054b4a3d01608605d41cedda77df"
Nov 28 15:07:24 crc kubenswrapper[4857]: E1128 15:07:24.358095 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3ea011c45260042cdb2468cc4a561a32a25c054b4a3d01608605d41cedda77df\": container with ID starting with 3ea011c45260042cdb2468cc4a561a32a25c054b4a3d01608605d41cedda77df not found: ID does not exist" containerID="3ea011c45260042cdb2468cc4a561a32a25c054b4a3d01608605d41cedda77df"
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.358159 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3ea011c45260042cdb2468cc4a561a32a25c054b4a3d01608605d41cedda77df"} err="failed to get container status \"3ea011c45260042cdb2468cc4a561a32a25c054b4a3d01608605d41cedda77df\": rpc error: code = NotFound desc = could not find container \"3ea011c45260042cdb2468cc4a561a32a25c054b4a3d01608605d41cedda77df\": container with ID starting with 3ea011c45260042cdb2468cc4a561a32a25c054b4a3d01608605d41cedda77df not found: ID does not exist"
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.358187 4857 scope.go:117] "RemoveContainer" containerID="6cc181dc9bffbdff0a1a3dff3c456a7760eeb83adda5d243be560e84d4f4156f"
Nov 28 15:07:24 crc kubenswrapper[4857]: E1128 15:07:24.359088 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6cc181dc9bffbdff0a1a3dff3c456a7760eeb83adda5d243be560e84d4f4156f\": container with ID starting with 6cc181dc9bffbdff0a1a3dff3c456a7760eeb83adda5d243be560e84d4f4156f not found: ID does not exist" containerID="6cc181dc9bffbdff0a1a3dff3c456a7760eeb83adda5d243be560e84d4f4156f"
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.359115 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6cc181dc9bffbdff0a1a3dff3c456a7760eeb83adda5d243be560e84d4f4156f"} err="failed to get container status \"6cc181dc9bffbdff0a1a3dff3c456a7760eeb83adda5d243be560e84d4f4156f\": rpc error: code = NotFound desc = could not find container \"6cc181dc9bffbdff0a1a3dff3c456a7760eeb83adda5d243be560e84d4f4156f\": container with ID starting with 6cc181dc9bffbdff0a1a3dff3c456a7760eeb83adda5d243be560e84d4f4156f not found: ID does not exist"
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.417513 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2357330a-f9bf-4702-a862-8fafa3f224ba-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.575361 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.586623 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.607585 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 15:07:24 crc kubenswrapper[4857]: E1128 15:07:24.608174 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2357330a-f9bf-4702-a862-8fafa3f224ba" containerName="probe"
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.608200 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="2357330a-f9bf-4702-a862-8fafa3f224ba" containerName="probe"
Nov 28 15:07:24 crc kubenswrapper[4857]: E1128 15:07:24.608235 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2357330a-f9bf-4702-a862-8fafa3f224ba" containerName="cinder-scheduler"
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.608244 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="2357330a-f9bf-4702-a862-8fafa3f224ba" containerName="cinder-scheduler"
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.608571 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="2357330a-f9bf-4702-a862-8fafa3f224ba" containerName="cinder-scheduler"
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.608599 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="2357330a-f9bf-4702-a862-8fafa3f224ba" containerName="probe"
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.610910 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.614842 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data"
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.625672 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/845ef7c8-2be0-41d1-b61d-9bdcfec6b018-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"845ef7c8-2be0-41d1-b61d-9bdcfec6b018\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.625842 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mnvxc\" (UniqueName: \"kubernetes.io/projected/845ef7c8-2be0-41d1-b61d-9bdcfec6b018-kube-api-access-mnvxc\") pod \"cinder-scheduler-0\" (UID: \"845ef7c8-2be0-41d1-b61d-9bdcfec6b018\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.632475 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.640100 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/845ef7c8-2be0-41d1-b61d-9bdcfec6b018-config-data\") pod \"cinder-scheduler-0\" (UID: \"845ef7c8-2be0-41d1-b61d-9bdcfec6b018\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.640367 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/845ef7c8-2be0-41d1-b61d-9bdcfec6b018-scripts\") pod \"cinder-scheduler-0\" (UID: \"845ef7c8-2be0-41d1-b61d-9bdcfec6b018\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.640468 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/845ef7c8-2be0-41d1-b61d-9bdcfec6b018-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"845ef7c8-2be0-41d1-b61d-9bdcfec6b018\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.640731 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/845ef7c8-2be0-41d1-b61d-9bdcfec6b018-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"845ef7c8-2be0-41d1-b61d-9bdcfec6b018\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.742831 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/845ef7c8-2be0-41d1-b61d-9bdcfec6b018-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"845ef7c8-2be0-41d1-b61d-9bdcfec6b018\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.742916 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mnvxc\" (UniqueName: \"kubernetes.io/projected/845ef7c8-2be0-41d1-b61d-9bdcfec6b018-kube-api-access-mnvxc\") pod \"cinder-scheduler-0\" (UID: \"845ef7c8-2be0-41d1-b61d-9bdcfec6b018\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.743044 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/845ef7c8-2be0-41d1-b61d-9bdcfec6b018-config-data\") pod \"cinder-scheduler-0\" (UID: \"845ef7c8-2be0-41d1-b61d-9bdcfec6b018\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.743075 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/845ef7c8-2be0-41d1-b61d-9bdcfec6b018-scripts\") pod \"cinder-scheduler-0\" (UID: \"845ef7c8-2be0-41d1-b61d-9bdcfec6b018\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.743096 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/845ef7c8-2be0-41d1-b61d-9bdcfec6b018-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"845ef7c8-2be0-41d1-b61d-9bdcfec6b018\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.743129 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/845ef7c8-2be0-41d1-b61d-9bdcfec6b018-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"845ef7c8-2be0-41d1-b61d-9bdcfec6b018\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.743221 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/845ef7c8-2be0-41d1-b61d-9bdcfec6b018-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"845ef7c8-2be0-41d1-b61d-9bdcfec6b018\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.748120 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/845ef7c8-2be0-41d1-b61d-9bdcfec6b018-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"845ef7c8-2be0-41d1-b61d-9bdcfec6b018\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.748450 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/845ef7c8-2be0-41d1-b61d-9bdcfec6b018-scripts\") pod \"cinder-scheduler-0\" (UID: \"845ef7c8-2be0-41d1-b61d-9bdcfec6b018\") " pod="openstack/cinder-scheduler-0"
Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.748584 4857
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/845ef7c8-2be0-41d1-b61d-9bdcfec6b018-config-data\") pod \"cinder-scheduler-0\" (UID: \"845ef7c8-2be0-41d1-b61d-9bdcfec6b018\") " pod="openstack/cinder-scheduler-0" Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.753330 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/845ef7c8-2be0-41d1-b61d-9bdcfec6b018-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"845ef7c8-2be0-41d1-b61d-9bdcfec6b018\") " pod="openstack/cinder-scheduler-0" Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.765251 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mnvxc\" (UniqueName: \"kubernetes.io/projected/845ef7c8-2be0-41d1-b61d-9bdcfec6b018-kube-api-access-mnvxc\") pod \"cinder-scheduler-0\" (UID: \"845ef7c8-2be0-41d1-b61d-9bdcfec6b018\") " pod="openstack/cinder-scheduler-0" Nov 28 15:07:24 crc kubenswrapper[4857]: I1128 15:07:24.942361 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 28 15:07:25 crc kubenswrapper[4857]: I1128 15:07:25.470524 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 28 15:07:25 crc kubenswrapper[4857]: W1128 15:07:25.485685 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod845ef7c8_2be0_41d1_b61d_9bdcfec6b018.slice/crio-d8f89ecb6c8ee90fe2222596d1a85eeafeeee275c6a20647e0f342eeb1accd68 WatchSource:0}: Error finding container d8f89ecb6c8ee90fe2222596d1a85eeafeeee275c6a20647e0f342eeb1accd68: Status 404 returned error can't find the container with id d8f89ecb6c8ee90fe2222596d1a85eeafeeee275c6a20647e0f342eeb1accd68 Nov 28 15:07:26 crc kubenswrapper[4857]: I1128 15:07:26.243459 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2357330a-f9bf-4702-a862-8fafa3f224ba" path="/var/lib/kubelet/pods/2357330a-f9bf-4702-a862-8fafa3f224ba/volumes" Nov 28 15:07:26 crc kubenswrapper[4857]: I1128 15:07:26.279772 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"845ef7c8-2be0-41d1-b61d-9bdcfec6b018","Type":"ContainerStarted","Data":"6ff45742a3d8e85d0ccf127aba33948bb090713f990512c480f84da72aac99c5"} Nov 28 15:07:26 crc kubenswrapper[4857]: I1128 15:07:26.279846 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"845ef7c8-2be0-41d1-b61d-9bdcfec6b018","Type":"ContainerStarted","Data":"d8f89ecb6c8ee90fe2222596d1a85eeafeeee275c6a20647e0f342eeb1accd68"} Nov 28 15:07:27 crc kubenswrapper[4857]: I1128 15:07:27.313258 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"845ef7c8-2be0-41d1-b61d-9bdcfec6b018","Type":"ContainerStarted","Data":"357d6484fae79ee6fdc9b5eeaf6f0785507c62d84426df412d8a276bce30cdd8"} Nov 28 15:07:27 crc kubenswrapper[4857]: I1128 15:07:27.354498 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.35447077 podStartE2EDuration="3.35447077s" podCreationTimestamp="2025-11-28 15:07:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:07:27.346400285 +0000 UTC m=+5897.470341722" 
watchObservedRunningTime="2025-11-28 15:07:27.35447077 +0000 UTC m=+5897.478412247" Nov 28 15:07:29 crc kubenswrapper[4857]: I1128 15:07:29.623375 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 28 15:07:29 crc kubenswrapper[4857]: I1128 15:07:29.943599 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 28 15:07:30 crc kubenswrapper[4857]: I1128 15:07:30.239204 4857 scope.go:117] "RemoveContainer" containerID="241be049ff6ed3f89c9bfd340177c2b5b57d6cca2ed8fa8917c7f312eb8c7ce1" Nov 28 15:07:30 crc kubenswrapper[4857]: E1128 15:07:30.239727 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:07:35 crc kubenswrapper[4857]: I1128 15:07:35.175903 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 28 15:07:42 crc kubenswrapper[4857]: I1128 15:07:42.229609 4857 scope.go:117] "RemoveContainer" containerID="241be049ff6ed3f89c9bfd340177c2b5b57d6cca2ed8fa8917c7f312eb8c7ce1" Nov 28 15:07:42 crc kubenswrapper[4857]: E1128 15:07:42.231107 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:07:56 crc kubenswrapper[4857]: I1128 15:07:56.229585 4857 scope.go:117] "RemoveContainer" containerID="241be049ff6ed3f89c9bfd340177c2b5b57d6cca2ed8fa8917c7f312eb8c7ce1" Nov 28 15:07:56 crc kubenswrapper[4857]: E1128 15:07:56.230229 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:08:11 crc kubenswrapper[4857]: I1128 15:08:11.228751 4857 scope.go:117] "RemoveContainer" containerID="241be049ff6ed3f89c9bfd340177c2b5b57d6cca2ed8fa8917c7f312eb8c7ce1" Nov 28 15:08:11 crc kubenswrapper[4857]: E1128 15:08:11.229473 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:08:25 crc kubenswrapper[4857]: I1128 15:08:25.229805 4857 scope.go:117] "RemoveContainer" containerID="241be049ff6ed3f89c9bfd340177c2b5b57d6cca2ed8fa8917c7f312eb8c7ce1" Nov 28 15:08:25 crc kubenswrapper[4857]: E1128 15:08:25.231323 4857 pod_workers.go:1301] 
"Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:08:38 crc kubenswrapper[4857]: I1128 15:08:38.230289 4857 scope.go:117] "RemoveContainer" containerID="241be049ff6ed3f89c9bfd340177c2b5b57d6cca2ed8fa8917c7f312eb8c7ce1" Nov 28 15:08:38 crc kubenswrapper[4857]: E1128 15:08:38.231523 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:08:53 crc kubenswrapper[4857]: I1128 15:08:53.228569 4857 scope.go:117] "RemoveContainer" containerID="241be049ff6ed3f89c9bfd340177c2b5b57d6cca2ed8fa8917c7f312eb8c7ce1" Nov 28 15:08:53 crc kubenswrapper[4857]: E1128 15:08:53.229698 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:08:59 crc kubenswrapper[4857]: I1128 15:08:59.074589 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-qfdtd"] Nov 28 15:08:59 crc kubenswrapper[4857]: I1128 15:08:59.089386 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-qfdtd"] Nov 28 15:09:00 crc kubenswrapper[4857]: I1128 15:09:00.032188 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-1579-account-create-update-pt6kg"] Nov 28 15:09:00 crc kubenswrapper[4857]: I1128 15:09:00.044834 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-1579-account-create-update-pt6kg"] Nov 28 15:09:00 crc kubenswrapper[4857]: I1128 15:09:00.243431 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="abde92d0-5a59-425b-8093-fa103d0a10e0" path="/var/lib/kubelet/pods/abde92d0-5a59-425b-8093-fa103d0a10e0/volumes" Nov 28 15:09:00 crc kubenswrapper[4857]: I1128 15:09:00.245251 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e369f54d-db0e-4b44-943c-b08090c27050" path="/var/lib/kubelet/pods/e369f54d-db0e-4b44-943c-b08090c27050/volumes" Nov 28 15:09:05 crc kubenswrapper[4857]: I1128 15:09:05.230029 4857 scope.go:117] "RemoveContainer" containerID="241be049ff6ed3f89c9bfd340177c2b5b57d6cca2ed8fa8917c7f312eb8c7ce1" Nov 28 15:09:05 crc kubenswrapper[4857]: E1128 15:09:05.231250 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" 
podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:09:07 crc kubenswrapper[4857]: I1128 15:09:07.052873 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-jgj4x"] Nov 28 15:09:07 crc kubenswrapper[4857]: I1128 15:09:07.065541 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-jgj4x"] Nov 28 15:09:08 crc kubenswrapper[4857]: I1128 15:09:08.251339 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d673f490-5579-4fe5-b65c-0d1f9da6d976" path="/var/lib/kubelet/pods/d673f490-5579-4fe5-b65c-0d1f9da6d976/volumes" Nov 28 15:09:18 crc kubenswrapper[4857]: I1128 15:09:18.836807 4857 scope.go:117] "RemoveContainer" containerID="0f0a4a102d3e912ff3e948850ebca5e4a419f9611cd8dbb5e75ff81a38906ae8" Nov 28 15:09:18 crc kubenswrapper[4857]: I1128 15:09:18.886221 4857 scope.go:117] "RemoveContainer" containerID="e3639b1779810b9f09d8e2761ef459bfb05301e405f26b3ae36230c5390c326e" Nov 28 15:09:18 crc kubenswrapper[4857]: I1128 15:09:18.937778 4857 scope.go:117] "RemoveContainer" containerID="a9977b5aff16d66941afedf66511b2934faf30314b268b2ac5b8de5ea96b8f8e" Nov 28 15:09:18 crc kubenswrapper[4857]: I1128 15:09:18.984365 4857 scope.go:117] "RemoveContainer" containerID="84be4a0ed1b3a0a2ef55c4da5ce29451349d74551ad2cda4461c9f1d26d8526d" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.021835 4857 scope.go:117] "RemoveContainer" containerID="24016fc6ac5c2b457402908c6c60f64b6624aceb7ba63ee8f808706129bb2d08" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.429915 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-hg2vm"] Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.432650 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-hg2vm" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.435373 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-vtz9s" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.435754 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.454983 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-nv5l9"] Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.458014 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-nv5l9" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.463057 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-hg2vm"] Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.508333 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5a653e57-11d1-4d14-9d0d-d8afbbfcffbc-var-run-ovn\") pod \"ovn-controller-hg2vm\" (UID: \"5a653e57-11d1-4d14-9d0d-d8afbbfcffbc\") " pod="openstack/ovn-controller-hg2vm" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.508382 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ktp5d\" (UniqueName: \"kubernetes.io/projected/5a653e57-11d1-4d14-9d0d-d8afbbfcffbc-kube-api-access-ktp5d\") pod \"ovn-controller-hg2vm\" (UID: \"5a653e57-11d1-4d14-9d0d-d8afbbfcffbc\") " pod="openstack/ovn-controller-hg2vm" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.508416 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2701173a-d9ca-4d1b-8e1a-a0ef8102f92d-var-run\") pod \"ovn-controller-ovs-nv5l9\" (UID: \"2701173a-d9ca-4d1b-8e1a-a0ef8102f92d\") " pod="openstack/ovn-controller-ovs-nv5l9" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.508435 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/2701173a-d9ca-4d1b-8e1a-a0ef8102f92d-etc-ovs\") pod \"ovn-controller-ovs-nv5l9\" (UID: \"2701173a-d9ca-4d1b-8e1a-a0ef8102f92d\") " pod="openstack/ovn-controller-ovs-nv5l9" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.508464 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5a653e57-11d1-4d14-9d0d-d8afbbfcffbc-var-log-ovn\") pod \"ovn-controller-hg2vm\" (UID: \"5a653e57-11d1-4d14-9d0d-d8afbbfcffbc\") " pod="openstack/ovn-controller-hg2vm" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.508484 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/2701173a-d9ca-4d1b-8e1a-a0ef8102f92d-var-log\") pod \"ovn-controller-ovs-nv5l9\" (UID: \"2701173a-d9ca-4d1b-8e1a-a0ef8102f92d\") " pod="openstack/ovn-controller-ovs-nv5l9" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.508520 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/2701173a-d9ca-4d1b-8e1a-a0ef8102f92d-var-lib\") pod \"ovn-controller-ovs-nv5l9\" (UID: \"2701173a-d9ca-4d1b-8e1a-a0ef8102f92d\") " pod="openstack/ovn-controller-ovs-nv5l9" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.508556 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2701173a-d9ca-4d1b-8e1a-a0ef8102f92d-scripts\") pod \"ovn-controller-ovs-nv5l9\" (UID: \"2701173a-d9ca-4d1b-8e1a-a0ef8102f92d\") " pod="openstack/ovn-controller-ovs-nv5l9" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.508582 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5sxm\" (UniqueName: 
\"kubernetes.io/projected/2701173a-d9ca-4d1b-8e1a-a0ef8102f92d-kube-api-access-g5sxm\") pod \"ovn-controller-ovs-nv5l9\" (UID: \"2701173a-d9ca-4d1b-8e1a-a0ef8102f92d\") " pod="openstack/ovn-controller-ovs-nv5l9" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.508606 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5a653e57-11d1-4d14-9d0d-d8afbbfcffbc-scripts\") pod \"ovn-controller-hg2vm\" (UID: \"5a653e57-11d1-4d14-9d0d-d8afbbfcffbc\") " pod="openstack/ovn-controller-hg2vm" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.508644 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5a653e57-11d1-4d14-9d0d-d8afbbfcffbc-var-run\") pod \"ovn-controller-hg2vm\" (UID: \"5a653e57-11d1-4d14-9d0d-d8afbbfcffbc\") " pod="openstack/ovn-controller-hg2vm" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.547923 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-nv5l9"] Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.611408 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2701173a-d9ca-4d1b-8e1a-a0ef8102f92d-scripts\") pod \"ovn-controller-ovs-nv5l9\" (UID: \"2701173a-d9ca-4d1b-8e1a-a0ef8102f92d\") " pod="openstack/ovn-controller-ovs-nv5l9" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.611504 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5sxm\" (UniqueName: \"kubernetes.io/projected/2701173a-d9ca-4d1b-8e1a-a0ef8102f92d-kube-api-access-g5sxm\") pod \"ovn-controller-ovs-nv5l9\" (UID: \"2701173a-d9ca-4d1b-8e1a-a0ef8102f92d\") " pod="openstack/ovn-controller-ovs-nv5l9" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.611548 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5a653e57-11d1-4d14-9d0d-d8afbbfcffbc-scripts\") pod \"ovn-controller-hg2vm\" (UID: \"5a653e57-11d1-4d14-9d0d-d8afbbfcffbc\") " pod="openstack/ovn-controller-hg2vm" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.611621 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5a653e57-11d1-4d14-9d0d-d8afbbfcffbc-var-run\") pod \"ovn-controller-hg2vm\" (UID: \"5a653e57-11d1-4d14-9d0d-d8afbbfcffbc\") " pod="openstack/ovn-controller-hg2vm" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.611700 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5a653e57-11d1-4d14-9d0d-d8afbbfcffbc-var-run-ovn\") pod \"ovn-controller-hg2vm\" (UID: \"5a653e57-11d1-4d14-9d0d-d8afbbfcffbc\") " pod="openstack/ovn-controller-hg2vm" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.611732 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ktp5d\" (UniqueName: \"kubernetes.io/projected/5a653e57-11d1-4d14-9d0d-d8afbbfcffbc-kube-api-access-ktp5d\") pod \"ovn-controller-hg2vm\" (UID: \"5a653e57-11d1-4d14-9d0d-d8afbbfcffbc\") " pod="openstack/ovn-controller-hg2vm" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.611772 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: 
\"kubernetes.io/host-path/2701173a-d9ca-4d1b-8e1a-a0ef8102f92d-var-run\") pod \"ovn-controller-ovs-nv5l9\" (UID: \"2701173a-d9ca-4d1b-8e1a-a0ef8102f92d\") " pod="openstack/ovn-controller-ovs-nv5l9" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.611800 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/2701173a-d9ca-4d1b-8e1a-a0ef8102f92d-etc-ovs\") pod \"ovn-controller-ovs-nv5l9\" (UID: \"2701173a-d9ca-4d1b-8e1a-a0ef8102f92d\") " pod="openstack/ovn-controller-ovs-nv5l9" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.611850 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5a653e57-11d1-4d14-9d0d-d8afbbfcffbc-var-log-ovn\") pod \"ovn-controller-hg2vm\" (UID: \"5a653e57-11d1-4d14-9d0d-d8afbbfcffbc\") " pod="openstack/ovn-controller-hg2vm" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.611916 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/2701173a-d9ca-4d1b-8e1a-a0ef8102f92d-var-log\") pod \"ovn-controller-ovs-nv5l9\" (UID: \"2701173a-d9ca-4d1b-8e1a-a0ef8102f92d\") " pod="openstack/ovn-controller-ovs-nv5l9" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.611997 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/2701173a-d9ca-4d1b-8e1a-a0ef8102f92d-var-lib\") pod \"ovn-controller-ovs-nv5l9\" (UID: \"2701173a-d9ca-4d1b-8e1a-a0ef8102f92d\") " pod="openstack/ovn-controller-ovs-nv5l9" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.612559 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/2701173a-d9ca-4d1b-8e1a-a0ef8102f92d-var-lib\") pod \"ovn-controller-ovs-nv5l9\" (UID: \"2701173a-d9ca-4d1b-8e1a-a0ef8102f92d\") " pod="openstack/ovn-controller-ovs-nv5l9" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.615304 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/2701173a-d9ca-4d1b-8e1a-a0ef8102f92d-var-run\") pod \"ovn-controller-ovs-nv5l9\" (UID: \"2701173a-d9ca-4d1b-8e1a-a0ef8102f92d\") " pod="openstack/ovn-controller-ovs-nv5l9" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.615361 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5a653e57-11d1-4d14-9d0d-d8afbbfcffbc-var-run\") pod \"ovn-controller-hg2vm\" (UID: \"5a653e57-11d1-4d14-9d0d-d8afbbfcffbc\") " pod="openstack/ovn-controller-hg2vm" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.615318 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/2701173a-d9ca-4d1b-8e1a-a0ef8102f92d-etc-ovs\") pod \"ovn-controller-ovs-nv5l9\" (UID: \"2701173a-d9ca-4d1b-8e1a-a0ef8102f92d\") " pod="openstack/ovn-controller-ovs-nv5l9" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.615390 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/2701173a-d9ca-4d1b-8e1a-a0ef8102f92d-var-log\") pod \"ovn-controller-ovs-nv5l9\" (UID: \"2701173a-d9ca-4d1b-8e1a-a0ef8102f92d\") " pod="openstack/ovn-controller-ovs-nv5l9" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.615392 4857 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5a653e57-11d1-4d14-9d0d-d8afbbfcffbc-var-log-ovn\") pod \"ovn-controller-hg2vm\" (UID: \"5a653e57-11d1-4d14-9d0d-d8afbbfcffbc\") " pod="openstack/ovn-controller-hg2vm" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.615407 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5a653e57-11d1-4d14-9d0d-d8afbbfcffbc-var-run-ovn\") pod \"ovn-controller-hg2vm\" (UID: \"5a653e57-11d1-4d14-9d0d-d8afbbfcffbc\") " pod="openstack/ovn-controller-hg2vm" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.615651 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2701173a-d9ca-4d1b-8e1a-a0ef8102f92d-scripts\") pod \"ovn-controller-ovs-nv5l9\" (UID: \"2701173a-d9ca-4d1b-8e1a-a0ef8102f92d\") " pod="openstack/ovn-controller-ovs-nv5l9" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.617510 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5a653e57-11d1-4d14-9d0d-d8afbbfcffbc-scripts\") pod \"ovn-controller-hg2vm\" (UID: \"5a653e57-11d1-4d14-9d0d-d8afbbfcffbc\") " pod="openstack/ovn-controller-hg2vm" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.636809 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5sxm\" (UniqueName: \"kubernetes.io/projected/2701173a-d9ca-4d1b-8e1a-a0ef8102f92d-kube-api-access-g5sxm\") pod \"ovn-controller-ovs-nv5l9\" (UID: \"2701173a-d9ca-4d1b-8e1a-a0ef8102f92d\") " pod="openstack/ovn-controller-ovs-nv5l9" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.641351 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ktp5d\" (UniqueName: \"kubernetes.io/projected/5a653e57-11d1-4d14-9d0d-d8afbbfcffbc-kube-api-access-ktp5d\") pod \"ovn-controller-hg2vm\" (UID: \"5a653e57-11d1-4d14-9d0d-d8afbbfcffbc\") " pod="openstack/ovn-controller-hg2vm" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.787560 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-hg2vm" Nov 28 15:09:19 crc kubenswrapper[4857]: I1128 15:09:19.807126 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-nv5l9" Nov 28 15:09:20 crc kubenswrapper[4857]: I1128 15:09:20.230347 4857 scope.go:117] "RemoveContainer" containerID="241be049ff6ed3f89c9bfd340177c2b5b57d6cca2ed8fa8917c7f312eb8c7ce1" Nov 28 15:09:20 crc kubenswrapper[4857]: E1128 15:09:20.231483 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:09:20 crc kubenswrapper[4857]: I1128 15:09:20.337970 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-hg2vm"] Nov 28 15:09:20 crc kubenswrapper[4857]: I1128 15:09:20.758910 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-nv5l9"] Nov 28 15:09:20 crc kubenswrapper[4857]: I1128 15:09:20.830624 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-nv5l9" event={"ID":"2701173a-d9ca-4d1b-8e1a-a0ef8102f92d","Type":"ContainerStarted","Data":"393534213a28b5d5add4956c2231db7fc44f63916c4c9516cba45a37cc32200b"} Nov 28 15:09:20 crc kubenswrapper[4857]: I1128 15:09:20.832459 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-hg2vm" event={"ID":"5a653e57-11d1-4d14-9d0d-d8afbbfcffbc","Type":"ContainerStarted","Data":"c9037baf30f6345c6d2707c84147a4455d491fca068fa95c13520123f1ba25d8"} Nov 28 15:09:20 crc kubenswrapper[4857]: I1128 15:09:20.832482 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-hg2vm" event={"ID":"5a653e57-11d1-4d14-9d0d-d8afbbfcffbc","Type":"ContainerStarted","Data":"27032e803cc8e9e0bb1a60959aeb45f0f5d453ccf91e7ed057a155486e5dda24"} Nov 28 15:09:20 crc kubenswrapper[4857]: I1128 15:09:20.833115 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-hg2vm" Nov 28 15:09:20 crc kubenswrapper[4857]: I1128 15:09:20.865033 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-hg2vm" podStartSLOduration=1.865010842 podStartE2EDuration="1.865010842s" podCreationTimestamp="2025-11-28 15:09:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:09:20.848861421 +0000 UTC m=+6010.972802868" watchObservedRunningTime="2025-11-28 15:09:20.865010842 +0000 UTC m=+6010.988952279" Nov 28 15:09:21 crc kubenswrapper[4857]: I1128 15:09:21.044138 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-qcn9g"] Nov 28 15:09:21 crc kubenswrapper[4857]: I1128 15:09:21.050587 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-qcn9g"] Nov 28 15:09:21 crc kubenswrapper[4857]: I1128 15:09:21.478034 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-db-create-c8jwf"] Nov 28 15:09:21 crc kubenswrapper[4857]: I1128 15:09:21.479278 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-db-create-c8jwf" Nov 28 15:09:21 crc kubenswrapper[4857]: I1128 15:09:21.505734 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-create-c8jwf"] Nov 28 15:09:21 crc kubenswrapper[4857]: I1128 15:09:21.559133 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f7787446-63bc-4c06-8cd1-4b40ef714de8-operator-scripts\") pod \"octavia-db-create-c8jwf\" (UID: \"f7787446-63bc-4c06-8cd1-4b40ef714de8\") " pod="openstack/octavia-db-create-c8jwf" Nov 28 15:09:21 crc kubenswrapper[4857]: I1128 15:09:21.559250 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-km4lg\" (UniqueName: \"kubernetes.io/projected/f7787446-63bc-4c06-8cd1-4b40ef714de8-kube-api-access-km4lg\") pod \"octavia-db-create-c8jwf\" (UID: \"f7787446-63bc-4c06-8cd1-4b40ef714de8\") " pod="openstack/octavia-db-create-c8jwf" Nov 28 15:09:21 crc kubenswrapper[4857]: I1128 15:09:21.661059 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f7787446-63bc-4c06-8cd1-4b40ef714de8-operator-scripts\") pod \"octavia-db-create-c8jwf\" (UID: \"f7787446-63bc-4c06-8cd1-4b40ef714de8\") " pod="openstack/octavia-db-create-c8jwf" Nov 28 15:09:21 crc kubenswrapper[4857]: I1128 15:09:21.661129 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-km4lg\" (UniqueName: \"kubernetes.io/projected/f7787446-63bc-4c06-8cd1-4b40ef714de8-kube-api-access-km4lg\") pod \"octavia-db-create-c8jwf\" (UID: \"f7787446-63bc-4c06-8cd1-4b40ef714de8\") " pod="openstack/octavia-db-create-c8jwf" Nov 28 15:09:21 crc kubenswrapper[4857]: I1128 15:09:21.663574 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f7787446-63bc-4c06-8cd1-4b40ef714de8-operator-scripts\") pod \"octavia-db-create-c8jwf\" (UID: \"f7787446-63bc-4c06-8cd1-4b40ef714de8\") " pod="openstack/octavia-db-create-c8jwf" Nov 28 15:09:21 crc kubenswrapper[4857]: I1128 15:09:21.692780 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-km4lg\" (UniqueName: \"kubernetes.io/projected/f7787446-63bc-4c06-8cd1-4b40ef714de8-kube-api-access-km4lg\") pod \"octavia-db-create-c8jwf\" (UID: \"f7787446-63bc-4c06-8cd1-4b40ef714de8\") " pod="openstack/octavia-db-create-c8jwf" Nov 28 15:09:21 crc kubenswrapper[4857]: I1128 15:09:21.824006 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-db-create-c8jwf" Nov 28 15:09:21 crc kubenswrapper[4857]: I1128 15:09:21.874604 4857 generic.go:334] "Generic (PLEG): container finished" podID="2701173a-d9ca-4d1b-8e1a-a0ef8102f92d" containerID="c49c9c59bdb3ff518966dbc831afcb48c1a83aab2820ad46a4a7d0bac4d179a5" exitCode=0 Nov 28 15:09:21 crc kubenswrapper[4857]: I1128 15:09:21.875305 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-nv5l9" event={"ID":"2701173a-d9ca-4d1b-8e1a-a0ef8102f92d","Type":"ContainerDied","Data":"c49c9c59bdb3ff518966dbc831afcb48c1a83aab2820ad46a4a7d0bac4d179a5"} Nov 28 15:09:22 crc kubenswrapper[4857]: I1128 15:09:22.124222 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-zrjff"] Nov 28 15:09:22 crc kubenswrapper[4857]: I1128 15:09:22.132364 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-zrjff" Nov 28 15:09:22 crc kubenswrapper[4857]: I1128 15:09:22.135307 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 28 15:09:22 crc kubenswrapper[4857]: I1128 15:09:22.140174 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-zrjff"] Nov 28 15:09:22 crc kubenswrapper[4857]: I1128 15:09:22.187724 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/39b5a11e-b819-4562-9b61-82421fcabe18-ovs-rundir\") pod \"ovn-controller-metrics-zrjff\" (UID: \"39b5a11e-b819-4562-9b61-82421fcabe18\") " pod="openstack/ovn-controller-metrics-zrjff" Nov 28 15:09:22 crc kubenswrapper[4857]: I1128 15:09:22.188092 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/39b5a11e-b819-4562-9b61-82421fcabe18-ovn-rundir\") pod \"ovn-controller-metrics-zrjff\" (UID: \"39b5a11e-b819-4562-9b61-82421fcabe18\") " pod="openstack/ovn-controller-metrics-zrjff" Nov 28 15:09:22 crc kubenswrapper[4857]: I1128 15:09:22.188411 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39b5a11e-b819-4562-9b61-82421fcabe18-config\") pod \"ovn-controller-metrics-zrjff\" (UID: \"39b5a11e-b819-4562-9b61-82421fcabe18\") " pod="openstack/ovn-controller-metrics-zrjff" Nov 28 15:09:22 crc kubenswrapper[4857]: I1128 15:09:22.188574 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tblvj\" (UniqueName: \"kubernetes.io/projected/39b5a11e-b819-4562-9b61-82421fcabe18-kube-api-access-tblvj\") pod \"ovn-controller-metrics-zrjff\" (UID: \"39b5a11e-b819-4562-9b61-82421fcabe18\") " pod="openstack/ovn-controller-metrics-zrjff" Nov 28 15:09:22 crc kubenswrapper[4857]: I1128 15:09:22.250651 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="778872e4-550d-471f-989e-866581a3ba7b" path="/var/lib/kubelet/pods/778872e4-550d-471f-989e-866581a3ba7b/volumes" Nov 28 15:09:22 crc kubenswrapper[4857]: I1128 15:09:22.292067 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tblvj\" (UniqueName: \"kubernetes.io/projected/39b5a11e-b819-4562-9b61-82421fcabe18-kube-api-access-tblvj\") pod \"ovn-controller-metrics-zrjff\" (UID: \"39b5a11e-b819-4562-9b61-82421fcabe18\") " 
pod="openstack/ovn-controller-metrics-zrjff" Nov 28 15:09:22 crc kubenswrapper[4857]: I1128 15:09:22.292146 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/39b5a11e-b819-4562-9b61-82421fcabe18-ovs-rundir\") pod \"ovn-controller-metrics-zrjff\" (UID: \"39b5a11e-b819-4562-9b61-82421fcabe18\") " pod="openstack/ovn-controller-metrics-zrjff" Nov 28 15:09:22 crc kubenswrapper[4857]: I1128 15:09:22.292233 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/39b5a11e-b819-4562-9b61-82421fcabe18-ovn-rundir\") pod \"ovn-controller-metrics-zrjff\" (UID: \"39b5a11e-b819-4562-9b61-82421fcabe18\") " pod="openstack/ovn-controller-metrics-zrjff" Nov 28 15:09:22 crc kubenswrapper[4857]: I1128 15:09:22.292320 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39b5a11e-b819-4562-9b61-82421fcabe18-config\") pod \"ovn-controller-metrics-zrjff\" (UID: \"39b5a11e-b819-4562-9b61-82421fcabe18\") " pod="openstack/ovn-controller-metrics-zrjff" Nov 28 15:09:22 crc kubenswrapper[4857]: I1128 15:09:22.292610 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/39b5a11e-b819-4562-9b61-82421fcabe18-ovs-rundir\") pod \"ovn-controller-metrics-zrjff\" (UID: \"39b5a11e-b819-4562-9b61-82421fcabe18\") " pod="openstack/ovn-controller-metrics-zrjff" Nov 28 15:09:22 crc kubenswrapper[4857]: I1128 15:09:22.292630 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/39b5a11e-b819-4562-9b61-82421fcabe18-ovn-rundir\") pod \"ovn-controller-metrics-zrjff\" (UID: \"39b5a11e-b819-4562-9b61-82421fcabe18\") " pod="openstack/ovn-controller-metrics-zrjff" Nov 28 15:09:22 crc kubenswrapper[4857]: I1128 15:09:22.293717 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39b5a11e-b819-4562-9b61-82421fcabe18-config\") pod \"ovn-controller-metrics-zrjff\" (UID: \"39b5a11e-b819-4562-9b61-82421fcabe18\") " pod="openstack/ovn-controller-metrics-zrjff" Nov 28 15:09:22 crc kubenswrapper[4857]: I1128 15:09:22.319997 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tblvj\" (UniqueName: \"kubernetes.io/projected/39b5a11e-b819-4562-9b61-82421fcabe18-kube-api-access-tblvj\") pod \"ovn-controller-metrics-zrjff\" (UID: \"39b5a11e-b819-4562-9b61-82421fcabe18\") " pod="openstack/ovn-controller-metrics-zrjff" Nov 28 15:09:22 crc kubenswrapper[4857]: I1128 15:09:22.466623 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-zrjff" Nov 28 15:09:22 crc kubenswrapper[4857]: I1128 15:09:22.493809 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-create-c8jwf"] Nov 28 15:09:22 crc kubenswrapper[4857]: I1128 15:09:22.894516 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-create-c8jwf" event={"ID":"f7787446-63bc-4c06-8cd1-4b40ef714de8","Type":"ContainerStarted","Data":"41ad8bfcaaffcfe59a32f71a896aeabbe93dd58d67acb1eba22c50f02cef8468"} Nov 28 15:09:22 crc kubenswrapper[4857]: I1128 15:09:22.901220 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-nv5l9" event={"ID":"2701173a-d9ca-4d1b-8e1a-a0ef8102f92d","Type":"ContainerStarted","Data":"5deb0c1eeeae32b39be2bf597c30c3cf856e5add1cd6c19d3e45f3782d8c0324"} Nov 28 15:09:23 crc kubenswrapper[4857]: I1128 15:09:23.121322 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-zrjff"] Nov 28 15:09:23 crc kubenswrapper[4857]: W1128 15:09:23.127742 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod39b5a11e_b819_4562_9b61_82421fcabe18.slice/crio-2e0d1c72c076e59fc2872d81ecb859648a30b0350c8b92d398d0326f5acd1298 WatchSource:0}: Error finding container 2e0d1c72c076e59fc2872d81ecb859648a30b0350c8b92d398d0326f5acd1298: Status 404 returned error can't find the container with id 2e0d1c72c076e59fc2872d81ecb859648a30b0350c8b92d398d0326f5acd1298 Nov 28 15:09:23 crc kubenswrapper[4857]: I1128 15:09:23.858704 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-fe57-account-create-update-ft9d9"] Nov 28 15:09:23 crc kubenswrapper[4857]: I1128 15:09:23.861789 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-fe57-account-create-update-ft9d9" Nov 28 15:09:23 crc kubenswrapper[4857]: I1128 15:09:23.866508 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-db-secret" Nov 28 15:09:23 crc kubenswrapper[4857]: I1128 15:09:23.870404 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-fe57-account-create-update-ft9d9"] Nov 28 15:09:23 crc kubenswrapper[4857]: I1128 15:09:23.932268 4857 generic.go:334] "Generic (PLEG): container finished" podID="f7787446-63bc-4c06-8cd1-4b40ef714de8" containerID="349359cd13104fa413935e63a54a4b6af67d587be8162717a95a7f05c40cd3c5" exitCode=0 Nov 28 15:09:23 crc kubenswrapper[4857]: I1128 15:09:23.932354 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-create-c8jwf" event={"ID":"f7787446-63bc-4c06-8cd1-4b40ef714de8","Type":"ContainerDied","Data":"349359cd13104fa413935e63a54a4b6af67d587be8162717a95a7f05c40cd3c5"} Nov 28 15:09:23 crc kubenswrapper[4857]: I1128 15:09:23.936231 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-zrjff" event={"ID":"39b5a11e-b819-4562-9b61-82421fcabe18","Type":"ContainerStarted","Data":"27f20f3649c22c594369bb0615bec4309d4d7ebf52519a2290bac233d169cb78"} Nov 28 15:09:23 crc kubenswrapper[4857]: I1128 15:09:23.936268 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-zrjff" event={"ID":"39b5a11e-b819-4562-9b61-82421fcabe18","Type":"ContainerStarted","Data":"2e0d1c72c076e59fc2872d81ecb859648a30b0350c8b92d398d0326f5acd1298"} Nov 28 15:09:23 crc kubenswrapper[4857]: I1128 15:09:23.940884 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-nv5l9" event={"ID":"2701173a-d9ca-4d1b-8e1a-a0ef8102f92d","Type":"ContainerStarted","Data":"70f8b9ed638c17cfd5872fcf11f976cd2f158d8fc3c5f09c57f7739ed9505ea0"} Nov 28 15:09:23 crc kubenswrapper[4857]: I1128 15:09:23.941348 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-nv5l9" Nov 28 15:09:23 crc kubenswrapper[4857]: I1128 15:09:23.941489 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-nv5l9" Nov 28 15:09:23 crc kubenswrapper[4857]: I1128 15:09:23.949153 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c90553bd-80df-4379-a1a7-dff0ca2619d3-operator-scripts\") pod \"octavia-fe57-account-create-update-ft9d9\" (UID: \"c90553bd-80df-4379-a1a7-dff0ca2619d3\") " pod="openstack/octavia-fe57-account-create-update-ft9d9" Nov 28 15:09:23 crc kubenswrapper[4857]: I1128 15:09:23.949409 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-92rv9\" (UniqueName: \"kubernetes.io/projected/c90553bd-80df-4379-a1a7-dff0ca2619d3-kube-api-access-92rv9\") pod \"octavia-fe57-account-create-update-ft9d9\" (UID: \"c90553bd-80df-4379-a1a7-dff0ca2619d3\") " pod="openstack/octavia-fe57-account-create-update-ft9d9" Nov 28 15:09:23 crc kubenswrapper[4857]: I1128 15:09:23.989483 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-nv5l9" podStartSLOduration=4.989432267 podStartE2EDuration="4.989432267s" podCreationTimestamp="2025-11-28 15:09:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-11-28 15:09:23.974688904 +0000 UTC m=+6014.098630341" watchObservedRunningTime="2025-11-28 15:09:23.989432267 +0000 UTC m=+6014.113373724" Nov 28 15:09:24 crc kubenswrapper[4857]: I1128 15:09:24.014156 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-zrjff" podStartSLOduration=2.014122177 podStartE2EDuration="2.014122177s" podCreationTimestamp="2025-11-28 15:09:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:09:23.994831092 +0000 UTC m=+6014.118772549" watchObservedRunningTime="2025-11-28 15:09:24.014122177 +0000 UTC m=+6014.138063654" Nov 28 15:09:24 crc kubenswrapper[4857]: I1128 15:09:24.052417 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c90553bd-80df-4379-a1a7-dff0ca2619d3-operator-scripts\") pod \"octavia-fe57-account-create-update-ft9d9\" (UID: \"c90553bd-80df-4379-a1a7-dff0ca2619d3\") " pod="openstack/octavia-fe57-account-create-update-ft9d9" Nov 28 15:09:24 crc kubenswrapper[4857]: I1128 15:09:24.052560 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-92rv9\" (UniqueName: \"kubernetes.io/projected/c90553bd-80df-4379-a1a7-dff0ca2619d3-kube-api-access-92rv9\") pod \"octavia-fe57-account-create-update-ft9d9\" (UID: \"c90553bd-80df-4379-a1a7-dff0ca2619d3\") " pod="openstack/octavia-fe57-account-create-update-ft9d9" Nov 28 15:09:24 crc kubenswrapper[4857]: I1128 15:09:24.053735 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c90553bd-80df-4379-a1a7-dff0ca2619d3-operator-scripts\") pod \"octavia-fe57-account-create-update-ft9d9\" (UID: \"c90553bd-80df-4379-a1a7-dff0ca2619d3\") " pod="openstack/octavia-fe57-account-create-update-ft9d9" Nov 28 15:09:24 crc kubenswrapper[4857]: I1128 15:09:24.114909 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-92rv9\" (UniqueName: \"kubernetes.io/projected/c90553bd-80df-4379-a1a7-dff0ca2619d3-kube-api-access-92rv9\") pod \"octavia-fe57-account-create-update-ft9d9\" (UID: \"c90553bd-80df-4379-a1a7-dff0ca2619d3\") " pod="openstack/octavia-fe57-account-create-update-ft9d9" Nov 28 15:09:24 crc kubenswrapper[4857]: I1128 15:09:24.223208 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-fe57-account-create-update-ft9d9" Nov 28 15:09:24 crc kubenswrapper[4857]: I1128 15:09:24.810237 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-fe57-account-create-update-ft9d9"] Nov 28 15:09:24 crc kubenswrapper[4857]: I1128 15:09:24.956054 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-fe57-account-create-update-ft9d9" event={"ID":"c90553bd-80df-4379-a1a7-dff0ca2619d3","Type":"ContainerStarted","Data":"ea808123b06ba8aa33d5ce17ffa3a9e5aeebcb44de19e158d1bf01d3084421d7"} Nov 28 15:09:25 crc kubenswrapper[4857]: I1128 15:09:25.343414 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-db-create-c8jwf"
Nov 28 15:09:25 crc kubenswrapper[4857]: I1128 15:09:25.395459 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f7787446-63bc-4c06-8cd1-4b40ef714de8-operator-scripts\") pod \"f7787446-63bc-4c06-8cd1-4b40ef714de8\" (UID: \"f7787446-63bc-4c06-8cd1-4b40ef714de8\") "
Nov 28 15:09:25 crc kubenswrapper[4857]: I1128 15:09:25.395678 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-km4lg\" (UniqueName: \"kubernetes.io/projected/f7787446-63bc-4c06-8cd1-4b40ef714de8-kube-api-access-km4lg\") pod \"f7787446-63bc-4c06-8cd1-4b40ef714de8\" (UID: \"f7787446-63bc-4c06-8cd1-4b40ef714de8\") "
Nov 28 15:09:25 crc kubenswrapper[4857]: I1128 15:09:25.396728 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f7787446-63bc-4c06-8cd1-4b40ef714de8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f7787446-63bc-4c06-8cd1-4b40ef714de8" (UID: "f7787446-63bc-4c06-8cd1-4b40ef714de8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:09:25 crc kubenswrapper[4857]: I1128 15:09:25.409332 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7787446-63bc-4c06-8cd1-4b40ef714de8-kube-api-access-km4lg" (OuterVolumeSpecName: "kube-api-access-km4lg") pod "f7787446-63bc-4c06-8cd1-4b40ef714de8" (UID: "f7787446-63bc-4c06-8cd1-4b40ef714de8"). InnerVolumeSpecName "kube-api-access-km4lg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:09:25 crc kubenswrapper[4857]: I1128 15:09:25.501183 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-km4lg\" (UniqueName: \"kubernetes.io/projected/f7787446-63bc-4c06-8cd1-4b40ef714de8-kube-api-access-km4lg\") on node \"crc\" DevicePath \"\""
Nov 28 15:09:25 crc kubenswrapper[4857]: I1128 15:09:25.501238 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f7787446-63bc-4c06-8cd1-4b40ef714de8-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 15:09:25 crc kubenswrapper[4857]: I1128 15:09:25.975638 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-create-c8jwf"
Nov 28 15:09:25 crc kubenswrapper[4857]: I1128 15:09:25.975700 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-create-c8jwf" event={"ID":"f7787446-63bc-4c06-8cd1-4b40ef714de8","Type":"ContainerDied","Data":"41ad8bfcaaffcfe59a32f71a896aeabbe93dd58d67acb1eba22c50f02cef8468"}
Nov 28 15:09:25 crc kubenswrapper[4857]: I1128 15:09:25.977041 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="41ad8bfcaaffcfe59a32f71a896aeabbe93dd58d67acb1eba22c50f02cef8468"
Nov 28 15:09:25 crc kubenswrapper[4857]: I1128 15:09:25.980126 4857 generic.go:334] "Generic (PLEG): container finished" podID="c90553bd-80df-4379-a1a7-dff0ca2619d3" containerID="45c82728d485824b52bc0e2420b2549f930819f64e69c90e153bbce41bd281e6" exitCode=0
Nov 28 15:09:25 crc kubenswrapper[4857]: I1128 15:09:25.980316 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-fe57-account-create-update-ft9d9" event={"ID":"c90553bd-80df-4379-a1a7-dff0ca2619d3","Type":"ContainerDied","Data":"45c82728d485824b52bc0e2420b2549f930819f64e69c90e153bbce41bd281e6"}
Nov 28 15:09:27 crc kubenswrapper[4857]: I1128 15:09:27.484058 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-fe57-account-create-update-ft9d9"
Nov 28 15:09:27 crc kubenswrapper[4857]: I1128 15:09:27.556609 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c90553bd-80df-4379-a1a7-dff0ca2619d3-operator-scripts\") pod \"c90553bd-80df-4379-a1a7-dff0ca2619d3\" (UID: \"c90553bd-80df-4379-a1a7-dff0ca2619d3\") "
Nov 28 15:09:27 crc kubenswrapper[4857]: I1128 15:09:27.556677 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-92rv9\" (UniqueName: \"kubernetes.io/projected/c90553bd-80df-4379-a1a7-dff0ca2619d3-kube-api-access-92rv9\") pod \"c90553bd-80df-4379-a1a7-dff0ca2619d3\" (UID: \"c90553bd-80df-4379-a1a7-dff0ca2619d3\") "
Nov 28 15:09:27 crc kubenswrapper[4857]: I1128 15:09:27.558748 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c90553bd-80df-4379-a1a7-dff0ca2619d3-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c90553bd-80df-4379-a1a7-dff0ca2619d3" (UID: "c90553bd-80df-4379-a1a7-dff0ca2619d3"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:09:27 crc kubenswrapper[4857]: I1128 15:09:27.571421 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c90553bd-80df-4379-a1a7-dff0ca2619d3-kube-api-access-92rv9" (OuterVolumeSpecName: "kube-api-access-92rv9") pod "c90553bd-80df-4379-a1a7-dff0ca2619d3" (UID: "c90553bd-80df-4379-a1a7-dff0ca2619d3"). InnerVolumeSpecName "kube-api-access-92rv9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:09:27 crc kubenswrapper[4857]: I1128 15:09:27.659669 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c90553bd-80df-4379-a1a7-dff0ca2619d3-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 15:09:27 crc kubenswrapper[4857]: I1128 15:09:27.659744 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-92rv9\" (UniqueName: \"kubernetes.io/projected/c90553bd-80df-4379-a1a7-dff0ca2619d3-kube-api-access-92rv9\") on node \"crc\" DevicePath \"\""
Nov 28 15:09:28 crc kubenswrapper[4857]: I1128 15:09:28.009918 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-fe57-account-create-update-ft9d9" event={"ID":"c90553bd-80df-4379-a1a7-dff0ca2619d3","Type":"ContainerDied","Data":"ea808123b06ba8aa33d5ce17ffa3a9e5aeebcb44de19e158d1bf01d3084421d7"}
Nov 28 15:09:28 crc kubenswrapper[4857]: I1128 15:09:28.009979 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ea808123b06ba8aa33d5ce17ffa3a9e5aeebcb44de19e158d1bf01d3084421d7"
Nov 28 15:09:28 crc kubenswrapper[4857]: I1128 15:09:28.010021 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-fe57-account-create-update-ft9d9"
Nov 28 15:09:30 crc kubenswrapper[4857]: I1128 15:09:30.521330 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-persistence-db-create-q927d"]
Nov 28 15:09:30 crc kubenswrapper[4857]: E1128 15:09:30.522593 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c90553bd-80df-4379-a1a7-dff0ca2619d3" containerName="mariadb-account-create-update"
Nov 28 15:09:30 crc kubenswrapper[4857]: I1128 15:09:30.522611 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="c90553bd-80df-4379-a1a7-dff0ca2619d3" containerName="mariadb-account-create-update"
Nov 28 15:09:30 crc kubenswrapper[4857]: E1128 15:09:30.522623 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7787446-63bc-4c06-8cd1-4b40ef714de8" containerName="mariadb-database-create"
Nov 28 15:09:30 crc kubenswrapper[4857]: I1128 15:09:30.522630 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7787446-63bc-4c06-8cd1-4b40ef714de8" containerName="mariadb-database-create"
Nov 28 15:09:30 crc kubenswrapper[4857]: I1128 15:09:30.522852 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="c90553bd-80df-4379-a1a7-dff0ca2619d3" containerName="mariadb-account-create-update"
Nov 28 15:09:30 crc kubenswrapper[4857]: I1128 15:09:30.522868 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7787446-63bc-4c06-8cd1-4b40ef714de8" containerName="mariadb-database-create"
Nov 28 15:09:30 crc kubenswrapper[4857]: I1128 15:09:30.523630 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-persistence-db-create-q927d"
Nov 28 15:09:30 crc kubenswrapper[4857]: I1128 15:09:30.536909 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-persistence-db-create-q927d"]
Nov 28 15:09:30 crc kubenswrapper[4857]: I1128 15:09:30.639059 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghpjp\" (UniqueName: \"kubernetes.io/projected/08e82f77-ad92-4db1-bf6e-fa8a02ae639b-kube-api-access-ghpjp\") pod \"octavia-persistence-db-create-q927d\" (UID: \"08e82f77-ad92-4db1-bf6e-fa8a02ae639b\") " pod="openstack/octavia-persistence-db-create-q927d"
Nov 28 15:09:30 crc kubenswrapper[4857]: I1128 15:09:30.639506 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/08e82f77-ad92-4db1-bf6e-fa8a02ae639b-operator-scripts\") pod \"octavia-persistence-db-create-q927d\" (UID: \"08e82f77-ad92-4db1-bf6e-fa8a02ae639b\") " pod="openstack/octavia-persistence-db-create-q927d"
Nov 28 15:09:30 crc kubenswrapper[4857]: I1128 15:09:30.742410 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ghpjp\" (UniqueName: \"kubernetes.io/projected/08e82f77-ad92-4db1-bf6e-fa8a02ae639b-kube-api-access-ghpjp\") pod \"octavia-persistence-db-create-q927d\" (UID: \"08e82f77-ad92-4db1-bf6e-fa8a02ae639b\") " pod="openstack/octavia-persistence-db-create-q927d"
Nov 28 15:09:30 crc kubenswrapper[4857]: I1128 15:09:30.742482 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/08e82f77-ad92-4db1-bf6e-fa8a02ae639b-operator-scripts\") pod \"octavia-persistence-db-create-q927d\" (UID: \"08e82f77-ad92-4db1-bf6e-fa8a02ae639b\") " pod="openstack/octavia-persistence-db-create-q927d"
Nov 28 15:09:30 crc kubenswrapper[4857]: I1128 15:09:30.743431 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/08e82f77-ad92-4db1-bf6e-fa8a02ae639b-operator-scripts\") pod \"octavia-persistence-db-create-q927d\" (UID: \"08e82f77-ad92-4db1-bf6e-fa8a02ae639b\") " pod="openstack/octavia-persistence-db-create-q927d"
Nov 28 15:09:30 crc kubenswrapper[4857]: I1128 15:09:30.783174 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ghpjp\" (UniqueName: \"kubernetes.io/projected/08e82f77-ad92-4db1-bf6e-fa8a02ae639b-kube-api-access-ghpjp\") pod \"octavia-persistence-db-create-q927d\" (UID: \"08e82f77-ad92-4db1-bf6e-fa8a02ae639b\") " pod="openstack/octavia-persistence-db-create-q927d"
Nov 28 15:09:30 crc kubenswrapper[4857]: I1128 15:09:30.851902 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-persistence-db-create-q927d"
Nov 28 15:09:31 crc kubenswrapper[4857]: I1128 15:09:31.229409 4857 scope.go:117] "RemoveContainer" containerID="241be049ff6ed3f89c9bfd340177c2b5b57d6cca2ed8fa8917c7f312eb8c7ce1"
Nov 28 15:09:31 crc kubenswrapper[4857]: E1128 15:09:31.230071 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 15:09:31 crc kubenswrapper[4857]: I1128 15:09:31.457209 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-persistence-db-create-q927d"]
Nov 28 15:09:31 crc kubenswrapper[4857]: I1128 15:09:31.520684 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-2384-account-create-update-jzq8h"]
Nov 28 15:09:31 crc kubenswrapper[4857]: I1128 15:09:31.525544 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-2384-account-create-update-jzq8h"
Nov 28 15:09:31 crc kubenswrapper[4857]: I1128 15:09:31.530295 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-persistence-db-secret"
Nov 28 15:09:31 crc kubenswrapper[4857]: I1128 15:09:31.540283 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-2384-account-create-update-jzq8h"]
Nov 28 15:09:31 crc kubenswrapper[4857]: I1128 15:09:31.567350 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/78b86780-aa3c-4783-9fbf-69f51b0e4e62-operator-scripts\") pod \"octavia-2384-account-create-update-jzq8h\" (UID: \"78b86780-aa3c-4783-9fbf-69f51b0e4e62\") " pod="openstack/octavia-2384-account-create-update-jzq8h"
Nov 28 15:09:31 crc kubenswrapper[4857]: I1128 15:09:31.567464 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zbsp9\" (UniqueName: \"kubernetes.io/projected/78b86780-aa3c-4783-9fbf-69f51b0e4e62-kube-api-access-zbsp9\") pod \"octavia-2384-account-create-update-jzq8h\" (UID: \"78b86780-aa3c-4783-9fbf-69f51b0e4e62\") " pod="openstack/octavia-2384-account-create-update-jzq8h"
Nov 28 15:09:31 crc kubenswrapper[4857]: I1128 15:09:31.670127 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/78b86780-aa3c-4783-9fbf-69f51b0e4e62-operator-scripts\") pod \"octavia-2384-account-create-update-jzq8h\" (UID: \"78b86780-aa3c-4783-9fbf-69f51b0e4e62\") " pod="openstack/octavia-2384-account-create-update-jzq8h"
Nov 28 15:09:31 crc kubenswrapper[4857]: I1128 15:09:31.670920 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zbsp9\" (UniqueName: \"kubernetes.io/projected/78b86780-aa3c-4783-9fbf-69f51b0e4e62-kube-api-access-zbsp9\") pod \"octavia-2384-account-create-update-jzq8h\" (UID: \"78b86780-aa3c-4783-9fbf-69f51b0e4e62\") " pod="openstack/octavia-2384-account-create-update-jzq8h"
Nov 28 15:09:31 crc kubenswrapper[4857]: I1128 15:09:31.670918 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/78b86780-aa3c-4783-9fbf-69f51b0e4e62-operator-scripts\") pod \"octavia-2384-account-create-update-jzq8h\" (UID: \"78b86780-aa3c-4783-9fbf-69f51b0e4e62\") " pod="openstack/octavia-2384-account-create-update-jzq8h"
Nov 28 15:09:31 crc kubenswrapper[4857]: I1128 15:09:31.694569 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zbsp9\" (UniqueName: \"kubernetes.io/projected/78b86780-aa3c-4783-9fbf-69f51b0e4e62-kube-api-access-zbsp9\") pod \"octavia-2384-account-create-update-jzq8h\" (UID: \"78b86780-aa3c-4783-9fbf-69f51b0e4e62\") " pod="openstack/octavia-2384-account-create-update-jzq8h"
Nov 28 15:09:31 crc kubenswrapper[4857]: I1128 15:09:31.982782 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-2384-account-create-update-jzq8h"
Nov 28 15:09:32 crc kubenswrapper[4857]: I1128 15:09:32.188073 4857 generic.go:334] "Generic (PLEG): container finished" podID="08e82f77-ad92-4db1-bf6e-fa8a02ae639b" containerID="541ab19a9deabff835426965c0eab98aa35fd9f916f8cd696204af9b039a5b48" exitCode=0
Nov 28 15:09:32 crc kubenswrapper[4857]: I1128 15:09:32.188507 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-persistence-db-create-q927d" event={"ID":"08e82f77-ad92-4db1-bf6e-fa8a02ae639b","Type":"ContainerDied","Data":"541ab19a9deabff835426965c0eab98aa35fd9f916f8cd696204af9b039a5b48"}
Nov 28 15:09:32 crc kubenswrapper[4857]: I1128 15:09:32.188538 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-persistence-db-create-q927d" event={"ID":"08e82f77-ad92-4db1-bf6e-fa8a02ae639b","Type":"ContainerStarted","Data":"ff71a88d2b36dee15f1ba71c2d8a45c7a81316ae12d88e39ca247324e62e0d17"}
Nov 28 15:09:32 crc kubenswrapper[4857]: I1128 15:09:32.649648 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-2384-account-create-update-jzq8h"]
Nov 28 15:09:32 crc kubenswrapper[4857]: W1128 15:09:32.650163 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod78b86780_aa3c_4783_9fbf_69f51b0e4e62.slice/crio-b9652416226f00829e4eb30ed637622e8f930cb9b4331b122bf277bfa1bbb877 WatchSource:0}: Error finding container b9652416226f00829e4eb30ed637622e8f930cb9b4331b122bf277bfa1bbb877: Status 404 returned error can't find the container with id b9652416226f00829e4eb30ed637622e8f930cb9b4331b122bf277bfa1bbb877
Nov 28 15:09:33 crc kubenswrapper[4857]: I1128 15:09:33.201966 4857 generic.go:334] "Generic (PLEG): container finished" podID="78b86780-aa3c-4783-9fbf-69f51b0e4e62" containerID="defa293fabae210787d2215a9d5d5e802136add47d10ce114244db2f32bd57b6" exitCode=0
Nov 28 15:09:33 crc kubenswrapper[4857]: I1128 15:09:33.202424 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-2384-account-create-update-jzq8h" event={"ID":"78b86780-aa3c-4783-9fbf-69f51b0e4e62","Type":"ContainerDied","Data":"defa293fabae210787d2215a9d5d5e802136add47d10ce114244db2f32bd57b6"}
Nov 28 15:09:33 crc kubenswrapper[4857]: I1128 15:09:33.202475 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-2384-account-create-update-jzq8h" event={"ID":"78b86780-aa3c-4783-9fbf-69f51b0e4e62","Type":"ContainerStarted","Data":"b9652416226f00829e4eb30ed637622e8f930cb9b4331b122bf277bfa1bbb877"}
Nov 28 15:09:33 crc kubenswrapper[4857]: I1128 15:09:33.651474 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-persistence-db-create-q927d"
Nov 28 15:09:33 crc kubenswrapper[4857]: I1128 15:09:33.733354 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ghpjp\" (UniqueName: \"kubernetes.io/projected/08e82f77-ad92-4db1-bf6e-fa8a02ae639b-kube-api-access-ghpjp\") pod \"08e82f77-ad92-4db1-bf6e-fa8a02ae639b\" (UID: \"08e82f77-ad92-4db1-bf6e-fa8a02ae639b\") "
Nov 28 15:09:33 crc kubenswrapper[4857]: I1128 15:09:33.733724 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/08e82f77-ad92-4db1-bf6e-fa8a02ae639b-operator-scripts\") pod \"08e82f77-ad92-4db1-bf6e-fa8a02ae639b\" (UID: \"08e82f77-ad92-4db1-bf6e-fa8a02ae639b\") "
Nov 28 15:09:33 crc kubenswrapper[4857]: I1128 15:09:33.734336 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/08e82f77-ad92-4db1-bf6e-fa8a02ae639b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "08e82f77-ad92-4db1-bf6e-fa8a02ae639b" (UID: "08e82f77-ad92-4db1-bf6e-fa8a02ae639b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:09:33 crc kubenswrapper[4857]: I1128 15:09:33.735118 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/08e82f77-ad92-4db1-bf6e-fa8a02ae639b-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 15:09:33 crc kubenswrapper[4857]: I1128 15:09:33.746652 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08e82f77-ad92-4db1-bf6e-fa8a02ae639b-kube-api-access-ghpjp" (OuterVolumeSpecName: "kube-api-access-ghpjp") pod "08e82f77-ad92-4db1-bf6e-fa8a02ae639b" (UID: "08e82f77-ad92-4db1-bf6e-fa8a02ae639b"). InnerVolumeSpecName "kube-api-access-ghpjp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:09:33 crc kubenswrapper[4857]: I1128 15:09:33.837893 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ghpjp\" (UniqueName: \"kubernetes.io/projected/08e82f77-ad92-4db1-bf6e-fa8a02ae639b-kube-api-access-ghpjp\") on node \"crc\" DevicePath \"\""
Nov 28 15:09:34 crc kubenswrapper[4857]: I1128 15:09:34.218637 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-persistence-db-create-q927d"
Nov 28 15:09:34 crc kubenswrapper[4857]: I1128 15:09:34.218702 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-persistence-db-create-q927d" event={"ID":"08e82f77-ad92-4db1-bf6e-fa8a02ae639b","Type":"ContainerDied","Data":"ff71a88d2b36dee15f1ba71c2d8a45c7a81316ae12d88e39ca247324e62e0d17"}
Nov 28 15:09:34 crc kubenswrapper[4857]: I1128 15:09:34.219330 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ff71a88d2b36dee15f1ba71c2d8a45c7a81316ae12d88e39ca247324e62e0d17"
Nov 28 15:09:34 crc kubenswrapper[4857]: I1128 15:09:34.681868 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-2384-account-create-update-jzq8h"
Nov 28 15:09:34 crc kubenswrapper[4857]: I1128 15:09:34.764676 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zbsp9\" (UniqueName: \"kubernetes.io/projected/78b86780-aa3c-4783-9fbf-69f51b0e4e62-kube-api-access-zbsp9\") pod \"78b86780-aa3c-4783-9fbf-69f51b0e4e62\" (UID: \"78b86780-aa3c-4783-9fbf-69f51b0e4e62\") "
Nov 28 15:09:34 crc kubenswrapper[4857]: I1128 15:09:34.765007 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/78b86780-aa3c-4783-9fbf-69f51b0e4e62-operator-scripts\") pod \"78b86780-aa3c-4783-9fbf-69f51b0e4e62\" (UID: \"78b86780-aa3c-4783-9fbf-69f51b0e4e62\") "
Nov 28 15:09:34 crc kubenswrapper[4857]: I1128 15:09:34.766295 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78b86780-aa3c-4783-9fbf-69f51b0e4e62-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "78b86780-aa3c-4783-9fbf-69f51b0e4e62" (UID: "78b86780-aa3c-4783-9fbf-69f51b0e4e62"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:09:34 crc kubenswrapper[4857]: I1128 15:09:34.786987 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/78b86780-aa3c-4783-9fbf-69f51b0e4e62-kube-api-access-zbsp9" (OuterVolumeSpecName: "kube-api-access-zbsp9") pod "78b86780-aa3c-4783-9fbf-69f51b0e4e62" (UID: "78b86780-aa3c-4783-9fbf-69f51b0e4e62"). InnerVolumeSpecName "kube-api-access-zbsp9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:09:34 crc kubenswrapper[4857]: I1128 15:09:34.868753 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/78b86780-aa3c-4783-9fbf-69f51b0e4e62-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 15:09:34 crc kubenswrapper[4857]: I1128 15:09:34.869186 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zbsp9\" (UniqueName: \"kubernetes.io/projected/78b86780-aa3c-4783-9fbf-69f51b0e4e62-kube-api-access-zbsp9\") on node \"crc\" DevicePath \"\""
Nov 28 15:09:35 crc kubenswrapper[4857]: I1128 15:09:35.234580 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-2384-account-create-update-jzq8h" event={"ID":"78b86780-aa3c-4783-9fbf-69f51b0e4e62","Type":"ContainerDied","Data":"b9652416226f00829e4eb30ed637622e8f930cb9b4331b122bf277bfa1bbb877"}
Nov 28 15:09:35 crc kubenswrapper[4857]: I1128 15:09:35.235137 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b9652416226f00829e4eb30ed637622e8f930cb9b4331b122bf277bfa1bbb877"
Nov 28 15:09:35 crc kubenswrapper[4857]: I1128 15:09:35.234680 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-2384-account-create-update-jzq8h"
Nov 28 15:09:37 crc kubenswrapper[4857]: I1128 15:09:37.587184 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-api-85d89c55f6-24c4g"]
Nov 28 15:09:37 crc kubenswrapper[4857]: E1128 15:09:37.587843 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08e82f77-ad92-4db1-bf6e-fa8a02ae639b" containerName="mariadb-database-create"
Nov 28 15:09:37 crc kubenswrapper[4857]: I1128 15:09:37.587856 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="08e82f77-ad92-4db1-bf6e-fa8a02ae639b" containerName="mariadb-database-create"
Nov 28 15:09:37 crc kubenswrapper[4857]: E1128 15:09:37.587882 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78b86780-aa3c-4783-9fbf-69f51b0e4e62" containerName="mariadb-account-create-update"
Nov 28 15:09:37 crc kubenswrapper[4857]: I1128 15:09:37.587888 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="78b86780-aa3c-4783-9fbf-69f51b0e4e62" containerName="mariadb-account-create-update"
Nov 28 15:09:37 crc kubenswrapper[4857]: I1128 15:09:37.589269 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="08e82f77-ad92-4db1-bf6e-fa8a02ae639b" containerName="mariadb-database-create"
Nov 28 15:09:37 crc kubenswrapper[4857]: I1128 15:09:37.589293 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="78b86780-aa3c-4783-9fbf-69f51b0e4e62" containerName="mariadb-account-create-update"
Nov 28 15:09:37 crc kubenswrapper[4857]: I1128 15:09:37.590638 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-api-85d89c55f6-24c4g"
Nov 28 15:09:37 crc kubenswrapper[4857]: I1128 15:09:37.593254 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-octavia-dockercfg-tgm4n"
Nov 28 15:09:37 crc kubenswrapper[4857]: I1128 15:09:37.593506 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-api-config-data"
Nov 28 15:09:37 crc kubenswrapper[4857]: I1128 15:09:37.595916 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-api-scripts"
Nov 28 15:09:37 crc kubenswrapper[4857]: I1128 15:09:37.615538 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-api-85d89c55f6-24c4g"]
Nov 28 15:09:37 crc kubenswrapper[4857]: I1128 15:09:37.629817 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0be8fcd-dab0-441a-9bab-d934ea59337a-config-data\") pod \"octavia-api-85d89c55f6-24c4g\" (UID: \"a0be8fcd-dab0-441a-9bab-d934ea59337a\") " pod="openstack/octavia-api-85d89c55f6-24c4g"
Nov 28 15:09:37 crc kubenswrapper[4857]: I1128 15:09:37.629872 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a0be8fcd-dab0-441a-9bab-d934ea59337a-scripts\") pod \"octavia-api-85d89c55f6-24c4g\" (UID: \"a0be8fcd-dab0-441a-9bab-d934ea59337a\") " pod="openstack/octavia-api-85d89c55f6-24c4g"
Nov 28 15:09:37 crc kubenswrapper[4857]: I1128 15:09:37.629918 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0be8fcd-dab0-441a-9bab-d934ea59337a-combined-ca-bundle\") pod \"octavia-api-85d89c55f6-24c4g\" (UID: \"a0be8fcd-dab0-441a-9bab-d934ea59337a\") " pod="openstack/octavia-api-85d89c55f6-24c4g"
Nov 28 15:09:37 crc kubenswrapper[4857]: I1128 15:09:37.629974 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/a0be8fcd-dab0-441a-9bab-d934ea59337a-octavia-run\") pod \"octavia-api-85d89c55f6-24c4g\" (UID: \"a0be8fcd-dab0-441a-9bab-d934ea59337a\") " pod="openstack/octavia-api-85d89c55f6-24c4g"
Nov 28 15:09:37 crc kubenswrapper[4857]: I1128 15:09:37.630009 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/a0be8fcd-dab0-441a-9bab-d934ea59337a-config-data-merged\") pod \"octavia-api-85d89c55f6-24c4g\" (UID: \"a0be8fcd-dab0-441a-9bab-d934ea59337a\") " pod="openstack/octavia-api-85d89c55f6-24c4g"
Nov 28 15:09:37 crc kubenswrapper[4857]: I1128 15:09:37.732008 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0be8fcd-dab0-441a-9bab-d934ea59337a-config-data\") pod \"octavia-api-85d89c55f6-24c4g\" (UID: \"a0be8fcd-dab0-441a-9bab-d934ea59337a\") " pod="openstack/octavia-api-85d89c55f6-24c4g"
Nov 28 15:09:37 crc kubenswrapper[4857]: I1128 15:09:37.732111 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a0be8fcd-dab0-441a-9bab-d934ea59337a-scripts\") pod \"octavia-api-85d89c55f6-24c4g\" (UID: \"a0be8fcd-dab0-441a-9bab-d934ea59337a\") " pod="openstack/octavia-api-85d89c55f6-24c4g"
Nov 28 15:09:37 crc kubenswrapper[4857]: I1128 15:09:37.732191 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0be8fcd-dab0-441a-9bab-d934ea59337a-combined-ca-bundle\") pod \"octavia-api-85d89c55f6-24c4g\" (UID: \"a0be8fcd-dab0-441a-9bab-d934ea59337a\") " pod="openstack/octavia-api-85d89c55f6-24c4g"
Nov 28 15:09:37 crc kubenswrapper[4857]: I1128 15:09:37.732260 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/a0be8fcd-dab0-441a-9bab-d934ea59337a-octavia-run\") pod \"octavia-api-85d89c55f6-24c4g\" (UID: \"a0be8fcd-dab0-441a-9bab-d934ea59337a\") " pod="openstack/octavia-api-85d89c55f6-24c4g"
Nov 28 15:09:37 crc kubenswrapper[4857]: I1128 15:09:37.732323 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/a0be8fcd-dab0-441a-9bab-d934ea59337a-config-data-merged\") pod \"octavia-api-85d89c55f6-24c4g\" (UID: \"a0be8fcd-dab0-441a-9bab-d934ea59337a\") " pod="openstack/octavia-api-85d89c55f6-24c4g"
Nov 28 15:09:37 crc kubenswrapper[4857]: I1128 15:09:37.733502 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/a0be8fcd-dab0-441a-9bab-d934ea59337a-config-data-merged\") pod \"octavia-api-85d89c55f6-24c4g\" (UID: \"a0be8fcd-dab0-441a-9bab-d934ea59337a\") " pod="openstack/octavia-api-85d89c55f6-24c4g"
Nov 28 15:09:37 crc kubenswrapper[4857]: I1128 15:09:37.733509 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/a0be8fcd-dab0-441a-9bab-d934ea59337a-octavia-run\") pod \"octavia-api-85d89c55f6-24c4g\" (UID: \"a0be8fcd-dab0-441a-9bab-d934ea59337a\") " pod="openstack/octavia-api-85d89c55f6-24c4g"
Nov 28 15:09:37 crc kubenswrapper[4857]: I1128 15:09:37.739832 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a0be8fcd-dab0-441a-9bab-d934ea59337a-scripts\") pod \"octavia-api-85d89c55f6-24c4g\" (UID: \"a0be8fcd-dab0-441a-9bab-d934ea59337a\") " pod="openstack/octavia-api-85d89c55f6-24c4g"
Nov 28 15:09:37 crc kubenswrapper[4857]: I1128 15:09:37.742079 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a0be8fcd-dab0-441a-9bab-d934ea59337a-combined-ca-bundle\") pod \"octavia-api-85d89c55f6-24c4g\" (UID: \"a0be8fcd-dab0-441a-9bab-d934ea59337a\") " pod="openstack/octavia-api-85d89c55f6-24c4g"
Nov 28 15:09:37 crc kubenswrapper[4857]: I1128 15:09:37.751378 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a0be8fcd-dab0-441a-9bab-d934ea59337a-config-data\") pod \"octavia-api-85d89c55f6-24c4g\" (UID: \"a0be8fcd-dab0-441a-9bab-d934ea59337a\") " pod="openstack/octavia-api-85d89c55f6-24c4g"
Nov 28 15:09:37 crc kubenswrapper[4857]: I1128 15:09:37.933987 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-api-85d89c55f6-24c4g"
Nov 28 15:09:38 crc kubenswrapper[4857]: I1128 15:09:38.451246 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-api-85d89c55f6-24c4g"]
Nov 28 15:09:39 crc kubenswrapper[4857]: I1128 15:09:39.290386 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-85d89c55f6-24c4g" event={"ID":"a0be8fcd-dab0-441a-9bab-d934ea59337a","Type":"ContainerStarted","Data":"4fd8d7a3d689b7eed8f51a69df33a251d8e27b8693492f7ea27922497942c5d8"}
Nov 28 15:09:45 crc kubenswrapper[4857]: I1128 15:09:45.230008 4857 scope.go:117] "RemoveContainer" containerID="241be049ff6ed3f89c9bfd340177c2b5b57d6cca2ed8fa8917c7f312eb8c7ce1"
Nov 28 15:09:45 crc kubenswrapper[4857]: E1128 15:09:45.231613 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 15:09:50 crc kubenswrapper[4857]: I1128 15:09:50.455430 4857 generic.go:334] "Generic (PLEG): container finished" podID="a0be8fcd-dab0-441a-9bab-d934ea59337a" containerID="5c9ed9647bc3976ec3addaf6ecd53600f0c5a570eabe5e02031d3d7fdb30a028" exitCode=0
Nov 28 15:09:50 crc kubenswrapper[4857]: I1128 15:09:50.455527 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-85d89c55f6-24c4g" event={"ID":"a0be8fcd-dab0-441a-9bab-d934ea59337a","Type":"ContainerDied","Data":"5c9ed9647bc3976ec3addaf6ecd53600f0c5a570eabe5e02031d3d7fdb30a028"}
Nov 28 15:09:51 crc kubenswrapper[4857]: I1128 15:09:51.474026 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-85d89c55f6-24c4g" event={"ID":"a0be8fcd-dab0-441a-9bab-d934ea59337a","Type":"ContainerStarted","Data":"2841844230ff8ba4e362b72367bf00e94fb5f3a2ed5ff7fda6d6049c71802c22"}
Nov 28 15:09:51 crc kubenswrapper[4857]: I1128 15:09:51.475216 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-85d89c55f6-24c4g" event={"ID":"a0be8fcd-dab0-441a-9bab-d934ea59337a","Type":"ContainerStarted","Data":"deeb80d7c1b7daba8651690a2deb0ef5f7e139feaa6bd84a8b9ed605902ba3eb"}
Nov 28 15:09:51 crc kubenswrapper[4857]: I1128 15:09:51.475576 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-api-85d89c55f6-24c4g"
Nov 28 15:09:51 crc kubenswrapper[4857]: I1128 15:09:51.475649 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-api-85d89c55f6-24c4g"
Nov 28 15:09:51 crc kubenswrapper[4857]: I1128 15:09:51.502939 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-api-85d89c55f6-24c4g" podStartSLOduration=3.27364554 podStartE2EDuration="14.502918721s" podCreationTimestamp="2025-11-28 15:09:37 +0000 UTC" firstStartedPulling="2025-11-28 15:09:38.457583521 +0000 UTC m=+6028.581524968" lastFinishedPulling="2025-11-28 15:09:49.686856722 +0000 UTC m=+6039.810798149" observedRunningTime="2025-11-28 15:09:51.498578545 +0000 UTC m=+6041.622520002" watchObservedRunningTime="2025-11-28 15:09:51.502918721 +0000 UTC m=+6041.626860158"
Nov 28 15:09:54 crc kubenswrapper[4857]: I1128 15:09:54.861905 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-hg2vm" podUID="5a653e57-11d1-4d14-9d0d-d8afbbfcffbc" containerName="ovn-controller" probeResult="failure" output=<
Nov 28 15:09:54 crc kubenswrapper[4857]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Nov 28 15:09:54 crc kubenswrapper[4857]: >
Nov 28 15:09:54 crc kubenswrapper[4857]: I1128 15:09:54.875485 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-nv5l9"
Nov 28 15:09:54 crc kubenswrapper[4857]: I1128 15:09:54.891612 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-nv5l9"
Nov 28 15:09:55 crc kubenswrapper[4857]: I1128 15:09:55.035686 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-hg2vm-config-glpml"]
Nov 28 15:09:55 crc kubenswrapper[4857]: I1128 15:09:55.037278 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-hg2vm-config-glpml"
Nov 28 15:09:55 crc kubenswrapper[4857]: I1128 15:09:55.040933 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts"
Nov 28 15:09:55 crc kubenswrapper[4857]: I1128 15:09:55.075337 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-hg2vm-config-glpml"]
Nov 28 15:09:55 crc kubenswrapper[4857]: I1128 15:09:55.151825 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/447d115d-0dfd-4c2a-a48f-a3f253736f6f-additional-scripts\") pod \"ovn-controller-hg2vm-config-glpml\" (UID: \"447d115d-0dfd-4c2a-a48f-a3f253736f6f\") " pod="openstack/ovn-controller-hg2vm-config-glpml"
Nov 28 15:09:55 crc kubenswrapper[4857]: I1128 15:09:55.151890 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/447d115d-0dfd-4c2a-a48f-a3f253736f6f-scripts\") pod \"ovn-controller-hg2vm-config-glpml\" (UID: \"447d115d-0dfd-4c2a-a48f-a3f253736f6f\") " pod="openstack/ovn-controller-hg2vm-config-glpml"
Nov 28 15:09:55 crc kubenswrapper[4857]: I1128 15:09:55.151917 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/447d115d-0dfd-4c2a-a48f-a3f253736f6f-var-log-ovn\") pod \"ovn-controller-hg2vm-config-glpml\" (UID: \"447d115d-0dfd-4c2a-a48f-a3f253736f6f\") " pod="openstack/ovn-controller-hg2vm-config-glpml"
Nov 28 15:09:55 crc kubenswrapper[4857]: I1128 15:09:55.151961 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bdqcj\" (UniqueName: \"kubernetes.io/projected/447d115d-0dfd-4c2a-a48f-a3f253736f6f-kube-api-access-bdqcj\") pod \"ovn-controller-hg2vm-config-glpml\" (UID: \"447d115d-0dfd-4c2a-a48f-a3f253736f6f\") " pod="openstack/ovn-controller-hg2vm-config-glpml"
Nov 28 15:09:55 crc kubenswrapper[4857]: I1128 15:09:55.152026 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/447d115d-0dfd-4c2a-a48f-a3f253736f6f-var-run\") pod \"ovn-controller-hg2vm-config-glpml\" (UID: \"447d115d-0dfd-4c2a-a48f-a3f253736f6f\") " pod="openstack/ovn-controller-hg2vm-config-glpml"
Nov 28 15:09:55 crc kubenswrapper[4857]: I1128 15:09:55.152108 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/447d115d-0dfd-4c2a-a48f-a3f253736f6f-var-run-ovn\") pod \"ovn-controller-hg2vm-config-glpml\" (UID: \"447d115d-0dfd-4c2a-a48f-a3f253736f6f\") " pod="openstack/ovn-controller-hg2vm-config-glpml"
Nov 28 15:09:55 crc kubenswrapper[4857]: I1128 15:09:55.254536 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/447d115d-0dfd-4c2a-a48f-a3f253736f6f-var-run\") pod \"ovn-controller-hg2vm-config-glpml\" (UID: \"447d115d-0dfd-4c2a-a48f-a3f253736f6f\") " pod="openstack/ovn-controller-hg2vm-config-glpml"
Nov 28 15:09:55 crc kubenswrapper[4857]: I1128 15:09:55.254717 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/447d115d-0dfd-4c2a-a48f-a3f253736f6f-var-run-ovn\") pod \"ovn-controller-hg2vm-config-glpml\" (UID: \"447d115d-0dfd-4c2a-a48f-a3f253736f6f\") " pod="openstack/ovn-controller-hg2vm-config-glpml"
Nov 28 15:09:55 crc kubenswrapper[4857]: I1128 15:09:55.254793 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/447d115d-0dfd-4c2a-a48f-a3f253736f6f-additional-scripts\") pod \"ovn-controller-hg2vm-config-glpml\" (UID: \"447d115d-0dfd-4c2a-a48f-a3f253736f6f\") " pod="openstack/ovn-controller-hg2vm-config-glpml"
Nov 28 15:09:55 crc kubenswrapper[4857]: I1128 15:09:55.254837 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/447d115d-0dfd-4c2a-a48f-a3f253736f6f-scripts\") pod \"ovn-controller-hg2vm-config-glpml\" (UID: \"447d115d-0dfd-4c2a-a48f-a3f253736f6f\") " pod="openstack/ovn-controller-hg2vm-config-glpml"
Nov 28 15:09:55 crc kubenswrapper[4857]: I1128 15:09:55.254863 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/447d115d-0dfd-4c2a-a48f-a3f253736f6f-var-log-ovn\") pod \"ovn-controller-hg2vm-config-glpml\" (UID: \"447d115d-0dfd-4c2a-a48f-a3f253736f6f\") " pod="openstack/ovn-controller-hg2vm-config-glpml"
Nov 28 15:09:55 crc kubenswrapper[4857]: I1128 15:09:55.254892 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bdqcj\" (UniqueName: \"kubernetes.io/projected/447d115d-0dfd-4c2a-a48f-a3f253736f6f-kube-api-access-bdqcj\") pod \"ovn-controller-hg2vm-config-glpml\" (UID: \"447d115d-0dfd-4c2a-a48f-a3f253736f6f\") " pod="openstack/ovn-controller-hg2vm-config-glpml"
Nov 28 15:09:55 crc kubenswrapper[4857]: I1128 15:09:55.255136 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/447d115d-0dfd-4c2a-a48f-a3f253736f6f-var-run\") pod \"ovn-controller-hg2vm-config-glpml\" (UID: \"447d115d-0dfd-4c2a-a48f-a3f253736f6f\") " pod="openstack/ovn-controller-hg2vm-config-glpml"
Nov 28 15:09:55 crc kubenswrapper[4857]: I1128 15:09:55.255273 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/447d115d-0dfd-4c2a-a48f-a3f253736f6f-var-run-ovn\") pod \"ovn-controller-hg2vm-config-glpml\" (UID: \"447d115d-0dfd-4c2a-a48f-a3f253736f6f\") " pod="openstack/ovn-controller-hg2vm-config-glpml"
Nov 28 15:09:55 crc kubenswrapper[4857]: I1128 15:09:55.255444 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/447d115d-0dfd-4c2a-a48f-a3f253736f6f-var-log-ovn\") pod \"ovn-controller-hg2vm-config-glpml\" (UID: \"447d115d-0dfd-4c2a-a48f-a3f253736f6f\") " pod="openstack/ovn-controller-hg2vm-config-glpml"
Nov 28 15:09:55 crc kubenswrapper[4857]: I1128 15:09:55.255724 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/447d115d-0dfd-4c2a-a48f-a3f253736f6f-additional-scripts\") pod \"ovn-controller-hg2vm-config-glpml\" (UID: \"447d115d-0dfd-4c2a-a48f-a3f253736f6f\") " pod="openstack/ovn-controller-hg2vm-config-glpml"
Nov 28 15:09:55 crc kubenswrapper[4857]: I1128 15:09:55.258092 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/447d115d-0dfd-4c2a-a48f-a3f253736f6f-scripts\") pod \"ovn-controller-hg2vm-config-glpml\" (UID: \"447d115d-0dfd-4c2a-a48f-a3f253736f6f\") " pod="openstack/ovn-controller-hg2vm-config-glpml"
Nov 28 15:09:55 crc kubenswrapper[4857]: I1128 15:09:55.284839 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bdqcj\" (UniqueName: \"kubernetes.io/projected/447d115d-0dfd-4c2a-a48f-a3f253736f6f-kube-api-access-bdqcj\") pod \"ovn-controller-hg2vm-config-glpml\" (UID: \"447d115d-0dfd-4c2a-a48f-a3f253736f6f\") " pod="openstack/ovn-controller-hg2vm-config-glpml"
Nov 28 15:09:55 crc kubenswrapper[4857]: I1128 15:09:55.377767 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-hg2vm-config-glpml"
Nov 28 15:09:55 crc kubenswrapper[4857]: I1128 15:09:55.880179 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-hg2vm-config-glpml"]
Nov 28 15:09:56 crc kubenswrapper[4857]: I1128 15:09:56.539355 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-hg2vm-config-glpml" event={"ID":"447d115d-0dfd-4c2a-a48f-a3f253736f6f","Type":"ContainerStarted","Data":"226311157ec87d35666f9de9c325f469914d8459c2e76a3f9d3785851dfdec6f"}
Nov 28 15:09:56 crc kubenswrapper[4857]: I1128 15:09:56.539768 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-hg2vm-config-glpml" event={"ID":"447d115d-0dfd-4c2a-a48f-a3f253736f6f","Type":"ContainerStarted","Data":"69997d02e31bb82ee36ff827eae78537f01757c3d769cc65b6603dd3a02f065e"}
Nov 28 15:09:56 crc kubenswrapper[4857]: I1128 15:09:56.569558 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-hg2vm-config-glpml" podStartSLOduration=1.569541013 podStartE2EDuration="1.569541013s" podCreationTimestamp="2025-11-28 15:09:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:09:56.563462131 +0000 UTC m=+6046.687403568" watchObservedRunningTime="2025-11-28 15:09:56.569541013 +0000 UTC m=+6046.693482450"
Nov 28 15:09:57 crc kubenswrapper[4857]: I1128 15:09:57.552023 4857 generic.go:334] "Generic (PLEG): container finished" podID="447d115d-0dfd-4c2a-a48f-a3f253736f6f" containerID="226311157ec87d35666f9de9c325f469914d8459c2e76a3f9d3785851dfdec6f" exitCode=0
Nov 28 15:09:57 crc kubenswrapper[4857]: I1128 15:09:57.552134 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-hg2vm-config-glpml" event={"ID":"447d115d-0dfd-4c2a-a48f-a3f253736f6f","Type":"ContainerDied","Data":"226311157ec87d35666f9de9c325f469914d8459c2e76a3f9d3785851dfdec6f"}
Nov 28 15:09:59 crc kubenswrapper[4857]: I1128 15:09:59.123569 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-hg2vm-config-glpml"
Nov 28 15:09:59 crc kubenswrapper[4857]: I1128 15:09:59.154890 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/447d115d-0dfd-4c2a-a48f-a3f253736f6f-scripts\") pod \"447d115d-0dfd-4c2a-a48f-a3f253736f6f\" (UID: \"447d115d-0dfd-4c2a-a48f-a3f253736f6f\") "
Nov 28 15:09:59 crc kubenswrapper[4857]: I1128 15:09:59.155006 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/447d115d-0dfd-4c2a-a48f-a3f253736f6f-var-run\") pod \"447d115d-0dfd-4c2a-a48f-a3f253736f6f\" (UID: \"447d115d-0dfd-4c2a-a48f-a3f253736f6f\") "
Nov 28 15:09:59 crc kubenswrapper[4857]: I1128 15:09:59.155076 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bdqcj\" (UniqueName: \"kubernetes.io/projected/447d115d-0dfd-4c2a-a48f-a3f253736f6f-kube-api-access-bdqcj\") pod \"447d115d-0dfd-4c2a-a48f-a3f253736f6f\" (UID: \"447d115d-0dfd-4c2a-a48f-a3f253736f6f\") "
Nov 28 15:09:59 crc kubenswrapper[4857]: I1128 15:09:59.155161 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/447d115d-0dfd-4c2a-a48f-a3f253736f6f-var-run" (OuterVolumeSpecName: "var-run") pod "447d115d-0dfd-4c2a-a48f-a3f253736f6f" (UID: "447d115d-0dfd-4c2a-a48f-a3f253736f6f"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:09:59 crc kubenswrapper[4857]: I1128 15:09:59.155231 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/447d115d-0dfd-4c2a-a48f-a3f253736f6f-var-run-ovn\") pod \"447d115d-0dfd-4c2a-a48f-a3f253736f6f\" (UID: \"447d115d-0dfd-4c2a-a48f-a3f253736f6f\") "
Nov 28 15:09:59 crc kubenswrapper[4857]: I1128 15:09:59.155260 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/447d115d-0dfd-4c2a-a48f-a3f253736f6f-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "447d115d-0dfd-4c2a-a48f-a3f253736f6f" (UID: "447d115d-0dfd-4c2a-a48f-a3f253736f6f"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:09:59 crc kubenswrapper[4857]: I1128 15:09:59.155328 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/447d115d-0dfd-4c2a-a48f-a3f253736f6f-var-log-ovn\") pod \"447d115d-0dfd-4c2a-a48f-a3f253736f6f\" (UID: \"447d115d-0dfd-4c2a-a48f-a3f253736f6f\") "
Nov 28 15:09:59 crc kubenswrapper[4857]: I1128 15:09:59.155408 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/447d115d-0dfd-4c2a-a48f-a3f253736f6f-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "447d115d-0dfd-4c2a-a48f-a3f253736f6f" (UID: "447d115d-0dfd-4c2a-a48f-a3f253736f6f"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 15:09:59 crc kubenswrapper[4857]: I1128 15:09:59.155452 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/447d115d-0dfd-4c2a-a48f-a3f253736f6f-additional-scripts\") pod \"447d115d-0dfd-4c2a-a48f-a3f253736f6f\" (UID: \"447d115d-0dfd-4c2a-a48f-a3f253736f6f\") "
Nov 28 15:09:59 crc kubenswrapper[4857]: I1128 15:09:59.156180 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/447d115d-0dfd-4c2a-a48f-a3f253736f6f-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "447d115d-0dfd-4c2a-a48f-a3f253736f6f" (UID: "447d115d-0dfd-4c2a-a48f-a3f253736f6f"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:09:59 crc kubenswrapper[4857]: I1128 15:09:59.156193 4857 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/447d115d-0dfd-4c2a-a48f-a3f253736f6f-var-run\") on node \"crc\" DevicePath \"\""
Nov 28 15:09:59 crc kubenswrapper[4857]: I1128 15:09:59.156238 4857 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/447d115d-0dfd-4c2a-a48f-a3f253736f6f-var-run-ovn\") on node \"crc\" DevicePath \"\""
Nov 28 15:09:59 crc kubenswrapper[4857]: I1128 15:09:59.156251 4857 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/447d115d-0dfd-4c2a-a48f-a3f253736f6f-var-log-ovn\") on node \"crc\" DevicePath \"\""
Nov 28 15:09:59 crc kubenswrapper[4857]: I1128 15:09:59.157873 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/447d115d-0dfd-4c2a-a48f-a3f253736f6f-scripts" (OuterVolumeSpecName: "scripts") pod "447d115d-0dfd-4c2a-a48f-a3f253736f6f" (UID: "447d115d-0dfd-4c2a-a48f-a3f253736f6f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:09:59 crc kubenswrapper[4857]: I1128 15:09:59.162597 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/447d115d-0dfd-4c2a-a48f-a3f253736f6f-kube-api-access-bdqcj" (OuterVolumeSpecName: "kube-api-access-bdqcj") pod "447d115d-0dfd-4c2a-a48f-a3f253736f6f" (UID: "447d115d-0dfd-4c2a-a48f-a3f253736f6f"). InnerVolumeSpecName "kube-api-access-bdqcj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:09:59 crc kubenswrapper[4857]: I1128 15:09:59.258338 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/447d115d-0dfd-4c2a-a48f-a3f253736f6f-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 15:09:59 crc kubenswrapper[4857]: I1128 15:09:59.258376 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bdqcj\" (UniqueName: \"kubernetes.io/projected/447d115d-0dfd-4c2a-a48f-a3f253736f6f-kube-api-access-bdqcj\") on node \"crc\" DevicePath \"\""
Nov 28 15:09:59 crc kubenswrapper[4857]: I1128 15:09:59.258386 4857 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/447d115d-0dfd-4c2a-a48f-a3f253736f6f-additional-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 15:09:59 crc kubenswrapper[4857]: I1128 15:09:59.582225 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-hg2vm-config-glpml" event={"ID":"447d115d-0dfd-4c2a-a48f-a3f253736f6f","Type":"ContainerDied","Data":"69997d02e31bb82ee36ff827eae78537f01757c3d769cc65b6603dd3a02f065e"}
Nov 28 15:09:59 crc kubenswrapper[4857]: I1128 15:09:59.582271 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="69997d02e31bb82ee36ff827eae78537f01757c3d769cc65b6603dd3a02f065e"
Nov 28 15:09:59 crc kubenswrapper[4857]: I1128 15:09:59.582301 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-hg2vm-config-glpml"
Nov 28 15:09:59 crc kubenswrapper[4857]: I1128 15:09:59.667210 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-hg2vm-config-glpml"]
Nov 28 15:09:59 crc kubenswrapper[4857]: I1128 15:09:59.678519 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-hg2vm-config-glpml"]
Nov 28 15:09:59 crc kubenswrapper[4857]: I1128 15:09:59.905871 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-hg2vm"
Nov 28 15:10:00 crc kubenswrapper[4857]: I1128 15:10:00.238087 4857 scope.go:117] "RemoveContainer" containerID="241be049ff6ed3f89c9bfd340177c2b5b57d6cca2ed8fa8917c7f312eb8c7ce1"
Nov 28 15:10:00 crc kubenswrapper[4857]: E1128 15:10:00.238997 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 15:10:00 crc kubenswrapper[4857]: I1128 15:10:00.246968 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="447d115d-0dfd-4c2a-a48f-a3f253736f6f" path="/var/lib/kubelet/pods/447d115d-0dfd-4c2a-a48f-a3f253736f6f/volumes"
Nov 28 15:10:06 crc kubenswrapper[4857]: I1128 15:10:06.361246 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-rsyslog-7w8sh"]
Nov 28 15:10:06 crc kubenswrapper[4857]: E1128 15:10:06.363967 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="447d115d-0dfd-4c2a-a48f-a3f253736f6f" containerName="ovn-config"
Nov 28 15:10:06 crc kubenswrapper[4857]: I1128 15:10:06.363990 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="447d115d-0dfd-4c2a-a48f-a3f253736f6f" containerName="ovn-config"
Nov 28 15:10:06 crc kubenswrapper[4857]: I1128 15:10:06.364239 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="447d115d-0dfd-4c2a-a48f-a3f253736f6f" containerName="ovn-config"
Nov 28 15:10:06 crc kubenswrapper[4857]: I1128 15:10:06.365540 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-rsyslog-7w8sh"
Nov 28 15:10:06 crc kubenswrapper[4857]: I1128 15:10:06.368281 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-rsyslog-config-data"
Nov 28 15:10:06 crc kubenswrapper[4857]: I1128 15:10:06.368641 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-rsyslog-scripts"
Nov 28 15:10:06 crc kubenswrapper[4857]: I1128 15:10:06.368791 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"octavia-hmport-map"
Nov 28 15:10:06 crc kubenswrapper[4857]: I1128 15:10:06.380305 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-rsyslog-7w8sh"]
Nov 28 15:10:06 crc kubenswrapper[4857]: I1128 15:10:06.515265 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/493b0f4e-047e-4f57-bdc7-fd0e2030a799-hm-ports\") pod \"octavia-rsyslog-7w8sh\" (UID: \"493b0f4e-047e-4f57-bdc7-fd0e2030a799\") " pod="openstack/octavia-rsyslog-7w8sh"
Nov 28 15:10:06 crc kubenswrapper[4857]: I1128 15:10:06.515380 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/493b0f4e-047e-4f57-bdc7-fd0e2030a799-config-data\") pod \"octavia-rsyslog-7w8sh\" (UID: \"493b0f4e-047e-4f57-bdc7-fd0e2030a799\") " pod="openstack/octavia-rsyslog-7w8sh"
Nov 28 15:10:06 crc kubenswrapper[4857]: I1128 15:10:06.515438 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/493b0f4e-047e-4f57-bdc7-fd0e2030a799-config-data-merged\") pod \"octavia-rsyslog-7w8sh\" (UID: \"493b0f4e-047e-4f57-bdc7-fd0e2030a799\") " pod="openstack/octavia-rsyslog-7w8sh"
Nov 28 15:10:06 crc kubenswrapper[4857]: I1128 15:10:06.515482 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/493b0f4e-047e-4f57-bdc7-fd0e2030a799-scripts\") pod \"octavia-rsyslog-7w8sh\" (UID: \"493b0f4e-047e-4f57-bdc7-fd0e2030a799\") " pod="openstack/octavia-rsyslog-7w8sh"
Nov 28 15:10:06 crc kubenswrapper[4857]: I1128 15:10:06.617631 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/493b0f4e-047e-4f57-bdc7-fd0e2030a799-scripts\") pod \"octavia-rsyslog-7w8sh\" (UID: \"493b0f4e-047e-4f57-bdc7-fd0e2030a799\") " pod="openstack/octavia-rsyslog-7w8sh"
Nov 28 15:10:06 crc kubenswrapper[4857]: I1128 15:10:06.617751 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/493b0f4e-047e-4f57-bdc7-fd0e2030a799-hm-ports\") pod \"octavia-rsyslog-7w8sh\" (UID: \"493b0f4e-047e-4f57-bdc7-fd0e2030a799\") " pod="openstack/octavia-rsyslog-7w8sh"
Nov 28 15:10:06 crc kubenswrapper[4857]: I1128 15:10:06.617818 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/493b0f4e-047e-4f57-bdc7-fd0e2030a799-config-data\") pod \"octavia-rsyslog-7w8sh\" (UID: \"493b0f4e-047e-4f57-bdc7-fd0e2030a799\") " pod="openstack/octavia-rsyslog-7w8sh"
Nov 28 15:10:06 crc kubenswrapper[4857]: I1128 15:10:06.617872 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/493b0f4e-047e-4f57-bdc7-fd0e2030a799-config-data-merged\") pod \"octavia-rsyslog-7w8sh\" (UID: \"493b0f4e-047e-4f57-bdc7-fd0e2030a799\") " pod="openstack/octavia-rsyslog-7w8sh"
Nov 28 15:10:06 crc kubenswrapper[4857]: I1128 15:10:06.618383 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/493b0f4e-047e-4f57-bdc7-fd0e2030a799-config-data-merged\") pod \"octavia-rsyslog-7w8sh\" (UID: \"493b0f4e-047e-4f57-bdc7-fd0e2030a799\") " pod="openstack/octavia-rsyslog-7w8sh"
Nov 28 15:10:06 crc kubenswrapper[4857]: I1128 15:10:06.619314 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/493b0f4e-047e-4f57-bdc7-fd0e2030a799-hm-ports\") pod \"octavia-rsyslog-7w8sh\" (UID: \"493b0f4e-047e-4f57-bdc7-fd0e2030a799\") " pod="openstack/octavia-rsyslog-7w8sh"
Nov 28 15:10:06 crc kubenswrapper[4857]: I1128 15:10:06.626373 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/493b0f4e-047e-4f57-bdc7-fd0e2030a799-scripts\") pod \"octavia-rsyslog-7w8sh\" (UID: \"493b0f4e-047e-4f57-bdc7-fd0e2030a799\") " pod="openstack/octavia-rsyslog-7w8sh"
Nov 28 15:10:06 crc kubenswrapper[4857]: I1128 15:10:06.628127 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/493b0f4e-047e-4f57-bdc7-fd0e2030a799-config-data\") pod \"octavia-rsyslog-7w8sh\" (UID: \"493b0f4e-047e-4f57-bdc7-fd0e2030a799\") " pod="openstack/octavia-rsyslog-7w8sh"
Nov 28 15:10:06 crc kubenswrapper[4857]: I1128 15:10:06.712235 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-rsyslog-7w8sh"
Nov 28 15:10:06 crc kubenswrapper[4857]: I1128 15:10:06.995702 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-image-upload-59f8cff499-wdmmt"]
Nov 28 15:10:06 crc kubenswrapper[4857]: I1128 15:10:06.999658 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-image-upload-59f8cff499-wdmmt"
Nov 28 15:10:07 crc kubenswrapper[4857]: I1128 15:10:07.013389 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-config-data"
Nov 28 15:10:07 crc kubenswrapper[4857]: I1128 15:10:07.064016 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-image-upload-59f8cff499-wdmmt"]
Nov 28 15:10:07 crc kubenswrapper[4857]: I1128 15:10:07.129404 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/8c3c5560-cd18-4999-b74a-d415bdbe190e-amphora-image\") pod \"octavia-image-upload-59f8cff499-wdmmt\" (UID: \"8c3c5560-cd18-4999-b74a-d415bdbe190e\") " pod="openstack/octavia-image-upload-59f8cff499-wdmmt"
Nov 28 15:10:07 crc kubenswrapper[4857]: I1128 15:10:07.130176 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/8c3c5560-cd18-4999-b74a-d415bdbe190e-httpd-config\") pod \"octavia-image-upload-59f8cff499-wdmmt\" (UID: \"8c3c5560-cd18-4999-b74a-d415bdbe190e\") " pod="openstack/octavia-image-upload-59f8cff499-wdmmt"
Nov 28 15:10:07 crc kubenswrapper[4857]: I1128 15:10:07.233283 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/8c3c5560-cd18-4999-b74a-d415bdbe190e-httpd-config\") pod \"octavia-image-upload-59f8cff499-wdmmt\" (UID: \"8c3c5560-cd18-4999-b74a-d415bdbe190e\") " pod="openstack/octavia-image-upload-59f8cff499-wdmmt"
Nov 28 15:10:07 crc kubenswrapper[4857]: I1128 15:10:07.233800 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/8c3c5560-cd18-4999-b74a-d415bdbe190e-amphora-image\") pod \"octavia-image-upload-59f8cff499-wdmmt\" (UID: \"8c3c5560-cd18-4999-b74a-d415bdbe190e\") " pod="openstack/octavia-image-upload-59f8cff499-wdmmt"
Nov 28 15:10:07 crc kubenswrapper[4857]: I1128 15:10:07.234435 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/8c3c5560-cd18-4999-b74a-d415bdbe190e-amphora-image\") pod \"octavia-image-upload-59f8cff499-wdmmt\" (UID: \"8c3c5560-cd18-4999-b74a-d415bdbe190e\") " pod="openstack/octavia-image-upload-59f8cff499-wdmmt"
Nov 28 15:10:07 crc kubenswrapper[4857]: I1128 15:10:07.269440 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/8c3c5560-cd18-4999-b74a-d415bdbe190e-httpd-config\") pod \"octavia-image-upload-59f8cff499-wdmmt\" (UID: \"8c3c5560-cd18-4999-b74a-d415bdbe190e\") " pod="openstack/octavia-image-upload-59f8cff499-wdmmt"
Nov 28 15:10:07 crc kubenswrapper[4857]: I1128 15:10:07.337048 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-rsyslog-7w8sh"]
Nov 28 15:10:07 crc kubenswrapper[4857]: I1128 15:10:07.337070 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-image-upload-59f8cff499-wdmmt"
Nov 28 15:10:07 crc kubenswrapper[4857]: W1128 15:10:07.338237 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod493b0f4e_047e_4f57_bdc7_fd0e2030a799.slice/crio-ab87facc82478d1fdf3f489b36f8758b720f1f9d462034c5a517f596706edb21 WatchSource:0}: Error finding container ab87facc82478d1fdf3f489b36f8758b720f1f9d462034c5a517f596706edb21: Status 404 returned error can't find the container with id ab87facc82478d1fdf3f489b36f8758b720f1f9d462034c5a517f596706edb21
Nov 28 15:10:07 crc kubenswrapper[4857]: I1128 15:10:07.685418 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-7w8sh" event={"ID":"493b0f4e-047e-4f57-bdc7-fd0e2030a799","Type":"ContainerStarted","Data":"ab87facc82478d1fdf3f489b36f8758b720f1f9d462034c5a517f596706edb21"}
Nov 28 15:10:07 crc kubenswrapper[4857]: I1128 15:10:07.849991 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-image-upload-59f8cff499-wdmmt"]
Nov 28 15:10:07 crc kubenswrapper[4857]: W1128 15:10:07.857709 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8c3c5560_cd18_4999_b74a_d415bdbe190e.slice/crio-a50f1ff6d570ce1c4761c5005692591a2156f30cbe1149ea1fde73cafefd9da6 WatchSource:0}: Error finding container a50f1ff6d570ce1c4761c5005692591a2156f30cbe1149ea1fde73cafefd9da6: Status 404 returned error can't find the container with id a50f1ff6d570ce1c4761c5005692591a2156f30cbe1149ea1fde73cafefd9da6
Nov 28 15:10:08 crc kubenswrapper[4857]: I1128 15:10:08.699680 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-wdmmt" event={"ID":"8c3c5560-cd18-4999-b74a-d415bdbe190e","Type":"ContainerStarted","Data":"a50f1ff6d570ce1c4761c5005692591a2156f30cbe1149ea1fde73cafefd9da6"}
Nov 28 15:10:11 crc kubenswrapper[4857]: I1128 15:10:11.949740 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-api-85d89c55f6-24c4g"
Nov 28 15:10:12 crc kubenswrapper[4857]: I1128 15:10:12.251580 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-api-85d89c55f6-24c4g"
Nov 28 15:10:12 crc kubenswrapper[4857]: I1128 15:10:12.752093 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-7w8sh" event={"ID":"493b0f4e-047e-4f57-bdc7-fd0e2030a799","Type":"ContainerStarted","Data":"d18ed9b867dbb667c2a223205b1f561e4a82748a41f193690c09faeca511a310"}
Nov 28 15:10:13 crc kubenswrapper[4857]: I1128 15:10:13.466573 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-db-sync-pt8bx"]
Nov 28 15:10:13 crc kubenswrapper[4857]: I1128 15:10:13.472278 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-sync-pt8bx"
Nov 28 15:10:13 crc kubenswrapper[4857]: I1128 15:10:13.478972 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-scripts"
Nov 28 15:10:13 crc kubenswrapper[4857]: I1128 15:10:13.479841 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-sync-pt8bx"]
Nov 28 15:10:13 crc kubenswrapper[4857]: I1128 15:10:13.520256 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/d3222b2f-724a-4c6f-9bc4-8744c4dc3377-config-data-merged\") pod \"octavia-db-sync-pt8bx\" (UID: \"d3222b2f-724a-4c6f-9bc4-8744c4dc3377\") " pod="openstack/octavia-db-sync-pt8bx"
Nov 28 15:10:13 crc kubenswrapper[4857]: I1128 15:10:13.520539 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3222b2f-724a-4c6f-9bc4-8744c4dc3377-combined-ca-bundle\") pod \"octavia-db-sync-pt8bx\" (UID: \"d3222b2f-724a-4c6f-9bc4-8744c4dc3377\") " pod="openstack/octavia-db-sync-pt8bx"
Nov 28 15:10:13 crc kubenswrapper[4857]: I1128 15:10:13.520905 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3222b2f-724a-4c6f-9bc4-8744c4dc3377-config-data\") pod \"octavia-db-sync-pt8bx\" (UID: \"d3222b2f-724a-4c6f-9bc4-8744c4dc3377\") " pod="openstack/octavia-db-sync-pt8bx"
Nov 28 15:10:13 crc kubenswrapper[4857]: I1128 15:10:13.538455 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d3222b2f-724a-4c6f-9bc4-8744c4dc3377-scripts\") pod \"octavia-db-sync-pt8bx\" (UID: \"d3222b2f-724a-4c6f-9bc4-8744c4dc3377\") " pod="openstack/octavia-db-sync-pt8bx"
Nov 28 15:10:13 crc kubenswrapper[4857]: I1128 15:10:13.642798 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/d3222b2f-724a-4c6f-9bc4-8744c4dc3377-config-data-merged\") pod \"octavia-db-sync-pt8bx\" (UID: \"d3222b2f-724a-4c6f-9bc4-8744c4dc3377\") " pod="openstack/octavia-db-sync-pt8bx"
Nov 28 15:10:13 crc kubenswrapper[4857]: I1128 15:10:13.642850 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3222b2f-724a-4c6f-9bc4-8744c4dc3377-combined-ca-bundle\") pod \"octavia-db-sync-pt8bx\" (UID: \"d3222b2f-724a-4c6f-9bc4-8744c4dc3377\") " pod="openstack/octavia-db-sync-pt8bx"
Nov 28 15:10:13 crc kubenswrapper[4857]: I1128 15:10:13.642992 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3222b2f-724a-4c6f-9bc4-8744c4dc3377-config-data\") pod \"octavia-db-sync-pt8bx\" (UID: \"d3222b2f-724a-4c6f-9bc4-8744c4dc3377\") " pod="openstack/octavia-db-sync-pt8bx"
Nov 28 15:10:13 crc kubenswrapper[4857]: I1128 15:10:13.643043 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d3222b2f-724a-4c6f-9bc4-8744c4dc3377-scripts\") pod \"octavia-db-sync-pt8bx\" (UID: \"d3222b2f-724a-4c6f-9bc4-8744c4dc3377\") " pod="openstack/octavia-db-sync-pt8bx"
Nov 28 15:10:13 crc kubenswrapper[4857]: I1128 15:10:13.644353 4857 operation_generator.go:637] "MountVolume.SetUp succeeded
for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/d3222b2f-724a-4c6f-9bc4-8744c4dc3377-config-data-merged\") pod \"octavia-db-sync-pt8bx\" (UID: \"d3222b2f-724a-4c6f-9bc4-8744c4dc3377\") " pod="openstack/octavia-db-sync-pt8bx" Nov 28 15:10:13 crc kubenswrapper[4857]: I1128 15:10:13.652324 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d3222b2f-724a-4c6f-9bc4-8744c4dc3377-scripts\") pod \"octavia-db-sync-pt8bx\" (UID: \"d3222b2f-724a-4c6f-9bc4-8744c4dc3377\") " pod="openstack/octavia-db-sync-pt8bx" Nov 28 15:10:13 crc kubenswrapper[4857]: I1128 15:10:13.654076 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3222b2f-724a-4c6f-9bc4-8744c4dc3377-combined-ca-bundle\") pod \"octavia-db-sync-pt8bx\" (UID: \"d3222b2f-724a-4c6f-9bc4-8744c4dc3377\") " pod="openstack/octavia-db-sync-pt8bx" Nov 28 15:10:13 crc kubenswrapper[4857]: I1128 15:10:13.655201 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3222b2f-724a-4c6f-9bc4-8744c4dc3377-config-data\") pod \"octavia-db-sync-pt8bx\" (UID: \"d3222b2f-724a-4c6f-9bc4-8744c4dc3377\") " pod="openstack/octavia-db-sync-pt8bx" Nov 28 15:10:13 crc kubenswrapper[4857]: I1128 15:10:13.822112 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-sync-pt8bx" Nov 28 15:10:14 crc kubenswrapper[4857]: I1128 15:10:14.230221 4857 scope.go:117] "RemoveContainer" containerID="241be049ff6ed3f89c9bfd340177c2b5b57d6cca2ed8fa8917c7f312eb8c7ce1" Nov 28 15:10:14 crc kubenswrapper[4857]: E1128 15:10:14.230808 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:10:14 crc kubenswrapper[4857]: I1128 15:10:14.772305 4857 generic.go:334] "Generic (PLEG): container finished" podID="493b0f4e-047e-4f57-bdc7-fd0e2030a799" containerID="d18ed9b867dbb667c2a223205b1f561e4a82748a41f193690c09faeca511a310" exitCode=0 Nov 28 15:10:14 crc kubenswrapper[4857]: I1128 15:10:14.772350 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-7w8sh" event={"ID":"493b0f4e-047e-4f57-bdc7-fd0e2030a799","Type":"ContainerDied","Data":"d18ed9b867dbb667c2a223205b1f561e4a82748a41f193690c09faeca511a310"} Nov 28 15:10:14 crc kubenswrapper[4857]: I1128 15:10:14.774486 4857 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 15:10:19 crc kubenswrapper[4857]: I1128 15:10:19.246222 4857 scope.go:117] "RemoveContainer" containerID="ef7832557a5b4faea11181838ee8f7cc774a8ed39c8007866fd7edfbd451b7cd" Nov 28 15:10:20 crc kubenswrapper[4857]: I1128 15:10:20.461128 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-sync-pt8bx"] Nov 28 15:10:20 crc kubenswrapper[4857]: W1128 15:10:20.474307 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd3222b2f_724a_4c6f_9bc4_8744c4dc3377.slice/crio-670ef50c340da65eaa0a27234fa91cccfb38be722936de43d11700da3a6f8f85 
WatchSource:0}: Error finding container 670ef50c340da65eaa0a27234fa91cccfb38be722936de43d11700da3a6f8f85: Status 404 returned error can't find the container with id 670ef50c340da65eaa0a27234fa91cccfb38be722936de43d11700da3a6f8f85 Nov 28 15:10:20 crc kubenswrapper[4857]: I1128 15:10:20.866199 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-7w8sh" event={"ID":"493b0f4e-047e-4f57-bdc7-fd0e2030a799","Type":"ContainerStarted","Data":"2d3b2f14aa433bd05e105f73dab43a63a954ae8b5147dd6a9c46f198de153d7d"} Nov 28 15:10:20 crc kubenswrapper[4857]: I1128 15:10:20.866700 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-rsyslog-7w8sh" Nov 28 15:10:20 crc kubenswrapper[4857]: I1128 15:10:20.869921 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-pt8bx" event={"ID":"d3222b2f-724a-4c6f-9bc4-8744c4dc3377","Type":"ContainerStarted","Data":"09c50b0a90cd7928918427f63714abf48ff1895323713efcf0ddf9e7dd0db0c6"} Nov 28 15:10:20 crc kubenswrapper[4857]: I1128 15:10:20.870058 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-pt8bx" event={"ID":"d3222b2f-724a-4c6f-9bc4-8744c4dc3377","Type":"ContainerStarted","Data":"670ef50c340da65eaa0a27234fa91cccfb38be722936de43d11700da3a6f8f85"} Nov 28 15:10:20 crc kubenswrapper[4857]: I1128 15:10:20.872598 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-wdmmt" event={"ID":"8c3c5560-cd18-4999-b74a-d415bdbe190e","Type":"ContainerStarted","Data":"6ebb898f9cc9f547a1600de5b78d252a31366327f6247383ae04c0873911737d"} Nov 28 15:10:20 crc kubenswrapper[4857]: E1128 15:10:20.885399 4857 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd3222b2f_724a_4c6f_9bc4_8744c4dc3377.slice/crio-09c50b0a90cd7928918427f63714abf48ff1895323713efcf0ddf9e7dd0db0c6.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd3222b2f_724a_4c6f_9bc4_8744c4dc3377.slice/crio-conmon-09c50b0a90cd7928918427f63714abf48ff1895323713efcf0ddf9e7dd0db0c6.scope\": RecentStats: unable to find data in memory cache]" Nov 28 15:10:20 crc kubenswrapper[4857]: I1128 15:10:20.922264 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-rsyslog-7w8sh" podStartSLOduration=2.186025627 podStartE2EDuration="14.92222912s" podCreationTimestamp="2025-11-28 15:10:06 +0000 UTC" firstStartedPulling="2025-11-28 15:10:07.350619053 +0000 UTC m=+6057.474560490" lastFinishedPulling="2025-11-28 15:10:20.086822546 +0000 UTC m=+6070.210763983" observedRunningTime="2025-11-28 15:10:20.90012436 +0000 UTC m=+6071.024065797" watchObservedRunningTime="2025-11-28 15:10:20.92222912 +0000 UTC m=+6071.046170567" Nov 28 15:10:21 crc kubenswrapper[4857]: I1128 15:10:21.890379 4857 generic.go:334] "Generic (PLEG): container finished" podID="8c3c5560-cd18-4999-b74a-d415bdbe190e" containerID="6ebb898f9cc9f547a1600de5b78d252a31366327f6247383ae04c0873911737d" exitCode=0 Nov 28 15:10:21 crc kubenswrapper[4857]: I1128 15:10:21.890609 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-wdmmt" event={"ID":"8c3c5560-cd18-4999-b74a-d415bdbe190e","Type":"ContainerDied","Data":"6ebb898f9cc9f547a1600de5b78d252a31366327f6247383ae04c0873911737d"} Nov 28 15:10:21 crc kubenswrapper[4857]: I1128 
15:10:21.894811 4857 generic.go:334] "Generic (PLEG): container finished" podID="d3222b2f-724a-4c6f-9bc4-8744c4dc3377" containerID="09c50b0a90cd7928918427f63714abf48ff1895323713efcf0ddf9e7dd0db0c6" exitCode=0 Nov 28 15:10:21 crc kubenswrapper[4857]: I1128 15:10:21.894861 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-pt8bx" event={"ID":"d3222b2f-724a-4c6f-9bc4-8744c4dc3377","Type":"ContainerDied","Data":"09c50b0a90cd7928918427f63714abf48ff1895323713efcf0ddf9e7dd0db0c6"} Nov 28 15:10:21 crc kubenswrapper[4857]: I1128 15:10:21.894897 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-pt8bx" event={"ID":"d3222b2f-724a-4c6f-9bc4-8744c4dc3377","Type":"ContainerStarted","Data":"e6913053466e9da85fc1ab3b0b8e5710527123e2a5616312ecbabf7e4050c3b1"} Nov 28 15:10:23 crc kubenswrapper[4857]: I1128 15:10:23.932296 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-wdmmt" event={"ID":"8c3c5560-cd18-4999-b74a-d415bdbe190e","Type":"ContainerStarted","Data":"b3b9bbc325d46037113ccf25ff68095c05dcfb79ac738e1e1376b034b8ff1a5a"} Nov 28 15:10:23 crc kubenswrapper[4857]: I1128 15:10:23.979091 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-db-sync-pt8bx" podStartSLOduration=10.979001229 podStartE2EDuration="10.979001229s" podCreationTimestamp="2025-11-28 15:10:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:10:21.942329687 +0000 UTC m=+6072.066271124" watchObservedRunningTime="2025-11-28 15:10:23.979001229 +0000 UTC m=+6074.102942716" Nov 28 15:10:23 crc kubenswrapper[4857]: I1128 15:10:23.987295 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-image-upload-59f8cff499-wdmmt" podStartSLOduration=2.445998681 podStartE2EDuration="17.987258829s" podCreationTimestamp="2025-11-28 15:10:06 +0000 UTC" firstStartedPulling="2025-11-28 15:10:07.860544233 +0000 UTC m=+6057.984485670" lastFinishedPulling="2025-11-28 15:10:23.401804351 +0000 UTC m=+6073.525745818" observedRunningTime="2025-11-28 15:10:23.959239921 +0000 UTC m=+6074.083181418" watchObservedRunningTime="2025-11-28 15:10:23.987258829 +0000 UTC m=+6074.111200286" Nov 28 15:10:26 crc kubenswrapper[4857]: I1128 15:10:26.229996 4857 scope.go:117] "RemoveContainer" containerID="241be049ff6ed3f89c9bfd340177c2b5b57d6cca2ed8fa8917c7f312eb8c7ce1" Nov 28 15:10:26 crc kubenswrapper[4857]: E1128 15:10:26.231175 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:10:27 crc kubenswrapper[4857]: I1128 15:10:27.990282 4857 generic.go:334] "Generic (PLEG): container finished" podID="d3222b2f-724a-4c6f-9bc4-8744c4dc3377" containerID="e6913053466e9da85fc1ab3b0b8e5710527123e2a5616312ecbabf7e4050c3b1" exitCode=0 Nov 28 15:10:27 crc kubenswrapper[4857]: I1128 15:10:27.990469 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-pt8bx" 
event={"ID":"d3222b2f-724a-4c6f-9bc4-8744c4dc3377","Type":"ContainerDied","Data":"e6913053466e9da85fc1ab3b0b8e5710527123e2a5616312ecbabf7e4050c3b1"} Nov 28 15:10:29 crc kubenswrapper[4857]: I1128 15:10:29.503576 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-sync-pt8bx" Nov 28 15:10:29 crc kubenswrapper[4857]: I1128 15:10:29.583077 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3222b2f-724a-4c6f-9bc4-8744c4dc3377-config-data\") pod \"d3222b2f-724a-4c6f-9bc4-8744c4dc3377\" (UID: \"d3222b2f-724a-4c6f-9bc4-8744c4dc3377\") " Nov 28 15:10:29 crc kubenswrapper[4857]: I1128 15:10:29.583406 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/d3222b2f-724a-4c6f-9bc4-8744c4dc3377-config-data-merged\") pod \"d3222b2f-724a-4c6f-9bc4-8744c4dc3377\" (UID: \"d3222b2f-724a-4c6f-9bc4-8744c4dc3377\") " Nov 28 15:10:29 crc kubenswrapper[4857]: I1128 15:10:29.583536 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3222b2f-724a-4c6f-9bc4-8744c4dc3377-combined-ca-bundle\") pod \"d3222b2f-724a-4c6f-9bc4-8744c4dc3377\" (UID: \"d3222b2f-724a-4c6f-9bc4-8744c4dc3377\") " Nov 28 15:10:29 crc kubenswrapper[4857]: I1128 15:10:29.583632 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d3222b2f-724a-4c6f-9bc4-8744c4dc3377-scripts\") pod \"d3222b2f-724a-4c6f-9bc4-8744c4dc3377\" (UID: \"d3222b2f-724a-4c6f-9bc4-8744c4dc3377\") " Nov 28 15:10:29 crc kubenswrapper[4857]: I1128 15:10:29.597402 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3222b2f-724a-4c6f-9bc4-8744c4dc3377-config-data" (OuterVolumeSpecName: "config-data") pod "d3222b2f-724a-4c6f-9bc4-8744c4dc3377" (UID: "d3222b2f-724a-4c6f-9bc4-8744c4dc3377"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:10:29 crc kubenswrapper[4857]: I1128 15:10:29.598724 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3222b2f-724a-4c6f-9bc4-8744c4dc3377-scripts" (OuterVolumeSpecName: "scripts") pod "d3222b2f-724a-4c6f-9bc4-8744c4dc3377" (UID: "d3222b2f-724a-4c6f-9bc4-8744c4dc3377"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:10:29 crc kubenswrapper[4857]: I1128 15:10:29.620445 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d3222b2f-724a-4c6f-9bc4-8744c4dc3377-config-data-merged" (OuterVolumeSpecName: "config-data-merged") pod "d3222b2f-724a-4c6f-9bc4-8744c4dc3377" (UID: "d3222b2f-724a-4c6f-9bc4-8744c4dc3377"). InnerVolumeSpecName "config-data-merged". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:10:29 crc kubenswrapper[4857]: I1128 15:10:29.633240 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3222b2f-724a-4c6f-9bc4-8744c4dc3377-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d3222b2f-724a-4c6f-9bc4-8744c4dc3377" (UID: "d3222b2f-724a-4c6f-9bc4-8744c4dc3377"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:10:29 crc kubenswrapper[4857]: I1128 15:10:29.688540 4857 reconciler_common.go:293] "Volume detached for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/d3222b2f-724a-4c6f-9bc4-8744c4dc3377-config-data-merged\") on node \"crc\" DevicePath \"\"" Nov 28 15:10:29 crc kubenswrapper[4857]: I1128 15:10:29.688625 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3222b2f-724a-4c6f-9bc4-8744c4dc3377-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:10:29 crc kubenswrapper[4857]: I1128 15:10:29.688648 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d3222b2f-724a-4c6f-9bc4-8744c4dc3377-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:10:29 crc kubenswrapper[4857]: I1128 15:10:29.688669 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3222b2f-724a-4c6f-9bc4-8744c4dc3377-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:10:30 crc kubenswrapper[4857]: I1128 15:10:30.023430 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-pt8bx" event={"ID":"d3222b2f-724a-4c6f-9bc4-8744c4dc3377","Type":"ContainerDied","Data":"670ef50c340da65eaa0a27234fa91cccfb38be722936de43d11700da3a6f8f85"} Nov 28 15:10:30 crc kubenswrapper[4857]: I1128 15:10:30.024100 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="670ef50c340da65eaa0a27234fa91cccfb38be722936de43d11700da3a6f8f85" Nov 28 15:10:30 crc kubenswrapper[4857]: I1128 15:10:30.023518 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-sync-pt8bx" Nov 28 15:10:36 crc kubenswrapper[4857]: I1128 15:10:36.748803 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-rsyslog-7w8sh" Nov 28 15:10:37 crc kubenswrapper[4857]: I1128 15:10:37.231183 4857 scope.go:117] "RemoveContainer" containerID="241be049ff6ed3f89c9bfd340177c2b5b57d6cca2ed8fa8917c7f312eb8c7ce1" Nov 28 15:10:37 crc kubenswrapper[4857]: E1128 15:10:37.231504 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:10:52 crc kubenswrapper[4857]: I1128 15:10:52.229722 4857 scope.go:117] "RemoveContainer" containerID="241be049ff6ed3f89c9bfd340177c2b5b57d6cca2ed8fa8917c7f312eb8c7ce1" Nov 28 15:10:52 crc kubenswrapper[4857]: E1128 15:10:52.230959 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:10:56 crc kubenswrapper[4857]: I1128 15:10:56.275326 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-image-upload-59f8cff499-wdmmt"] Nov 28 15:10:56 crc kubenswrapper[4857]: 
I1128 15:10:56.276261 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/octavia-image-upload-59f8cff499-wdmmt" podUID="8c3c5560-cd18-4999-b74a-d415bdbe190e" containerName="octavia-amphora-httpd" containerID="cri-o://b3b9bbc325d46037113ccf25ff68095c05dcfb79ac738e1e1376b034b8ff1a5a" gracePeriod=30 Nov 28 15:10:56 crc kubenswrapper[4857]: I1128 15:10:56.970822 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-image-upload-59f8cff499-wdmmt" Nov 28 15:10:57 crc kubenswrapper[4857]: I1128 15:10:57.154564 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/8c3c5560-cd18-4999-b74a-d415bdbe190e-amphora-image\") pod \"8c3c5560-cd18-4999-b74a-d415bdbe190e\" (UID: \"8c3c5560-cd18-4999-b74a-d415bdbe190e\") " Nov 28 15:10:57 crc kubenswrapper[4857]: I1128 15:10:57.154627 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/8c3c5560-cd18-4999-b74a-d415bdbe190e-httpd-config\") pod \"8c3c5560-cd18-4999-b74a-d415bdbe190e\" (UID: \"8c3c5560-cd18-4999-b74a-d415bdbe190e\") " Nov 28 15:10:57 crc kubenswrapper[4857]: I1128 15:10:57.199290 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c3c5560-cd18-4999-b74a-d415bdbe190e-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "8c3c5560-cd18-4999-b74a-d415bdbe190e" (UID: "8c3c5560-cd18-4999-b74a-d415bdbe190e"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:10:57 crc kubenswrapper[4857]: I1128 15:10:57.258524 4857 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/8c3c5560-cd18-4999-b74a-d415bdbe190e-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:10:57 crc kubenswrapper[4857]: I1128 15:10:57.267480 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8c3c5560-cd18-4999-b74a-d415bdbe190e-amphora-image" (OuterVolumeSpecName: "amphora-image") pod "8c3c5560-cd18-4999-b74a-d415bdbe190e" (UID: "8c3c5560-cd18-4999-b74a-d415bdbe190e"). InnerVolumeSpecName "amphora-image". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:10:57 crc kubenswrapper[4857]: I1128 15:10:57.361503 4857 reconciler_common.go:293] "Volume detached for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/8c3c5560-cd18-4999-b74a-d415bdbe190e-amphora-image\") on node \"crc\" DevicePath \"\"" Nov 28 15:10:57 crc kubenswrapper[4857]: I1128 15:10:57.375535 4857 generic.go:334] "Generic (PLEG): container finished" podID="8c3c5560-cd18-4999-b74a-d415bdbe190e" containerID="b3b9bbc325d46037113ccf25ff68095c05dcfb79ac738e1e1376b034b8ff1a5a" exitCode=0 Nov 28 15:10:57 crc kubenswrapper[4857]: I1128 15:10:57.375598 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-wdmmt" event={"ID":"8c3c5560-cd18-4999-b74a-d415bdbe190e","Type":"ContainerDied","Data":"b3b9bbc325d46037113ccf25ff68095c05dcfb79ac738e1e1376b034b8ff1a5a"} Nov 28 15:10:57 crc kubenswrapper[4857]: I1128 15:10:57.375640 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-wdmmt" event={"ID":"8c3c5560-cd18-4999-b74a-d415bdbe190e","Type":"ContainerDied","Data":"a50f1ff6d570ce1c4761c5005692591a2156f30cbe1149ea1fde73cafefd9da6"} Nov 28 15:10:57 crc kubenswrapper[4857]: I1128 15:10:57.375670 4857 scope.go:117] "RemoveContainer" containerID="b3b9bbc325d46037113ccf25ff68095c05dcfb79ac738e1e1376b034b8ff1a5a" Nov 28 15:10:57 crc kubenswrapper[4857]: I1128 15:10:57.375870 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-image-upload-59f8cff499-wdmmt" Nov 28 15:10:57 crc kubenswrapper[4857]: I1128 15:10:57.402463 4857 scope.go:117] "RemoveContainer" containerID="6ebb898f9cc9f547a1600de5b78d252a31366327f6247383ae04c0873911737d" Nov 28 15:10:57 crc kubenswrapper[4857]: I1128 15:10:57.429698 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-image-upload-59f8cff499-wdmmt"] Nov 28 15:10:57 crc kubenswrapper[4857]: I1128 15:10:57.439677 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-image-upload-59f8cff499-wdmmt"] Nov 28 15:10:57 crc kubenswrapper[4857]: I1128 15:10:57.447150 4857 scope.go:117] "RemoveContainer" containerID="b3b9bbc325d46037113ccf25ff68095c05dcfb79ac738e1e1376b034b8ff1a5a" Nov 28 15:10:57 crc kubenswrapper[4857]: E1128 15:10:57.447559 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b3b9bbc325d46037113ccf25ff68095c05dcfb79ac738e1e1376b034b8ff1a5a\": container with ID starting with b3b9bbc325d46037113ccf25ff68095c05dcfb79ac738e1e1376b034b8ff1a5a not found: ID does not exist" containerID="b3b9bbc325d46037113ccf25ff68095c05dcfb79ac738e1e1376b034b8ff1a5a" Nov 28 15:10:57 crc kubenswrapper[4857]: I1128 15:10:57.447602 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3b9bbc325d46037113ccf25ff68095c05dcfb79ac738e1e1376b034b8ff1a5a"} err="failed to get container status \"b3b9bbc325d46037113ccf25ff68095c05dcfb79ac738e1e1376b034b8ff1a5a\": rpc error: code = NotFound desc = could not find container \"b3b9bbc325d46037113ccf25ff68095c05dcfb79ac738e1e1376b034b8ff1a5a\": container with ID starting with b3b9bbc325d46037113ccf25ff68095c05dcfb79ac738e1e1376b034b8ff1a5a not found: ID does not exist" Nov 28 15:10:57 crc kubenswrapper[4857]: I1128 15:10:57.447632 4857 scope.go:117] "RemoveContainer" containerID="6ebb898f9cc9f547a1600de5b78d252a31366327f6247383ae04c0873911737d" Nov 28 15:10:57 
crc kubenswrapper[4857]: E1128 15:10:57.448341 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6ebb898f9cc9f547a1600de5b78d252a31366327f6247383ae04c0873911737d\": container with ID starting with 6ebb898f9cc9f547a1600de5b78d252a31366327f6247383ae04c0873911737d not found: ID does not exist" containerID="6ebb898f9cc9f547a1600de5b78d252a31366327f6247383ae04c0873911737d" Nov 28 15:10:57 crc kubenswrapper[4857]: I1128 15:10:57.448400 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ebb898f9cc9f547a1600de5b78d252a31366327f6247383ae04c0873911737d"} err="failed to get container status \"6ebb898f9cc9f547a1600de5b78d252a31366327f6247383ae04c0873911737d\": rpc error: code = NotFound desc = could not find container \"6ebb898f9cc9f547a1600de5b78d252a31366327f6247383ae04c0873911737d\": container with ID starting with 6ebb898f9cc9f547a1600de5b78d252a31366327f6247383ae04c0873911737d not found: ID does not exist" Nov 28 15:10:58 crc kubenswrapper[4857]: I1128 15:10:58.251294 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8c3c5560-cd18-4999-b74a-d415bdbe190e" path="/var/lib/kubelet/pods/8c3c5560-cd18-4999-b74a-d415bdbe190e/volumes" Nov 28 15:11:01 crc kubenswrapper[4857]: I1128 15:11:01.398920 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-image-upload-59f8cff499-xj54b"] Nov 28 15:11:01 crc kubenswrapper[4857]: E1128 15:11:01.400214 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c3c5560-cd18-4999-b74a-d415bdbe190e" containerName="octavia-amphora-httpd" Nov 28 15:11:01 crc kubenswrapper[4857]: I1128 15:11:01.400236 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c3c5560-cd18-4999-b74a-d415bdbe190e" containerName="octavia-amphora-httpd" Nov 28 15:11:01 crc kubenswrapper[4857]: E1128 15:11:01.400276 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3222b2f-724a-4c6f-9bc4-8744c4dc3377" containerName="init" Nov 28 15:11:01 crc kubenswrapper[4857]: I1128 15:11:01.400285 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3222b2f-724a-4c6f-9bc4-8744c4dc3377" containerName="init" Nov 28 15:11:01 crc kubenswrapper[4857]: E1128 15:11:01.400316 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3222b2f-724a-4c6f-9bc4-8744c4dc3377" containerName="octavia-db-sync" Nov 28 15:11:01 crc kubenswrapper[4857]: I1128 15:11:01.400327 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3222b2f-724a-4c6f-9bc4-8744c4dc3377" containerName="octavia-db-sync" Nov 28 15:11:01 crc kubenswrapper[4857]: E1128 15:11:01.400345 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c3c5560-cd18-4999-b74a-d415bdbe190e" containerName="init" Nov 28 15:11:01 crc kubenswrapper[4857]: I1128 15:11:01.400354 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c3c5560-cd18-4999-b74a-d415bdbe190e" containerName="init" Nov 28 15:11:01 crc kubenswrapper[4857]: I1128 15:11:01.400637 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c3c5560-cd18-4999-b74a-d415bdbe190e" containerName="octavia-amphora-httpd" Nov 28 15:11:01 crc kubenswrapper[4857]: I1128 15:11:01.400668 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3222b2f-724a-4c6f-9bc4-8744c4dc3377" containerName="octavia-db-sync" Nov 28 15:11:01 crc kubenswrapper[4857]: I1128 15:11:01.402262 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-image-upload-59f8cff499-xj54b" Nov 28 15:11:01 crc kubenswrapper[4857]: I1128 15:11:01.405605 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-config-data" Nov 28 15:11:01 crc kubenswrapper[4857]: I1128 15:11:01.419816 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-image-upload-59f8cff499-xj54b"] Nov 28 15:11:01 crc kubenswrapper[4857]: I1128 15:11:01.587517 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/7d6d5f39-e9b3-4bf1-a47c-e3315e5c456c-amphora-image\") pod \"octavia-image-upload-59f8cff499-xj54b\" (UID: \"7d6d5f39-e9b3-4bf1-a47c-e3315e5c456c\") " pod="openstack/octavia-image-upload-59f8cff499-xj54b" Nov 28 15:11:01 crc kubenswrapper[4857]: I1128 15:11:01.587608 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7d6d5f39-e9b3-4bf1-a47c-e3315e5c456c-httpd-config\") pod \"octavia-image-upload-59f8cff499-xj54b\" (UID: \"7d6d5f39-e9b3-4bf1-a47c-e3315e5c456c\") " pod="openstack/octavia-image-upload-59f8cff499-xj54b" Nov 28 15:11:01 crc kubenswrapper[4857]: I1128 15:11:01.689660 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/7d6d5f39-e9b3-4bf1-a47c-e3315e5c456c-amphora-image\") pod \"octavia-image-upload-59f8cff499-xj54b\" (UID: \"7d6d5f39-e9b3-4bf1-a47c-e3315e5c456c\") " pod="openstack/octavia-image-upload-59f8cff499-xj54b" Nov 28 15:11:01 crc kubenswrapper[4857]: I1128 15:11:01.690258 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7d6d5f39-e9b3-4bf1-a47c-e3315e5c456c-httpd-config\") pod \"octavia-image-upload-59f8cff499-xj54b\" (UID: \"7d6d5f39-e9b3-4bf1-a47c-e3315e5c456c\") " pod="openstack/octavia-image-upload-59f8cff499-xj54b" Nov 28 15:11:01 crc kubenswrapper[4857]: I1128 15:11:01.690470 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/7d6d5f39-e9b3-4bf1-a47c-e3315e5c456c-amphora-image\") pod \"octavia-image-upload-59f8cff499-xj54b\" (UID: \"7d6d5f39-e9b3-4bf1-a47c-e3315e5c456c\") " pod="openstack/octavia-image-upload-59f8cff499-xj54b" Nov 28 15:11:01 crc kubenswrapper[4857]: I1128 15:11:01.701684 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7d6d5f39-e9b3-4bf1-a47c-e3315e5c456c-httpd-config\") pod \"octavia-image-upload-59f8cff499-xj54b\" (UID: \"7d6d5f39-e9b3-4bf1-a47c-e3315e5c456c\") " pod="openstack/octavia-image-upload-59f8cff499-xj54b" Nov 28 15:11:01 crc kubenswrapper[4857]: I1128 15:11:01.733075 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-image-upload-59f8cff499-xj54b" Nov 28 15:11:02 crc kubenswrapper[4857]: I1128 15:11:02.324138 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-image-upload-59f8cff499-xj54b"] Nov 28 15:11:02 crc kubenswrapper[4857]: I1128 15:11:02.455826 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-xj54b" event={"ID":"7d6d5f39-e9b3-4bf1-a47c-e3315e5c456c","Type":"ContainerStarted","Data":"c9875b4b229688f011178a32721a95d3d24e55fb31f27f20f9b86cebdb605849"} Nov 28 15:11:03 crc kubenswrapper[4857]: I1128 15:11:03.469751 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-xj54b" event={"ID":"7d6d5f39-e9b3-4bf1-a47c-e3315e5c456c","Type":"ContainerStarted","Data":"c078012f8a5ab693e1cc43abb31b90a0fed419a3a2759e34f02168703ca4e84b"} Nov 28 15:11:04 crc kubenswrapper[4857]: I1128 15:11:04.487336 4857 generic.go:334] "Generic (PLEG): container finished" podID="7d6d5f39-e9b3-4bf1-a47c-e3315e5c456c" containerID="c078012f8a5ab693e1cc43abb31b90a0fed419a3a2759e34f02168703ca4e84b" exitCode=0 Nov 28 15:11:04 crc kubenswrapper[4857]: I1128 15:11:04.487459 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-xj54b" event={"ID":"7d6d5f39-e9b3-4bf1-a47c-e3315e5c456c","Type":"ContainerDied","Data":"c078012f8a5ab693e1cc43abb31b90a0fed419a3a2759e34f02168703ca4e84b"} Nov 28 15:11:05 crc kubenswrapper[4857]: I1128 15:11:05.228887 4857 scope.go:117] "RemoveContainer" containerID="241be049ff6ed3f89c9bfd340177c2b5b57d6cca2ed8fa8917c7f312eb8c7ce1" Nov 28 15:11:05 crc kubenswrapper[4857]: E1128 15:11:05.229831 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:11:06 crc kubenswrapper[4857]: I1128 15:11:06.511238 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-59f8cff499-xj54b" event={"ID":"7d6d5f39-e9b3-4bf1-a47c-e3315e5c456c","Type":"ContainerStarted","Data":"cbcb6bebc59791b65b4bb40caacfcd1ef38c1d323e202c1d0dc1c41d2f39e8ca"} Nov 28 15:11:11 crc kubenswrapper[4857]: I1128 15:11:11.324046 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-image-upload-59f8cff499-xj54b" podStartSLOduration=6.649871886 podStartE2EDuration="10.324025955s" podCreationTimestamp="2025-11-28 15:11:01 +0000 UTC" firstStartedPulling="2025-11-28 15:11:02.32594426 +0000 UTC m=+6112.449885707" lastFinishedPulling="2025-11-28 15:11:06.000098329 +0000 UTC m=+6116.124039776" observedRunningTime="2025-11-28 15:11:06.529853409 +0000 UTC m=+6116.653794886" watchObservedRunningTime="2025-11-28 15:11:11.324025955 +0000 UTC m=+6121.447967392" Nov 28 15:11:11 crc kubenswrapper[4857]: I1128 15:11:11.325923 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-healthmanager-9sgx4"] Nov 28 15:11:11 crc kubenswrapper[4857]: I1128 15:11:11.328199 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-healthmanager-9sgx4" Nov 28 15:11:11 crc kubenswrapper[4857]: I1128 15:11:11.332906 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-certs-secret" Nov 28 15:11:11 crc kubenswrapper[4857]: I1128 15:11:11.334540 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-healthmanager-config-data" Nov 28 15:11:11 crc kubenswrapper[4857]: I1128 15:11:11.342736 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-healthmanager-scripts" Nov 28 15:11:11 crc kubenswrapper[4857]: I1128 15:11:11.386628 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-healthmanager-9sgx4"] Nov 28 15:11:11 crc kubenswrapper[4857]: I1128 15:11:11.432863 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53410811-937e-4b3e-94fa-e2b2eb63468d-config-data\") pod \"octavia-healthmanager-9sgx4\" (UID: \"53410811-937e-4b3e-94fa-e2b2eb63468d\") " pod="openstack/octavia-healthmanager-9sgx4" Nov 28 15:11:11 crc kubenswrapper[4857]: I1128 15:11:11.432998 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/53410811-937e-4b3e-94fa-e2b2eb63468d-config-data-merged\") pod \"octavia-healthmanager-9sgx4\" (UID: \"53410811-937e-4b3e-94fa-e2b2eb63468d\") " pod="openstack/octavia-healthmanager-9sgx4" Nov 28 15:11:11 crc kubenswrapper[4857]: I1128 15:11:11.433271 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/53410811-937e-4b3e-94fa-e2b2eb63468d-scripts\") pod \"octavia-healthmanager-9sgx4\" (UID: \"53410811-937e-4b3e-94fa-e2b2eb63468d\") " pod="openstack/octavia-healthmanager-9sgx4" Nov 28 15:11:11 crc kubenswrapper[4857]: I1128 15:11:11.433397 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/53410811-937e-4b3e-94fa-e2b2eb63468d-amphora-certs\") pod \"octavia-healthmanager-9sgx4\" (UID: \"53410811-937e-4b3e-94fa-e2b2eb63468d\") " pod="openstack/octavia-healthmanager-9sgx4" Nov 28 15:11:11 crc kubenswrapper[4857]: I1128 15:11:11.433473 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53410811-937e-4b3e-94fa-e2b2eb63468d-combined-ca-bundle\") pod \"octavia-healthmanager-9sgx4\" (UID: \"53410811-937e-4b3e-94fa-e2b2eb63468d\") " pod="openstack/octavia-healthmanager-9sgx4" Nov 28 15:11:11 crc kubenswrapper[4857]: I1128 15:11:11.433515 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/53410811-937e-4b3e-94fa-e2b2eb63468d-hm-ports\") pod \"octavia-healthmanager-9sgx4\" (UID: \"53410811-937e-4b3e-94fa-e2b2eb63468d\") " pod="openstack/octavia-healthmanager-9sgx4" Nov 28 15:11:11 crc kubenswrapper[4857]: I1128 15:11:11.535677 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53410811-937e-4b3e-94fa-e2b2eb63468d-config-data\") pod \"octavia-healthmanager-9sgx4\" (UID: \"53410811-937e-4b3e-94fa-e2b2eb63468d\") " pod="openstack/octavia-healthmanager-9sgx4" Nov 28 15:11:11 crc 
kubenswrapper[4857]: I1128 15:11:11.536243 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/53410811-937e-4b3e-94fa-e2b2eb63468d-config-data-merged\") pod \"octavia-healthmanager-9sgx4\" (UID: \"53410811-937e-4b3e-94fa-e2b2eb63468d\") " pod="openstack/octavia-healthmanager-9sgx4" Nov 28 15:11:11 crc kubenswrapper[4857]: I1128 15:11:11.536420 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/53410811-937e-4b3e-94fa-e2b2eb63468d-scripts\") pod \"octavia-healthmanager-9sgx4\" (UID: \"53410811-937e-4b3e-94fa-e2b2eb63468d\") " pod="openstack/octavia-healthmanager-9sgx4" Nov 28 15:11:11 crc kubenswrapper[4857]: I1128 15:11:11.536595 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/53410811-937e-4b3e-94fa-e2b2eb63468d-amphora-certs\") pod \"octavia-healthmanager-9sgx4\" (UID: \"53410811-937e-4b3e-94fa-e2b2eb63468d\") " pod="openstack/octavia-healthmanager-9sgx4" Nov 28 15:11:11 crc kubenswrapper[4857]: I1128 15:11:11.537096 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/53410811-937e-4b3e-94fa-e2b2eb63468d-config-data-merged\") pod \"octavia-healthmanager-9sgx4\" (UID: \"53410811-937e-4b3e-94fa-e2b2eb63468d\") " pod="openstack/octavia-healthmanager-9sgx4" Nov 28 15:11:11 crc kubenswrapper[4857]: I1128 15:11:11.537546 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53410811-937e-4b3e-94fa-e2b2eb63468d-combined-ca-bundle\") pod \"octavia-healthmanager-9sgx4\" (UID: \"53410811-937e-4b3e-94fa-e2b2eb63468d\") " pod="openstack/octavia-healthmanager-9sgx4" Nov 28 15:11:11 crc kubenswrapper[4857]: I1128 15:11:11.537747 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/53410811-937e-4b3e-94fa-e2b2eb63468d-hm-ports\") pod \"octavia-healthmanager-9sgx4\" (UID: \"53410811-937e-4b3e-94fa-e2b2eb63468d\") " pod="openstack/octavia-healthmanager-9sgx4" Nov 28 15:11:11 crc kubenswrapper[4857]: I1128 15:11:11.538775 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/53410811-937e-4b3e-94fa-e2b2eb63468d-hm-ports\") pod \"octavia-healthmanager-9sgx4\" (UID: \"53410811-937e-4b3e-94fa-e2b2eb63468d\") " pod="openstack/octavia-healthmanager-9sgx4" Nov 28 15:11:11 crc kubenswrapper[4857]: I1128 15:11:11.545022 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/53410811-937e-4b3e-94fa-e2b2eb63468d-amphora-certs\") pod \"octavia-healthmanager-9sgx4\" (UID: \"53410811-937e-4b3e-94fa-e2b2eb63468d\") " pod="openstack/octavia-healthmanager-9sgx4" Nov 28 15:11:11 crc kubenswrapper[4857]: I1128 15:11:11.545228 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/53410811-937e-4b3e-94fa-e2b2eb63468d-config-data\") pod \"octavia-healthmanager-9sgx4\" (UID: \"53410811-937e-4b3e-94fa-e2b2eb63468d\") " pod="openstack/octavia-healthmanager-9sgx4" Nov 28 15:11:11 crc kubenswrapper[4857]: I1128 15:11:11.547143 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/53410811-937e-4b3e-94fa-e2b2eb63468d-scripts\") pod \"octavia-healthmanager-9sgx4\" (UID: \"53410811-937e-4b3e-94fa-e2b2eb63468d\") " pod="openstack/octavia-healthmanager-9sgx4" Nov 28 15:11:11 crc kubenswrapper[4857]: I1128 15:11:11.548626 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/53410811-937e-4b3e-94fa-e2b2eb63468d-combined-ca-bundle\") pod \"octavia-healthmanager-9sgx4\" (UID: \"53410811-937e-4b3e-94fa-e2b2eb63468d\") " pod="openstack/octavia-healthmanager-9sgx4" Nov 28 15:11:11 crc kubenswrapper[4857]: I1128 15:11:11.691710 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-healthmanager-9sgx4" Nov 28 15:11:12 crc kubenswrapper[4857]: I1128 15:11:12.523602 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-healthmanager-9sgx4"] Nov 28 15:11:12 crc kubenswrapper[4857]: I1128 15:11:12.592924 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-9sgx4" event={"ID":"53410811-937e-4b3e-94fa-e2b2eb63468d","Type":"ContainerStarted","Data":"00f87e77dce2db56c2a2e1d253493f3a9188f2a76ee279cd3463fa56a0f4c16d"} Nov 28 15:11:13 crc kubenswrapper[4857]: I1128 15:11:13.605474 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-9sgx4" event={"ID":"53410811-937e-4b3e-94fa-e2b2eb63468d","Type":"ContainerStarted","Data":"662c8c79904502c50983537ea9dc021bb063d696a85bbebff46b80c3a6c70e11"} Nov 28 15:11:13 crc kubenswrapper[4857]: I1128 15:11:13.724535 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-housekeeping-lrkh5"] Nov 28 15:11:13 crc kubenswrapper[4857]: I1128 15:11:13.726126 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-housekeeping-lrkh5" Nov 28 15:11:13 crc kubenswrapper[4857]: I1128 15:11:13.730328 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-housekeeping-scripts" Nov 28 15:11:13 crc kubenswrapper[4857]: I1128 15:11:13.730518 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-housekeeping-config-data" Nov 28 15:11:13 crc kubenswrapper[4857]: I1128 15:11:13.755804 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-housekeeping-lrkh5"] Nov 28 15:11:13 crc kubenswrapper[4857]: I1128 15:11:13.896862 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e200ed8-6d6b-454a-9146-5f86e5892e6d-config-data\") pod \"octavia-housekeeping-lrkh5\" (UID: \"3e200ed8-6d6b-454a-9146-5f86e5892e6d\") " pod="openstack/octavia-housekeeping-lrkh5" Nov 28 15:11:13 crc kubenswrapper[4857]: I1128 15:11:13.897465 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3e200ed8-6d6b-454a-9146-5f86e5892e6d-scripts\") pod \"octavia-housekeeping-lrkh5\" (UID: \"3e200ed8-6d6b-454a-9146-5f86e5892e6d\") " pod="openstack/octavia-housekeeping-lrkh5" Nov 28 15:11:13 crc kubenswrapper[4857]: I1128 15:11:13.897696 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/3e200ed8-6d6b-454a-9146-5f86e5892e6d-hm-ports\") pod \"octavia-housekeeping-lrkh5\" (UID: \"3e200ed8-6d6b-454a-9146-5f86e5892e6d\") " pod="openstack/octavia-housekeeping-lrkh5" Nov 28 15:11:13 crc kubenswrapper[4857]: I1128 15:11:13.897897 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/3e200ed8-6d6b-454a-9146-5f86e5892e6d-config-data-merged\") pod \"octavia-housekeeping-lrkh5\" (UID: \"3e200ed8-6d6b-454a-9146-5f86e5892e6d\") " pod="openstack/octavia-housekeeping-lrkh5" Nov 28 15:11:13 crc kubenswrapper[4857]: I1128 15:11:13.898076 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/3e200ed8-6d6b-454a-9146-5f86e5892e6d-amphora-certs\") pod \"octavia-housekeeping-lrkh5\" (UID: \"3e200ed8-6d6b-454a-9146-5f86e5892e6d\") " pod="openstack/octavia-housekeeping-lrkh5" Nov 28 15:11:13 crc kubenswrapper[4857]: I1128 15:11:13.898426 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e200ed8-6d6b-454a-9146-5f86e5892e6d-combined-ca-bundle\") pod \"octavia-housekeeping-lrkh5\" (UID: \"3e200ed8-6d6b-454a-9146-5f86e5892e6d\") " pod="openstack/octavia-housekeeping-lrkh5" Nov 28 15:11:14 crc kubenswrapper[4857]: I1128 15:11:14.001347 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/3e200ed8-6d6b-454a-9146-5f86e5892e6d-amphora-certs\") pod \"octavia-housekeeping-lrkh5\" (UID: \"3e200ed8-6d6b-454a-9146-5f86e5892e6d\") " pod="openstack/octavia-housekeeping-lrkh5" Nov 28 15:11:14 crc kubenswrapper[4857]: I1128 15:11:14.001477 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/3e200ed8-6d6b-454a-9146-5f86e5892e6d-combined-ca-bundle\") pod \"octavia-housekeeping-lrkh5\" (UID: \"3e200ed8-6d6b-454a-9146-5f86e5892e6d\") " pod="openstack/octavia-housekeeping-lrkh5" Nov 28 15:11:14 crc kubenswrapper[4857]: I1128 15:11:14.001656 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e200ed8-6d6b-454a-9146-5f86e5892e6d-config-data\") pod \"octavia-housekeeping-lrkh5\" (UID: \"3e200ed8-6d6b-454a-9146-5f86e5892e6d\") " pod="openstack/octavia-housekeeping-lrkh5" Nov 28 15:11:14 crc kubenswrapper[4857]: I1128 15:11:14.001734 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3e200ed8-6d6b-454a-9146-5f86e5892e6d-scripts\") pod \"octavia-housekeeping-lrkh5\" (UID: \"3e200ed8-6d6b-454a-9146-5f86e5892e6d\") " pod="openstack/octavia-housekeeping-lrkh5" Nov 28 15:11:14 crc kubenswrapper[4857]: I1128 15:11:14.001810 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/3e200ed8-6d6b-454a-9146-5f86e5892e6d-hm-ports\") pod \"octavia-housekeeping-lrkh5\" (UID: \"3e200ed8-6d6b-454a-9146-5f86e5892e6d\") " pod="openstack/octavia-housekeeping-lrkh5" Nov 28 15:11:14 crc kubenswrapper[4857]: I1128 15:11:14.001884 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/3e200ed8-6d6b-454a-9146-5f86e5892e6d-config-data-merged\") pod \"octavia-housekeeping-lrkh5\" (UID: \"3e200ed8-6d6b-454a-9146-5f86e5892e6d\") " pod="openstack/octavia-housekeeping-lrkh5" Nov 28 15:11:14 crc kubenswrapper[4857]: I1128 15:11:14.002748 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/3e200ed8-6d6b-454a-9146-5f86e5892e6d-config-data-merged\") pod \"octavia-housekeeping-lrkh5\" (UID: \"3e200ed8-6d6b-454a-9146-5f86e5892e6d\") " pod="openstack/octavia-housekeeping-lrkh5" Nov 28 15:11:14 crc kubenswrapper[4857]: I1128 15:11:14.003257 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/3e200ed8-6d6b-454a-9146-5f86e5892e6d-hm-ports\") pod \"octavia-housekeeping-lrkh5\" (UID: \"3e200ed8-6d6b-454a-9146-5f86e5892e6d\") " pod="openstack/octavia-housekeeping-lrkh5" Nov 28 15:11:14 crc kubenswrapper[4857]: I1128 15:11:14.009289 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e200ed8-6d6b-454a-9146-5f86e5892e6d-config-data\") pod \"octavia-housekeeping-lrkh5\" (UID: \"3e200ed8-6d6b-454a-9146-5f86e5892e6d\") " pod="openstack/octavia-housekeeping-lrkh5" Nov 28 15:11:14 crc kubenswrapper[4857]: I1128 15:11:14.009625 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e200ed8-6d6b-454a-9146-5f86e5892e6d-combined-ca-bundle\") pod \"octavia-housekeeping-lrkh5\" (UID: \"3e200ed8-6d6b-454a-9146-5f86e5892e6d\") " pod="openstack/octavia-housekeeping-lrkh5" Nov 28 15:11:14 crc kubenswrapper[4857]: I1128 15:11:14.009741 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/3e200ed8-6d6b-454a-9146-5f86e5892e6d-amphora-certs\") pod \"octavia-housekeeping-lrkh5\" (UID: \"3e200ed8-6d6b-454a-9146-5f86e5892e6d\") " 
pod="openstack/octavia-housekeeping-lrkh5" Nov 28 15:11:14 crc kubenswrapper[4857]: I1128 15:11:14.023809 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3e200ed8-6d6b-454a-9146-5f86e5892e6d-scripts\") pod \"octavia-housekeeping-lrkh5\" (UID: \"3e200ed8-6d6b-454a-9146-5f86e5892e6d\") " pod="openstack/octavia-housekeeping-lrkh5" Nov 28 15:11:14 crc kubenswrapper[4857]: I1128 15:11:14.055111 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-housekeeping-lrkh5" Nov 28 15:11:14 crc kubenswrapper[4857]: I1128 15:11:14.688257 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-housekeeping-lrkh5"] Nov 28 15:11:14 crc kubenswrapper[4857]: W1128 15:11:14.703174 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3e200ed8_6d6b_454a_9146_5f86e5892e6d.slice/crio-eb7ee3960eb4389943b1133c72c708574e6dc29e05bdc328b44b26caadecdb2e WatchSource:0}: Error finding container eb7ee3960eb4389943b1133c72c708574e6dc29e05bdc328b44b26caadecdb2e: Status 404 returned error can't find the container with id eb7ee3960eb4389943b1133c72c708574e6dc29e05bdc328b44b26caadecdb2e Nov 28 15:11:14 crc kubenswrapper[4857]: I1128 15:11:14.828617 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-worker-r6h2l"] Nov 28 15:11:14 crc kubenswrapper[4857]: I1128 15:11:14.831088 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-worker-r6h2l" Nov 28 15:11:14 crc kubenswrapper[4857]: I1128 15:11:14.840595 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-worker-config-data" Nov 28 15:11:14 crc kubenswrapper[4857]: I1128 15:11:14.841814 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-worker-scripts" Nov 28 15:11:14 crc kubenswrapper[4857]: I1128 15:11:14.867604 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-worker-r6h2l"] Nov 28 15:11:14 crc kubenswrapper[4857]: I1128 15:11:14.921773 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/c22b0407-f4df-49cf-9dc4-9b8c78e835eb-config-data-merged\") pod \"octavia-worker-r6h2l\" (UID: \"c22b0407-f4df-49cf-9dc4-9b8c78e835eb\") " pod="openstack/octavia-worker-r6h2l" Nov 28 15:11:14 crc kubenswrapper[4857]: I1128 15:11:14.921824 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c22b0407-f4df-49cf-9dc4-9b8c78e835eb-combined-ca-bundle\") pod \"octavia-worker-r6h2l\" (UID: \"c22b0407-f4df-49cf-9dc4-9b8c78e835eb\") " pod="openstack/octavia-worker-r6h2l" Nov 28 15:11:14 crc kubenswrapper[4857]: I1128 15:11:14.921856 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/c22b0407-f4df-49cf-9dc4-9b8c78e835eb-hm-ports\") pod \"octavia-worker-r6h2l\" (UID: \"c22b0407-f4df-49cf-9dc4-9b8c78e835eb\") " pod="openstack/octavia-worker-r6h2l" Nov 28 15:11:14 crc kubenswrapper[4857]: I1128 15:11:14.921974 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c22b0407-f4df-49cf-9dc4-9b8c78e835eb-config-data\") 
pod \"octavia-worker-r6h2l\" (UID: \"c22b0407-f4df-49cf-9dc4-9b8c78e835eb\") " pod="openstack/octavia-worker-r6h2l" Nov 28 15:11:14 crc kubenswrapper[4857]: I1128 15:11:14.921997 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/c22b0407-f4df-49cf-9dc4-9b8c78e835eb-amphora-certs\") pod \"octavia-worker-r6h2l\" (UID: \"c22b0407-f4df-49cf-9dc4-9b8c78e835eb\") " pod="openstack/octavia-worker-r6h2l" Nov 28 15:11:14 crc kubenswrapper[4857]: I1128 15:11:14.922022 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c22b0407-f4df-49cf-9dc4-9b8c78e835eb-scripts\") pod \"octavia-worker-r6h2l\" (UID: \"c22b0407-f4df-49cf-9dc4-9b8c78e835eb\") " pod="openstack/octavia-worker-r6h2l" Nov 28 15:11:15 crc kubenswrapper[4857]: I1128 15:11:15.025678 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/c22b0407-f4df-49cf-9dc4-9b8c78e835eb-config-data-merged\") pod \"octavia-worker-r6h2l\" (UID: \"c22b0407-f4df-49cf-9dc4-9b8c78e835eb\") " pod="openstack/octavia-worker-r6h2l" Nov 28 15:11:15 crc kubenswrapper[4857]: I1128 15:11:15.025758 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c22b0407-f4df-49cf-9dc4-9b8c78e835eb-combined-ca-bundle\") pod \"octavia-worker-r6h2l\" (UID: \"c22b0407-f4df-49cf-9dc4-9b8c78e835eb\") " pod="openstack/octavia-worker-r6h2l" Nov 28 15:11:15 crc kubenswrapper[4857]: I1128 15:11:15.025791 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/c22b0407-f4df-49cf-9dc4-9b8c78e835eb-hm-ports\") pod \"octavia-worker-r6h2l\" (UID: \"c22b0407-f4df-49cf-9dc4-9b8c78e835eb\") " pod="openstack/octavia-worker-r6h2l" Nov 28 15:11:15 crc kubenswrapper[4857]: I1128 15:11:15.025934 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c22b0407-f4df-49cf-9dc4-9b8c78e835eb-config-data\") pod \"octavia-worker-r6h2l\" (UID: \"c22b0407-f4df-49cf-9dc4-9b8c78e835eb\") " pod="openstack/octavia-worker-r6h2l" Nov 28 15:11:15 crc kubenswrapper[4857]: I1128 15:11:15.025981 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/c22b0407-f4df-49cf-9dc4-9b8c78e835eb-amphora-certs\") pod \"octavia-worker-r6h2l\" (UID: \"c22b0407-f4df-49cf-9dc4-9b8c78e835eb\") " pod="openstack/octavia-worker-r6h2l" Nov 28 15:11:15 crc kubenswrapper[4857]: I1128 15:11:15.026009 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c22b0407-f4df-49cf-9dc4-9b8c78e835eb-scripts\") pod \"octavia-worker-r6h2l\" (UID: \"c22b0407-f4df-49cf-9dc4-9b8c78e835eb\") " pod="openstack/octavia-worker-r6h2l" Nov 28 15:11:15 crc kubenswrapper[4857]: I1128 15:11:15.028086 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/c22b0407-f4df-49cf-9dc4-9b8c78e835eb-hm-ports\") pod \"octavia-worker-r6h2l\" (UID: \"c22b0407-f4df-49cf-9dc4-9b8c78e835eb\") " pod="openstack/octavia-worker-r6h2l" Nov 28 15:11:15 crc kubenswrapper[4857]: I1128 15:11:15.030543 4857 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/c22b0407-f4df-49cf-9dc4-9b8c78e835eb-config-data-merged\") pod \"octavia-worker-r6h2l\" (UID: \"c22b0407-f4df-49cf-9dc4-9b8c78e835eb\") " pod="openstack/octavia-worker-r6h2l" Nov 28 15:11:15 crc kubenswrapper[4857]: I1128 15:11:15.034601 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c22b0407-f4df-49cf-9dc4-9b8c78e835eb-combined-ca-bundle\") pod \"octavia-worker-r6h2l\" (UID: \"c22b0407-f4df-49cf-9dc4-9b8c78e835eb\") " pod="openstack/octavia-worker-r6h2l" Nov 28 15:11:15 crc kubenswrapper[4857]: I1128 15:11:15.034975 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c22b0407-f4df-49cf-9dc4-9b8c78e835eb-scripts\") pod \"octavia-worker-r6h2l\" (UID: \"c22b0407-f4df-49cf-9dc4-9b8c78e835eb\") " pod="openstack/octavia-worker-r6h2l" Nov 28 15:11:15 crc kubenswrapper[4857]: I1128 15:11:15.037673 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/c22b0407-f4df-49cf-9dc4-9b8c78e835eb-amphora-certs\") pod \"octavia-worker-r6h2l\" (UID: \"c22b0407-f4df-49cf-9dc4-9b8c78e835eb\") " pod="openstack/octavia-worker-r6h2l" Nov 28 15:11:15 crc kubenswrapper[4857]: I1128 15:11:15.057583 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c22b0407-f4df-49cf-9dc4-9b8c78e835eb-config-data\") pod \"octavia-worker-r6h2l\" (UID: \"c22b0407-f4df-49cf-9dc4-9b8c78e835eb\") " pod="openstack/octavia-worker-r6h2l" Nov 28 15:11:15 crc kubenswrapper[4857]: I1128 15:11:15.167927 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-worker-r6h2l" Nov 28 15:11:15 crc kubenswrapper[4857]: I1128 15:11:15.636633 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-housekeeping-lrkh5" event={"ID":"3e200ed8-6d6b-454a-9146-5f86e5892e6d","Type":"ContainerStarted","Data":"eb7ee3960eb4389943b1133c72c708574e6dc29e05bdc328b44b26caadecdb2e"} Nov 28 15:11:15 crc kubenswrapper[4857]: I1128 15:11:15.799911 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-worker-r6h2l"] Nov 28 15:11:15 crc kubenswrapper[4857]: W1128 15:11:15.809893 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc22b0407_f4df_49cf_9dc4_9b8c78e835eb.slice/crio-d613fa1b98fa1ad07065b55a6d423757c0b6327f19d87118a77d756053ebc875 WatchSource:0}: Error finding container d613fa1b98fa1ad07065b55a6d423757c0b6327f19d87118a77d756053ebc875: Status 404 returned error can't find the container with id d613fa1b98fa1ad07065b55a6d423757c0b6327f19d87118a77d756053ebc875 Nov 28 15:11:16 crc kubenswrapper[4857]: I1128 15:11:16.651150 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-worker-r6h2l" event={"ID":"c22b0407-f4df-49cf-9dc4-9b8c78e835eb","Type":"ContainerStarted","Data":"d613fa1b98fa1ad07065b55a6d423757c0b6327f19d87118a77d756053ebc875"} Nov 28 15:11:17 crc kubenswrapper[4857]: I1128 15:11:17.667479 4857 generic.go:334] "Generic (PLEG): container finished" podID="53410811-937e-4b3e-94fa-e2b2eb63468d" containerID="662c8c79904502c50983537ea9dc021bb063d696a85bbebff46b80c3a6c70e11" exitCode=0 Nov 28 15:11:17 crc kubenswrapper[4857]: I1128 15:11:17.667555 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-9sgx4" event={"ID":"53410811-937e-4b3e-94fa-e2b2eb63468d","Type":"ContainerDied","Data":"662c8c79904502c50983537ea9dc021bb063d696a85bbebff46b80c3a6c70e11"} Nov 28 15:11:18 crc kubenswrapper[4857]: I1128 15:11:18.229933 4857 scope.go:117] "RemoveContainer" containerID="241be049ff6ed3f89c9bfd340177c2b5b57d6cca2ed8fa8917c7f312eb8c7ce1" Nov 28 15:11:18 crc kubenswrapper[4857]: I1128 15:11:18.682756 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerStarted","Data":"cd65dc19359f44ea74d1e09fca4c6fc276670fdbfc5a599a66977a1d2ab62015"} Nov 28 15:11:18 crc kubenswrapper[4857]: I1128 15:11:18.686679 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-9sgx4" event={"ID":"53410811-937e-4b3e-94fa-e2b2eb63468d","Type":"ContainerStarted","Data":"1dc6608d931985f09f7d289f1ebf16be54f23c50d58461e12bec003286ba50f1"} Nov 28 15:11:18 crc kubenswrapper[4857]: I1128 15:11:18.686918 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-healthmanager-9sgx4" Nov 28 15:11:18 crc kubenswrapper[4857]: I1128 15:11:18.728647 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-healthmanager-9sgx4" podStartSLOduration=7.728626586 podStartE2EDuration="7.728626586s" podCreationTimestamp="2025-11-28 15:11:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:11:18.724322251 +0000 UTC m=+6128.848263688" watchObservedRunningTime="2025-11-28 15:11:18.728626586 +0000 UTC m=+6128.852568023" Nov 28 15:11:19 crc 
kubenswrapper[4857]: I1128 15:11:19.698932 4857 generic.go:334] "Generic (PLEG): container finished" podID="3e200ed8-6d6b-454a-9146-5f86e5892e6d" containerID="cf9cd148115b81506cc360d9608f6896fe4185ad04b757c4c76e03b77800a42c" exitCode=0 Nov 28 15:11:19 crc kubenswrapper[4857]: I1128 15:11:19.699133 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-housekeeping-lrkh5" event={"ID":"3e200ed8-6d6b-454a-9146-5f86e5892e6d","Type":"ContainerDied","Data":"cf9cd148115b81506cc360d9608f6896fe4185ad04b757c4c76e03b77800a42c"} Nov 28 15:11:19 crc kubenswrapper[4857]: I1128 15:11:19.953402 4857 scope.go:117] "RemoveContainer" containerID="d0586034b0e1ec57aaf810903325384698f3aeda025cd7630dcc3443257a3aa3" Nov 28 15:11:19 crc kubenswrapper[4857]: I1128 15:11:19.994589 4857 scope.go:117] "RemoveContainer" containerID="c74c856a899b9d5c2e547d04d22bc1bd3f90653812ae75b653ac36de7896a1cb" Nov 28 15:11:20 crc kubenswrapper[4857]: I1128 15:11:20.040838 4857 scope.go:117] "RemoveContainer" containerID="56727d3cf65b3d32cfa483c994c376b9874042e1cb2b9909a2d307fa0e125180" Nov 28 15:11:20 crc kubenswrapper[4857]: I1128 15:11:20.715395 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-housekeeping-lrkh5" event={"ID":"3e200ed8-6d6b-454a-9146-5f86e5892e6d","Type":"ContainerStarted","Data":"d7b516b39c10c234c5dab6b31f73f36392e615dc75bfabfcbdfda53c756a144e"} Nov 28 15:11:20 crc kubenswrapper[4857]: I1128 15:11:20.715984 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-housekeeping-lrkh5" Nov 28 15:11:20 crc kubenswrapper[4857]: I1128 15:11:20.723213 4857 generic.go:334] "Generic (PLEG): container finished" podID="c22b0407-f4df-49cf-9dc4-9b8c78e835eb" containerID="6f26dbbebca4e927d31e958406de7ab4362aeb8bd38083e0869b87998b79805e" exitCode=0 Nov 28 15:11:20 crc kubenswrapper[4857]: I1128 15:11:20.723373 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-worker-r6h2l" event={"ID":"c22b0407-f4df-49cf-9dc4-9b8c78e835eb","Type":"ContainerDied","Data":"6f26dbbebca4e927d31e958406de7ab4362aeb8bd38083e0869b87998b79805e"} Nov 28 15:11:20 crc kubenswrapper[4857]: I1128 15:11:20.758200 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-housekeeping-lrkh5" podStartSLOduration=4.268101395 podStartE2EDuration="7.758176557s" podCreationTimestamp="2025-11-28 15:11:13 +0000 UTC" firstStartedPulling="2025-11-28 15:11:14.706848132 +0000 UTC m=+6124.830789569" lastFinishedPulling="2025-11-28 15:11:18.196923284 +0000 UTC m=+6128.320864731" observedRunningTime="2025-11-28 15:11:20.74930658 +0000 UTC m=+6130.873248037" watchObservedRunningTime="2025-11-28 15:11:20.758176557 +0000 UTC m=+6130.882117994" Nov 28 15:11:21 crc kubenswrapper[4857]: I1128 15:11:21.745388 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-worker-r6h2l" event={"ID":"c22b0407-f4df-49cf-9dc4-9b8c78e835eb","Type":"ContainerStarted","Data":"b3530b0719c40f53f3ef48ba51c8add16483692af861ba9ea5b37f7f7876fd59"} Nov 28 15:11:21 crc kubenswrapper[4857]: I1128 15:11:21.746337 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-worker-r6h2l" Nov 28 15:11:21 crc kubenswrapper[4857]: I1128 15:11:21.775404 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-worker-r6h2l" podStartSLOduration=4.320798324 podStartE2EDuration="7.775375717s" podCreationTimestamp="2025-11-28 15:11:14 +0000 UTC" firstStartedPulling="2025-11-28 
15:11:15.812169106 +0000 UTC m=+6125.936110543" lastFinishedPulling="2025-11-28 15:11:19.266746509 +0000 UTC m=+6129.390687936" observedRunningTime="2025-11-28 15:11:21.770854766 +0000 UTC m=+6131.894796203" watchObservedRunningTime="2025-11-28 15:11:21.775375717 +0000 UTC m=+6131.899317154" Nov 28 15:11:22 crc kubenswrapper[4857]: I1128 15:11:22.836272 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-9dsxn"] Nov 28 15:11:22 crc kubenswrapper[4857]: I1128 15:11:22.842069 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9dsxn" Nov 28 15:11:22 crc kubenswrapper[4857]: I1128 15:11:22.870349 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9dsxn"] Nov 28 15:11:22 crc kubenswrapper[4857]: I1128 15:11:22.947385 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/236954d1-afa9-4393-97f9-3de005164e3d-catalog-content\") pod \"redhat-marketplace-9dsxn\" (UID: \"236954d1-afa9-4393-97f9-3de005164e3d\") " pod="openshift-marketplace/redhat-marketplace-9dsxn" Nov 28 15:11:22 crc kubenswrapper[4857]: I1128 15:11:22.947564 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wt8mt\" (UniqueName: \"kubernetes.io/projected/236954d1-afa9-4393-97f9-3de005164e3d-kube-api-access-wt8mt\") pod \"redhat-marketplace-9dsxn\" (UID: \"236954d1-afa9-4393-97f9-3de005164e3d\") " pod="openshift-marketplace/redhat-marketplace-9dsxn" Nov 28 15:11:22 crc kubenswrapper[4857]: I1128 15:11:22.947596 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/236954d1-afa9-4393-97f9-3de005164e3d-utilities\") pod \"redhat-marketplace-9dsxn\" (UID: \"236954d1-afa9-4393-97f9-3de005164e3d\") " pod="openshift-marketplace/redhat-marketplace-9dsxn" Nov 28 15:11:23 crc kubenswrapper[4857]: I1128 15:11:23.049722 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wt8mt\" (UniqueName: \"kubernetes.io/projected/236954d1-afa9-4393-97f9-3de005164e3d-kube-api-access-wt8mt\") pod \"redhat-marketplace-9dsxn\" (UID: \"236954d1-afa9-4393-97f9-3de005164e3d\") " pod="openshift-marketplace/redhat-marketplace-9dsxn" Nov 28 15:11:23 crc kubenswrapper[4857]: I1128 15:11:23.049771 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/236954d1-afa9-4393-97f9-3de005164e3d-utilities\") pod \"redhat-marketplace-9dsxn\" (UID: \"236954d1-afa9-4393-97f9-3de005164e3d\") " pod="openshift-marketplace/redhat-marketplace-9dsxn" Nov 28 15:11:23 crc kubenswrapper[4857]: I1128 15:11:23.049867 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/236954d1-afa9-4393-97f9-3de005164e3d-catalog-content\") pod \"redhat-marketplace-9dsxn\" (UID: \"236954d1-afa9-4393-97f9-3de005164e3d\") " pod="openshift-marketplace/redhat-marketplace-9dsxn" Nov 28 15:11:23 crc kubenswrapper[4857]: I1128 15:11:23.050689 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/236954d1-afa9-4393-97f9-3de005164e3d-catalog-content\") pod \"redhat-marketplace-9dsxn\" (UID: 
\"236954d1-afa9-4393-97f9-3de005164e3d\") " pod="openshift-marketplace/redhat-marketplace-9dsxn" Nov 28 15:11:23 crc kubenswrapper[4857]: I1128 15:11:23.051258 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/236954d1-afa9-4393-97f9-3de005164e3d-utilities\") pod \"redhat-marketplace-9dsxn\" (UID: \"236954d1-afa9-4393-97f9-3de005164e3d\") " pod="openshift-marketplace/redhat-marketplace-9dsxn" Nov 28 15:11:23 crc kubenswrapper[4857]: I1128 15:11:23.077829 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wt8mt\" (UniqueName: \"kubernetes.io/projected/236954d1-afa9-4393-97f9-3de005164e3d-kube-api-access-wt8mt\") pod \"redhat-marketplace-9dsxn\" (UID: \"236954d1-afa9-4393-97f9-3de005164e3d\") " pod="openshift-marketplace/redhat-marketplace-9dsxn" Nov 28 15:11:23 crc kubenswrapper[4857]: I1128 15:11:23.182921 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9dsxn" Nov 28 15:11:23 crc kubenswrapper[4857]: I1128 15:11:23.713334 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9dsxn"] Nov 28 15:11:23 crc kubenswrapper[4857]: I1128 15:11:23.784388 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9dsxn" event={"ID":"236954d1-afa9-4393-97f9-3de005164e3d","Type":"ContainerStarted","Data":"cabd630c18b5bbf6af0d7d1119fb026b39369c16f5a233da285d5e39fef94788"} Nov 28 15:11:24 crc kubenswrapper[4857]: I1128 15:11:24.795857 4857 generic.go:334] "Generic (PLEG): container finished" podID="236954d1-afa9-4393-97f9-3de005164e3d" containerID="59869be4831ed99044f6f74e1172dac1b29ccf26773e8059ee5418749b54b046" exitCode=0 Nov 28 15:11:24 crc kubenswrapper[4857]: I1128 15:11:24.796011 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9dsxn" event={"ID":"236954d1-afa9-4393-97f9-3de005164e3d","Type":"ContainerDied","Data":"59869be4831ed99044f6f74e1172dac1b29ccf26773e8059ee5418749b54b046"} Nov 28 15:11:26 crc kubenswrapper[4857]: I1128 15:11:26.740359 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-healthmanager-9sgx4" Nov 28 15:11:27 crc kubenswrapper[4857]: I1128 15:11:27.845384 4857 generic.go:334] "Generic (PLEG): container finished" podID="236954d1-afa9-4393-97f9-3de005164e3d" containerID="c6063ab1148405a29d70de72095518f2893a9ca316e8dfce6cf64e9b0f0a810c" exitCode=0 Nov 28 15:11:27 crc kubenswrapper[4857]: I1128 15:11:27.845485 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9dsxn" event={"ID":"236954d1-afa9-4393-97f9-3de005164e3d","Type":"ContainerDied","Data":"c6063ab1148405a29d70de72095518f2893a9ca316e8dfce6cf64e9b0f0a810c"} Nov 28 15:11:28 crc kubenswrapper[4857]: I1128 15:11:28.858199 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9dsxn" event={"ID":"236954d1-afa9-4393-97f9-3de005164e3d","Type":"ContainerStarted","Data":"9ee390f46613e30388dc058e3fb9fea3b32ad7d27b6ecaa6187bc025e05a59b8"} Nov 28 15:11:28 crc kubenswrapper[4857]: I1128 15:11:28.875498 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-9dsxn" podStartSLOduration=3.403230509 podStartE2EDuration="6.875479705s" podCreationTimestamp="2025-11-28 15:11:22 +0000 UTC" firstStartedPulling="2025-11-28 
15:11:24.798234219 +0000 UTC m=+6134.922175656" lastFinishedPulling="2025-11-28 15:11:28.270483405 +0000 UTC m=+6138.394424852" observedRunningTime="2025-11-28 15:11:28.875062804 +0000 UTC m=+6138.999004241" watchObservedRunningTime="2025-11-28 15:11:28.875479705 +0000 UTC m=+6138.999421142" Nov 28 15:11:29 crc kubenswrapper[4857]: I1128 15:11:29.099697 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-housekeeping-lrkh5" Nov 28 15:11:30 crc kubenswrapper[4857]: I1128 15:11:30.217475 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-worker-r6h2l" Nov 28 15:11:33 crc kubenswrapper[4857]: I1128 15:11:33.183454 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-9dsxn" Nov 28 15:11:33 crc kubenswrapper[4857]: I1128 15:11:33.184194 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-9dsxn" Nov 28 15:11:33 crc kubenswrapper[4857]: I1128 15:11:33.258492 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-9dsxn" Nov 28 15:11:34 crc kubenswrapper[4857]: I1128 15:11:34.002579 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-9dsxn" Nov 28 15:11:34 crc kubenswrapper[4857]: I1128 15:11:34.062840 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-9dsxn"] Nov 28 15:11:35 crc kubenswrapper[4857]: I1128 15:11:35.944479 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-9dsxn" podUID="236954d1-afa9-4393-97f9-3de005164e3d" containerName="registry-server" containerID="cri-o://9ee390f46613e30388dc058e3fb9fea3b32ad7d27b6ecaa6187bc025e05a59b8" gracePeriod=2 Nov 28 15:11:36 crc kubenswrapper[4857]: I1128 15:11:36.565998 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9dsxn" Nov 28 15:11:36 crc kubenswrapper[4857]: I1128 15:11:36.741805 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wt8mt\" (UniqueName: \"kubernetes.io/projected/236954d1-afa9-4393-97f9-3de005164e3d-kube-api-access-wt8mt\") pod \"236954d1-afa9-4393-97f9-3de005164e3d\" (UID: \"236954d1-afa9-4393-97f9-3de005164e3d\") " Nov 28 15:11:36 crc kubenswrapper[4857]: I1128 15:11:36.741910 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/236954d1-afa9-4393-97f9-3de005164e3d-utilities\") pod \"236954d1-afa9-4393-97f9-3de005164e3d\" (UID: \"236954d1-afa9-4393-97f9-3de005164e3d\") " Nov 28 15:11:36 crc kubenswrapper[4857]: I1128 15:11:36.742288 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/236954d1-afa9-4393-97f9-3de005164e3d-catalog-content\") pod \"236954d1-afa9-4393-97f9-3de005164e3d\" (UID: \"236954d1-afa9-4393-97f9-3de005164e3d\") " Nov 28 15:11:36 crc kubenswrapper[4857]: I1128 15:11:36.742781 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/236954d1-afa9-4393-97f9-3de005164e3d-utilities" (OuterVolumeSpecName: "utilities") pod "236954d1-afa9-4393-97f9-3de005164e3d" (UID: "236954d1-afa9-4393-97f9-3de005164e3d"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:11:36 crc kubenswrapper[4857]: I1128 15:11:36.743152 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/236954d1-afa9-4393-97f9-3de005164e3d-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:11:36 crc kubenswrapper[4857]: I1128 15:11:36.751352 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/236954d1-afa9-4393-97f9-3de005164e3d-kube-api-access-wt8mt" (OuterVolumeSpecName: "kube-api-access-wt8mt") pod "236954d1-afa9-4393-97f9-3de005164e3d" (UID: "236954d1-afa9-4393-97f9-3de005164e3d"). InnerVolumeSpecName "kube-api-access-wt8mt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:11:36 crc kubenswrapper[4857]: I1128 15:11:36.762652 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/236954d1-afa9-4393-97f9-3de005164e3d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "236954d1-afa9-4393-97f9-3de005164e3d" (UID: "236954d1-afa9-4393-97f9-3de005164e3d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:11:36 crc kubenswrapper[4857]: I1128 15:11:36.845960 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/236954d1-afa9-4393-97f9-3de005164e3d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:11:36 crc kubenswrapper[4857]: I1128 15:11:36.846011 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wt8mt\" (UniqueName: \"kubernetes.io/projected/236954d1-afa9-4393-97f9-3de005164e3d-kube-api-access-wt8mt\") on node \"crc\" DevicePath \"\"" Nov 28 15:11:36 crc kubenswrapper[4857]: I1128 15:11:36.956933 4857 generic.go:334] "Generic (PLEG): container finished" podID="236954d1-afa9-4393-97f9-3de005164e3d" containerID="9ee390f46613e30388dc058e3fb9fea3b32ad7d27b6ecaa6187bc025e05a59b8" exitCode=0 Nov 28 15:11:36 crc kubenswrapper[4857]: I1128 15:11:36.957021 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9dsxn" event={"ID":"236954d1-afa9-4393-97f9-3de005164e3d","Type":"ContainerDied","Data":"9ee390f46613e30388dc058e3fb9fea3b32ad7d27b6ecaa6187bc025e05a59b8"} Nov 28 15:11:36 crc kubenswrapper[4857]: I1128 15:11:36.957073 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9dsxn" Nov 28 15:11:36 crc kubenswrapper[4857]: I1128 15:11:36.957122 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9dsxn" event={"ID":"236954d1-afa9-4393-97f9-3de005164e3d","Type":"ContainerDied","Data":"cabd630c18b5bbf6af0d7d1119fb026b39369c16f5a233da285d5e39fef94788"} Nov 28 15:11:36 crc kubenswrapper[4857]: I1128 15:11:36.957162 4857 scope.go:117] "RemoveContainer" containerID="9ee390f46613e30388dc058e3fb9fea3b32ad7d27b6ecaa6187bc025e05a59b8" Nov 28 15:11:36 crc kubenswrapper[4857]: I1128 15:11:36.994386 4857 scope.go:117] "RemoveContainer" containerID="c6063ab1148405a29d70de72095518f2893a9ca316e8dfce6cf64e9b0f0a810c" Nov 28 15:11:37 crc kubenswrapper[4857]: I1128 15:11:37.025344 4857 scope.go:117] "RemoveContainer" containerID="59869be4831ed99044f6f74e1172dac1b29ccf26773e8059ee5418749b54b046" Nov 28 15:11:37 crc kubenswrapper[4857]: I1128 15:11:37.055236 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-9dsxn"] Nov 28 15:11:37 crc kubenswrapper[4857]: I1128 15:11:37.072925 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-9dsxn"] Nov 28 15:11:37 crc kubenswrapper[4857]: I1128 15:11:37.088333 4857 scope.go:117] "RemoveContainer" containerID="9ee390f46613e30388dc058e3fb9fea3b32ad7d27b6ecaa6187bc025e05a59b8" Nov 28 15:11:37 crc kubenswrapper[4857]: E1128 15:11:37.088997 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ee390f46613e30388dc058e3fb9fea3b32ad7d27b6ecaa6187bc025e05a59b8\": container with ID starting with 9ee390f46613e30388dc058e3fb9fea3b32ad7d27b6ecaa6187bc025e05a59b8 not found: ID does not exist" containerID="9ee390f46613e30388dc058e3fb9fea3b32ad7d27b6ecaa6187bc025e05a59b8" Nov 28 15:11:37 crc kubenswrapper[4857]: I1128 15:11:37.089044 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ee390f46613e30388dc058e3fb9fea3b32ad7d27b6ecaa6187bc025e05a59b8"} err="failed to get container status \"9ee390f46613e30388dc058e3fb9fea3b32ad7d27b6ecaa6187bc025e05a59b8\": rpc error: code = NotFound desc = could not find container \"9ee390f46613e30388dc058e3fb9fea3b32ad7d27b6ecaa6187bc025e05a59b8\": container with ID starting with 9ee390f46613e30388dc058e3fb9fea3b32ad7d27b6ecaa6187bc025e05a59b8 not found: ID does not exist" Nov 28 15:11:37 crc kubenswrapper[4857]: I1128 15:11:37.089080 4857 scope.go:117] "RemoveContainer" containerID="c6063ab1148405a29d70de72095518f2893a9ca316e8dfce6cf64e9b0f0a810c" Nov 28 15:11:37 crc kubenswrapper[4857]: E1128 15:11:37.089606 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c6063ab1148405a29d70de72095518f2893a9ca316e8dfce6cf64e9b0f0a810c\": container with ID starting with c6063ab1148405a29d70de72095518f2893a9ca316e8dfce6cf64e9b0f0a810c not found: ID does not exist" containerID="c6063ab1148405a29d70de72095518f2893a9ca316e8dfce6cf64e9b0f0a810c" Nov 28 15:11:37 crc kubenswrapper[4857]: I1128 15:11:37.089634 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6063ab1148405a29d70de72095518f2893a9ca316e8dfce6cf64e9b0f0a810c"} err="failed to get container status \"c6063ab1148405a29d70de72095518f2893a9ca316e8dfce6cf64e9b0f0a810c\": rpc error: code = NotFound desc = could not find 
container \"c6063ab1148405a29d70de72095518f2893a9ca316e8dfce6cf64e9b0f0a810c\": container with ID starting with c6063ab1148405a29d70de72095518f2893a9ca316e8dfce6cf64e9b0f0a810c not found: ID does not exist" Nov 28 15:11:37 crc kubenswrapper[4857]: I1128 15:11:37.089652 4857 scope.go:117] "RemoveContainer" containerID="59869be4831ed99044f6f74e1172dac1b29ccf26773e8059ee5418749b54b046" Nov 28 15:11:37 crc kubenswrapper[4857]: E1128 15:11:37.089974 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"59869be4831ed99044f6f74e1172dac1b29ccf26773e8059ee5418749b54b046\": container with ID starting with 59869be4831ed99044f6f74e1172dac1b29ccf26773e8059ee5418749b54b046 not found: ID does not exist" containerID="59869be4831ed99044f6f74e1172dac1b29ccf26773e8059ee5418749b54b046" Nov 28 15:11:37 crc kubenswrapper[4857]: I1128 15:11:37.090000 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"59869be4831ed99044f6f74e1172dac1b29ccf26773e8059ee5418749b54b046"} err="failed to get container status \"59869be4831ed99044f6f74e1172dac1b29ccf26773e8059ee5418749b54b046\": rpc error: code = NotFound desc = could not find container \"59869be4831ed99044f6f74e1172dac1b29ccf26773e8059ee5418749b54b046\": container with ID starting with 59869be4831ed99044f6f74e1172dac1b29ccf26773e8059ee5418749b54b046 not found: ID does not exist" Nov 28 15:11:38 crc kubenswrapper[4857]: I1128 15:11:38.248456 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="236954d1-afa9-4393-97f9-3de005164e3d" path="/var/lib/kubelet/pods/236954d1-afa9-4393-97f9-3de005164e3d/volumes" Nov 28 15:11:40 crc kubenswrapper[4857]: I1128 15:11:40.054725 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-f001-account-create-update-5z7gz"] Nov 28 15:11:40 crc kubenswrapper[4857]: I1128 15:11:40.068287 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-nlb4x"] Nov 28 15:11:40 crc kubenswrapper[4857]: I1128 15:11:40.078846 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-f001-account-create-update-5z7gz"] Nov 28 15:11:40 crc kubenswrapper[4857]: I1128 15:11:40.087057 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-nlb4x"] Nov 28 15:11:40 crc kubenswrapper[4857]: I1128 15:11:40.242246 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="571ee4e3-3f14-4e48-be69-dd76619de4fa" path="/var/lib/kubelet/pods/571ee4e3-3f14-4e48-be69-dd76619de4fa/volumes" Nov 28 15:11:40 crc kubenswrapper[4857]: I1128 15:11:40.243763 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb6077e1-1cfe-42fb-97fa-e67c1eecb683" path="/var/lib/kubelet/pods/fb6077e1-1cfe-42fb-97fa-e67c1eecb683/volumes" Nov 28 15:11:46 crc kubenswrapper[4857]: I1128 15:11:46.067938 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-dwvtm"] Nov 28 15:11:46 crc kubenswrapper[4857]: I1128 15:11:46.083296 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-dwvtm"] Nov 28 15:11:46 crc kubenswrapper[4857]: I1128 15:11:46.242866 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8326720-d37a-4506-93ce-1bec95cfebf8" path="/var/lib/kubelet/pods/e8326720-d37a-4506-93ce-1bec95cfebf8/volumes" Nov 28 15:12:15 crc kubenswrapper[4857]: I1128 15:12:15.068342 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/neutron-db-create-cnbmb"] Nov 28 15:12:15 crc kubenswrapper[4857]: I1128 15:12:15.082143 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-ed96-account-create-update-jvjfk"] Nov 28 15:12:15 crc kubenswrapper[4857]: I1128 15:12:15.093294 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-cnbmb"] Nov 28 15:12:15 crc kubenswrapper[4857]: I1128 15:12:15.117458 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-ed96-account-create-update-jvjfk"] Nov 28 15:12:16 crc kubenswrapper[4857]: I1128 15:12:16.253222 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="841bff9a-10d1-4c28-b32b-61c18325e2c5" path="/var/lib/kubelet/pods/841bff9a-10d1-4c28-b32b-61c18325e2c5/volumes" Nov 28 15:12:16 crc kubenswrapper[4857]: I1128 15:12:16.255103 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa05d80d-4f5e-4599-8faa-491f6f6a641f" path="/var/lib/kubelet/pods/aa05d80d-4f5e-4599-8faa-491f6f6a641f/volumes" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.142669 4857 scope.go:117] "RemoveContainer" containerID="7a2a489cf5cefc4618f7d79e150c9b12bcebe3908fa13cf6c78ce0569b154351" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.189709 4857 scope.go:117] "RemoveContainer" containerID="92590d34f84721a2e7caeae9f0e60434bdb61d2886da3a0495b4b614dc20dd25" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.265658 4857 scope.go:117] "RemoveContainer" containerID="9a32cf3eef57b99d2903f9dd4eff9f7c90e3edfdba0a3bfd484b6fd68afe798c" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.315330 4857 scope.go:117] "RemoveContainer" containerID="7f734298ba3a41cbf3c4ca474870c179f47438d78db4457c6b88ac8b89770c3a" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.357329 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-854678bdd5-rtdj4"] Nov 28 15:12:20 crc kubenswrapper[4857]: E1128 15:12:20.357861 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="236954d1-afa9-4393-97f9-3de005164e3d" containerName="registry-server" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.357874 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="236954d1-afa9-4393-97f9-3de005164e3d" containerName="registry-server" Nov 28 15:12:20 crc kubenswrapper[4857]: E1128 15:12:20.357896 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="236954d1-afa9-4393-97f9-3de005164e3d" containerName="extract-content" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.357902 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="236954d1-afa9-4393-97f9-3de005164e3d" containerName="extract-content" Nov 28 15:12:20 crc kubenswrapper[4857]: E1128 15:12:20.357924 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="236954d1-afa9-4393-97f9-3de005164e3d" containerName="extract-utilities" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.357932 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="236954d1-afa9-4393-97f9-3de005164e3d" containerName="extract-utilities" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.358253 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="236954d1-afa9-4393-97f9-3de005164e3d" containerName="registry-server" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.359641 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-854678bdd5-rtdj4" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.366933 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.367232 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.367485 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-frgtv" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.367790 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.387885 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-854678bdd5-rtdj4"] Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.392623 4857 scope.go:117] "RemoveContainer" containerID="bd795e43cf366b5b44628151afc3305d28178601944c9b5bded7f5993e49d5bb" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.449931 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.452746 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7" containerName="glance-httpd" containerID="cri-o://5d6fe1a0c601b448ca204da2df776fd586604ead34e0b8cdb4f9e2c73083753a" gracePeriod=30 Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.457036 4857 scope.go:117] "RemoveContainer" containerID="39ff0a0b7a01cd011676bb9f10e3e3f07f0635ee6b35c4cc53f803dcabbc8b0c" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.451422 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7" containerName="glance-log" containerID="cri-o://16c1e790752c7280a64ad482220e23888921db72437b11a041d8fb9cfad8945c" gracePeriod=30 Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.503407 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-6c4cb88c9c-rqnkt"] Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.505222 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-6c4cb88c9c-rqnkt" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.538212 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6c4cb88c9c-rqnkt"] Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.570330 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3f765626-ba83-4476-8183-af7018c79378-config-data\") pod \"horizon-854678bdd5-rtdj4\" (UID: \"3f765626-ba83-4476-8183-af7018c79378\") " pod="openstack/horizon-854678bdd5-rtdj4" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.570374 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f765626-ba83-4476-8183-af7018c79378-logs\") pod \"horizon-854678bdd5-rtdj4\" (UID: \"3f765626-ba83-4476-8183-af7018c79378\") " pod="openstack/horizon-854678bdd5-rtdj4" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.570429 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3f765626-ba83-4476-8183-af7018c79378-horizon-secret-key\") pod \"horizon-854678bdd5-rtdj4\" (UID: \"3f765626-ba83-4476-8183-af7018c79378\") " pod="openstack/horizon-854678bdd5-rtdj4" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.570459 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vrdzz\" (UniqueName: \"kubernetes.io/projected/3f765626-ba83-4476-8183-af7018c79378-kube-api-access-vrdzz\") pod \"horizon-854678bdd5-rtdj4\" (UID: \"3f765626-ba83-4476-8183-af7018c79378\") " pod="openstack/horizon-854678bdd5-rtdj4" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.570522 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3f765626-ba83-4476-8183-af7018c79378-scripts\") pod \"horizon-854678bdd5-rtdj4\" (UID: \"3f765626-ba83-4476-8183-af7018c79378\") " pod="openstack/horizon-854678bdd5-rtdj4" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.598123 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.598379 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="5dc2df72-1be4-4bad-8282-bb1b65226de0" containerName="glance-log" containerID="cri-o://e2e67b60862be42ab9c310508d71414a992de3cb1c03272009bbe6432e6cccd1" gracePeriod=30 Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.598511 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="5dc2df72-1be4-4bad-8282-bb1b65226de0" containerName="glance-httpd" containerID="cri-o://fd53eaaab460ddc9dbffd5a200e30daab396566fb89befa6db79d514fad93ab7" gracePeriod=30 Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.672575 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3f765626-ba83-4476-8183-af7018c79378-scripts\") pod \"horizon-854678bdd5-rtdj4\" (UID: \"3f765626-ba83-4476-8183-af7018c79378\") " pod="openstack/horizon-854678bdd5-rtdj4" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.673170 4857 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a9996acb-df90-4cbb-9ef6-ab35be1f070d-horizon-secret-key\") pod \"horizon-6c4cb88c9c-rqnkt\" (UID: \"a9996acb-df90-4cbb-9ef6-ab35be1f070d\") " pod="openstack/horizon-6c4cb88c9c-rqnkt" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.673298 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a9996acb-df90-4cbb-9ef6-ab35be1f070d-logs\") pod \"horizon-6c4cb88c9c-rqnkt\" (UID: \"a9996acb-df90-4cbb-9ef6-ab35be1f070d\") " pod="openstack/horizon-6c4cb88c9c-rqnkt" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.673393 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3f765626-ba83-4476-8183-af7018c79378-scripts\") pod \"horizon-854678bdd5-rtdj4\" (UID: \"3f765626-ba83-4476-8183-af7018c79378\") " pod="openstack/horizon-854678bdd5-rtdj4" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.673404 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3f765626-ba83-4476-8183-af7018c79378-config-data\") pod \"horizon-854678bdd5-rtdj4\" (UID: \"3f765626-ba83-4476-8183-af7018c79378\") " pod="openstack/horizon-854678bdd5-rtdj4" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.673484 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f765626-ba83-4476-8183-af7018c79378-logs\") pod \"horizon-854678bdd5-rtdj4\" (UID: \"3f765626-ba83-4476-8183-af7018c79378\") " pod="openstack/horizon-854678bdd5-rtdj4" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.673618 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3f765626-ba83-4476-8183-af7018c79378-horizon-secret-key\") pod \"horizon-854678bdd5-rtdj4\" (UID: \"3f765626-ba83-4476-8183-af7018c79378\") " pod="openstack/horizon-854678bdd5-rtdj4" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.673676 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5nn5h\" (UniqueName: \"kubernetes.io/projected/a9996acb-df90-4cbb-9ef6-ab35be1f070d-kube-api-access-5nn5h\") pod \"horizon-6c4cb88c9c-rqnkt\" (UID: \"a9996acb-df90-4cbb-9ef6-ab35be1f070d\") " pod="openstack/horizon-6c4cb88c9c-rqnkt" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.673703 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vrdzz\" (UniqueName: \"kubernetes.io/projected/3f765626-ba83-4476-8183-af7018c79378-kube-api-access-vrdzz\") pod \"horizon-854678bdd5-rtdj4\" (UID: \"3f765626-ba83-4476-8183-af7018c79378\") " pod="openstack/horizon-854678bdd5-rtdj4" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.673756 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a9996acb-df90-4cbb-9ef6-ab35be1f070d-config-data\") pod \"horizon-6c4cb88c9c-rqnkt\" (UID: \"a9996acb-df90-4cbb-9ef6-ab35be1f070d\") " pod="openstack/horizon-6c4cb88c9c-rqnkt" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.673816 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" 
(UniqueName: \"kubernetes.io/configmap/a9996acb-df90-4cbb-9ef6-ab35be1f070d-scripts\") pod \"horizon-6c4cb88c9c-rqnkt\" (UID: \"a9996acb-df90-4cbb-9ef6-ab35be1f070d\") " pod="openstack/horizon-6c4cb88c9c-rqnkt" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.673988 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f765626-ba83-4476-8183-af7018c79378-logs\") pod \"horizon-854678bdd5-rtdj4\" (UID: \"3f765626-ba83-4476-8183-af7018c79378\") " pod="openstack/horizon-854678bdd5-rtdj4" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.674589 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3f765626-ba83-4476-8183-af7018c79378-config-data\") pod \"horizon-854678bdd5-rtdj4\" (UID: \"3f765626-ba83-4476-8183-af7018c79378\") " pod="openstack/horizon-854678bdd5-rtdj4" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.681901 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3f765626-ba83-4476-8183-af7018c79378-horizon-secret-key\") pod \"horizon-854678bdd5-rtdj4\" (UID: \"3f765626-ba83-4476-8183-af7018c79378\") " pod="openstack/horizon-854678bdd5-rtdj4" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.690276 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vrdzz\" (UniqueName: \"kubernetes.io/projected/3f765626-ba83-4476-8183-af7018c79378-kube-api-access-vrdzz\") pod \"horizon-854678bdd5-rtdj4\" (UID: \"3f765626-ba83-4476-8183-af7018c79378\") " pod="openstack/horizon-854678bdd5-rtdj4" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.760309 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-854678bdd5-rtdj4" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.776982 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5nn5h\" (UniqueName: \"kubernetes.io/projected/a9996acb-df90-4cbb-9ef6-ab35be1f070d-kube-api-access-5nn5h\") pod \"horizon-6c4cb88c9c-rqnkt\" (UID: \"a9996acb-df90-4cbb-9ef6-ab35be1f070d\") " pod="openstack/horizon-6c4cb88c9c-rqnkt" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.777117 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a9996acb-df90-4cbb-9ef6-ab35be1f070d-config-data\") pod \"horizon-6c4cb88c9c-rqnkt\" (UID: \"a9996acb-df90-4cbb-9ef6-ab35be1f070d\") " pod="openstack/horizon-6c4cb88c9c-rqnkt" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.779639 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a9996acb-df90-4cbb-9ef6-ab35be1f070d-config-data\") pod \"horizon-6c4cb88c9c-rqnkt\" (UID: \"a9996acb-df90-4cbb-9ef6-ab35be1f070d\") " pod="openstack/horizon-6c4cb88c9c-rqnkt" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.780868 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a9996acb-df90-4cbb-9ef6-ab35be1f070d-scripts\") pod \"horizon-6c4cb88c9c-rqnkt\" (UID: \"a9996acb-df90-4cbb-9ef6-ab35be1f070d\") " pod="openstack/horizon-6c4cb88c9c-rqnkt" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.780972 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a9996acb-df90-4cbb-9ef6-ab35be1f070d-scripts\") pod \"horizon-6c4cb88c9c-rqnkt\" (UID: \"a9996acb-df90-4cbb-9ef6-ab35be1f070d\") " pod="openstack/horizon-6c4cb88c9c-rqnkt" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.781180 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a9996acb-df90-4cbb-9ef6-ab35be1f070d-horizon-secret-key\") pod \"horizon-6c4cb88c9c-rqnkt\" (UID: \"a9996acb-df90-4cbb-9ef6-ab35be1f070d\") " pod="openstack/horizon-6c4cb88c9c-rqnkt" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.782018 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a9996acb-df90-4cbb-9ef6-ab35be1f070d-logs\") pod \"horizon-6c4cb88c9c-rqnkt\" (UID: \"a9996acb-df90-4cbb-9ef6-ab35be1f070d\") " pod="openstack/horizon-6c4cb88c9c-rqnkt" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.782453 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a9996acb-df90-4cbb-9ef6-ab35be1f070d-logs\") pod \"horizon-6c4cb88c9c-rqnkt\" (UID: \"a9996acb-df90-4cbb-9ef6-ab35be1f070d\") " pod="openstack/horizon-6c4cb88c9c-rqnkt" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.785246 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a9996acb-df90-4cbb-9ef6-ab35be1f070d-horizon-secret-key\") pod \"horizon-6c4cb88c9c-rqnkt\" (UID: \"a9996acb-df90-4cbb-9ef6-ab35be1f070d\") " pod="openstack/horizon-6c4cb88c9c-rqnkt" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.794137 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-5nn5h\" (UniqueName: \"kubernetes.io/projected/a9996acb-df90-4cbb-9ef6-ab35be1f070d-kube-api-access-5nn5h\") pod \"horizon-6c4cb88c9c-rqnkt\" (UID: \"a9996acb-df90-4cbb-9ef6-ab35be1f070d\") " pod="openstack/horizon-6c4cb88c9c-rqnkt" Nov 28 15:12:20 crc kubenswrapper[4857]: I1128 15:12:20.877689 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6c4cb88c9c-rqnkt" Nov 28 15:12:21 crc kubenswrapper[4857]: I1128 15:12:21.657230 4857 generic.go:334] "Generic (PLEG): container finished" podID="5dc2df72-1be4-4bad-8282-bb1b65226de0" containerID="e2e67b60862be42ab9c310508d71414a992de3cb1c03272009bbe6432e6cccd1" exitCode=143 Nov 28 15:12:21 crc kubenswrapper[4857]: I1128 15:12:21.657970 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5dc2df72-1be4-4bad-8282-bb1b65226de0","Type":"ContainerDied","Data":"e2e67b60862be42ab9c310508d71414a992de3cb1c03272009bbe6432e6cccd1"} Nov 28 15:12:21 crc kubenswrapper[4857]: I1128 15:12:21.660857 4857 generic.go:334] "Generic (PLEG): container finished" podID="cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7" containerID="16c1e790752c7280a64ad482220e23888921db72437b11a041d8fb9cfad8945c" exitCode=143 Nov 28 15:12:21 crc kubenswrapper[4857]: I1128 15:12:21.660909 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7","Type":"ContainerDied","Data":"16c1e790752c7280a64ad482220e23888921db72437b11a041d8fb9cfad8945c"} Nov 28 15:12:21 crc kubenswrapper[4857]: I1128 15:12:21.708353 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6c4cb88c9c-rqnkt"] Nov 28 15:12:21 crc kubenswrapper[4857]: I1128 15:12:21.773885 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-66cdcfc869-s88hx"] Nov 28 15:12:21 crc kubenswrapper[4857]: I1128 15:12:21.775515 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-66cdcfc869-s88hx" Nov 28 15:12:21 crc kubenswrapper[4857]: I1128 15:12:21.792827 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-66cdcfc869-s88hx"] Nov 28 15:12:21 crc kubenswrapper[4857]: I1128 15:12:21.802089 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-854678bdd5-rtdj4"] Nov 28 15:12:21 crc kubenswrapper[4857]: I1128 15:12:21.940548 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-twxbk\" (UniqueName: \"kubernetes.io/projected/26ad8963-2e63-4a75-9de9-90e28f9153d5-kube-api-access-twxbk\") pod \"horizon-66cdcfc869-s88hx\" (UID: \"26ad8963-2e63-4a75-9de9-90e28f9153d5\") " pod="openstack/horizon-66cdcfc869-s88hx" Nov 28 15:12:21 crc kubenswrapper[4857]: I1128 15:12:21.940636 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/26ad8963-2e63-4a75-9de9-90e28f9153d5-horizon-secret-key\") pod \"horizon-66cdcfc869-s88hx\" (UID: \"26ad8963-2e63-4a75-9de9-90e28f9153d5\") " pod="openstack/horizon-66cdcfc869-s88hx" Nov 28 15:12:21 crc kubenswrapper[4857]: I1128 15:12:21.940721 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/26ad8963-2e63-4a75-9de9-90e28f9153d5-scripts\") pod \"horizon-66cdcfc869-s88hx\" (UID: \"26ad8963-2e63-4a75-9de9-90e28f9153d5\") " pod="openstack/horizon-66cdcfc869-s88hx" Nov 28 15:12:21 crc kubenswrapper[4857]: I1128 15:12:21.940839 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/26ad8963-2e63-4a75-9de9-90e28f9153d5-config-data\") pod \"horizon-66cdcfc869-s88hx\" (UID: \"26ad8963-2e63-4a75-9de9-90e28f9153d5\") " pod="openstack/horizon-66cdcfc869-s88hx" Nov 28 15:12:21 crc kubenswrapper[4857]: I1128 15:12:21.941237 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/26ad8963-2e63-4a75-9de9-90e28f9153d5-logs\") pod \"horizon-66cdcfc869-s88hx\" (UID: \"26ad8963-2e63-4a75-9de9-90e28f9153d5\") " pod="openstack/horizon-66cdcfc869-s88hx" Nov 28 15:12:22 crc kubenswrapper[4857]: I1128 15:12:22.044070 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/26ad8963-2e63-4a75-9de9-90e28f9153d5-logs\") pod \"horizon-66cdcfc869-s88hx\" (UID: \"26ad8963-2e63-4a75-9de9-90e28f9153d5\") " pod="openstack/horizon-66cdcfc869-s88hx" Nov 28 15:12:22 crc kubenswrapper[4857]: I1128 15:12:22.045127 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-twxbk\" (UniqueName: \"kubernetes.io/projected/26ad8963-2e63-4a75-9de9-90e28f9153d5-kube-api-access-twxbk\") pod \"horizon-66cdcfc869-s88hx\" (UID: \"26ad8963-2e63-4a75-9de9-90e28f9153d5\") " pod="openstack/horizon-66cdcfc869-s88hx" Nov 28 15:12:22 crc kubenswrapper[4857]: I1128 15:12:22.045259 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/26ad8963-2e63-4a75-9de9-90e28f9153d5-horizon-secret-key\") pod \"horizon-66cdcfc869-s88hx\" (UID: \"26ad8963-2e63-4a75-9de9-90e28f9153d5\") " pod="openstack/horizon-66cdcfc869-s88hx" Nov 28 15:12:22 crc kubenswrapper[4857]: I1128 
15:12:22.044969 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/26ad8963-2e63-4a75-9de9-90e28f9153d5-logs\") pod \"horizon-66cdcfc869-s88hx\" (UID: \"26ad8963-2e63-4a75-9de9-90e28f9153d5\") " pod="openstack/horizon-66cdcfc869-s88hx" Nov 28 15:12:22 crc kubenswrapper[4857]: I1128 15:12:22.045440 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/26ad8963-2e63-4a75-9de9-90e28f9153d5-scripts\") pod \"horizon-66cdcfc869-s88hx\" (UID: \"26ad8963-2e63-4a75-9de9-90e28f9153d5\") " pod="openstack/horizon-66cdcfc869-s88hx" Nov 28 15:12:22 crc kubenswrapper[4857]: I1128 15:12:22.045572 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/26ad8963-2e63-4a75-9de9-90e28f9153d5-config-data\") pod \"horizon-66cdcfc869-s88hx\" (UID: \"26ad8963-2e63-4a75-9de9-90e28f9153d5\") " pod="openstack/horizon-66cdcfc869-s88hx" Nov 28 15:12:22 crc kubenswrapper[4857]: I1128 15:12:22.046747 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/26ad8963-2e63-4a75-9de9-90e28f9153d5-config-data\") pod \"horizon-66cdcfc869-s88hx\" (UID: \"26ad8963-2e63-4a75-9de9-90e28f9153d5\") " pod="openstack/horizon-66cdcfc869-s88hx" Nov 28 15:12:22 crc kubenswrapper[4857]: I1128 15:12:22.048110 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/26ad8963-2e63-4a75-9de9-90e28f9153d5-scripts\") pod \"horizon-66cdcfc869-s88hx\" (UID: \"26ad8963-2e63-4a75-9de9-90e28f9153d5\") " pod="openstack/horizon-66cdcfc869-s88hx" Nov 28 15:12:22 crc kubenswrapper[4857]: I1128 15:12:22.054063 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/26ad8963-2e63-4a75-9de9-90e28f9153d5-horizon-secret-key\") pod \"horizon-66cdcfc869-s88hx\" (UID: \"26ad8963-2e63-4a75-9de9-90e28f9153d5\") " pod="openstack/horizon-66cdcfc869-s88hx" Nov 28 15:12:22 crc kubenswrapper[4857]: I1128 15:12:22.076850 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-twxbk\" (UniqueName: \"kubernetes.io/projected/26ad8963-2e63-4a75-9de9-90e28f9153d5-kube-api-access-twxbk\") pod \"horizon-66cdcfc869-s88hx\" (UID: \"26ad8963-2e63-4a75-9de9-90e28f9153d5\") " pod="openstack/horizon-66cdcfc869-s88hx" Nov 28 15:12:22 crc kubenswrapper[4857]: I1128 15:12:22.101292 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-66cdcfc869-s88hx" Nov 28 15:12:22 crc kubenswrapper[4857]: I1128 15:12:22.189237 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6c4cb88c9c-rqnkt"] Nov 28 15:12:22 crc kubenswrapper[4857]: I1128 15:12:22.676049 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6c4cb88c9c-rqnkt" event={"ID":"a9996acb-df90-4cbb-9ef6-ab35be1f070d","Type":"ContainerStarted","Data":"3cd4d565a88b2d007d0deadf3c3e2ae8236d4a97ff50c6ea482b44acb160355e"} Nov 28 15:12:22 crc kubenswrapper[4857]: I1128 15:12:22.690331 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-66cdcfc869-s88hx"] Nov 28 15:12:22 crc kubenswrapper[4857]: I1128 15:12:22.693149 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-854678bdd5-rtdj4" event={"ID":"3f765626-ba83-4476-8183-af7018c79378","Type":"ContainerStarted","Data":"59e32b174ade69b1e7494867c68af181e66c039ee80ff7364084bf6eda93183d"} Nov 28 15:12:23 crc kubenswrapper[4857]: I1128 15:12:23.039610 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-whh9q"] Nov 28 15:12:23 crc kubenswrapper[4857]: I1128 15:12:23.053642 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-whh9q"] Nov 28 15:12:23 crc kubenswrapper[4857]: I1128 15:12:23.707723 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-66cdcfc869-s88hx" event={"ID":"26ad8963-2e63-4a75-9de9-90e28f9153d5","Type":"ContainerStarted","Data":"0b7574c77bdd057708879337575f001f53967e076e1295e61dd70d6f9fa8cc8a"} Nov 28 15:12:23 crc kubenswrapper[4857]: I1128 15:12:23.758300 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="5dc2df72-1be4-4bad-8282-bb1b65226de0" containerName="glance-log" probeResult="failure" output="Get \"http://10.217.1.47:9292/healthcheck\": read tcp 10.217.0.2:60624->10.217.1.47:9292: read: connection reset by peer" Nov 28 15:12:23 crc kubenswrapper[4857]: I1128 15:12:23.758336 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="5dc2df72-1be4-4bad-8282-bb1b65226de0" containerName="glance-httpd" probeResult="failure" output="Get \"http://10.217.1.47:9292/healthcheck\": read tcp 10.217.0.2:60638->10.217.1.47:9292: read: connection reset by peer" Nov 28 15:12:24 crc kubenswrapper[4857]: I1128 15:12:24.256053 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dd3bbc29-eabb-408a-88d2-5adfe6e67e36" path="/var/lib/kubelet/pods/dd3bbc29-eabb-408a-88d2-5adfe6e67e36/volumes" Nov 28 15:12:24 crc kubenswrapper[4857]: E1128 15:12:24.616343 4857 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5dc2df72_1be4_4bad_8282_bb1b65226de0.slice/crio-conmon-fd53eaaab460ddc9dbffd5a200e30daab396566fb89befa6db79d514fad93ab7.scope\": RecentStats: unable to find data in memory cache]" Nov 28 15:12:24 crc kubenswrapper[4857]: I1128 15:12:24.720008 4857 generic.go:334] "Generic (PLEG): container finished" podID="cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7" containerID="5d6fe1a0c601b448ca204da2df776fd586604ead34e0b8cdb4f9e2c73083753a" exitCode=0 Nov 28 15:12:24 crc kubenswrapper[4857]: I1128 15:12:24.720199 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" 
event={"ID":"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7","Type":"ContainerDied","Data":"5d6fe1a0c601b448ca204da2df776fd586604ead34e0b8cdb4f9e2c73083753a"} Nov 28 15:12:24 crc kubenswrapper[4857]: I1128 15:12:24.723289 4857 generic.go:334] "Generic (PLEG): container finished" podID="5dc2df72-1be4-4bad-8282-bb1b65226de0" containerID="fd53eaaab460ddc9dbffd5a200e30daab396566fb89befa6db79d514fad93ab7" exitCode=0 Nov 28 15:12:24 crc kubenswrapper[4857]: I1128 15:12:24.723320 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5dc2df72-1be4-4bad-8282-bb1b65226de0","Type":"ContainerDied","Data":"fd53eaaab460ddc9dbffd5a200e30daab396566fb89befa6db79d514fad93ab7"} Nov 28 15:12:29 crc kubenswrapper[4857]: I1128 15:12:29.784544 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"5dc2df72-1be4-4bad-8282-bb1b65226de0","Type":"ContainerDied","Data":"d62232f9652290721f668239db343d348023961c72df0a723e06f9819c3875c9"} Nov 28 15:12:29 crc kubenswrapper[4857]: I1128 15:12:29.785287 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d62232f9652290721f668239db343d348023961c72df0a723e06f9819c3875c9" Nov 28 15:12:29 crc kubenswrapper[4857]: I1128 15:12:29.793125 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7","Type":"ContainerDied","Data":"c55680c5ca1b315726fb28e2b61e1047676c2bde171c1a58fa7704e0fe5ec42a"} Nov 28 15:12:29 crc kubenswrapper[4857]: I1128 15:12:29.793168 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c55680c5ca1b315726fb28e2b61e1047676c2bde171c1a58fa7704e0fe5ec42a" Nov 28 15:12:29 crc kubenswrapper[4857]: I1128 15:12:29.801642 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 15:12:29 crc kubenswrapper[4857]: I1128 15:12:29.821884 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 15:12:29 crc kubenswrapper[4857]: I1128 15:12:29.950747 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-config-data\") pod \"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7\" (UID: \"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7\") " Nov 28 15:12:29 crc kubenswrapper[4857]: I1128 15:12:29.950824 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5dc2df72-1be4-4bad-8282-bb1b65226de0-combined-ca-bundle\") pod \"5dc2df72-1be4-4bad-8282-bb1b65226de0\" (UID: \"5dc2df72-1be4-4bad-8282-bb1b65226de0\") " Nov 28 15:12:29 crc kubenswrapper[4857]: I1128 15:12:29.950900 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/5dc2df72-1be4-4bad-8282-bb1b65226de0-ceph\") pod \"5dc2df72-1be4-4bad-8282-bb1b65226de0\" (UID: \"5dc2df72-1be4-4bad-8282-bb1b65226de0\") " Nov 28 15:12:29 crc kubenswrapper[4857]: I1128 15:12:29.950940 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5dc2df72-1be4-4bad-8282-bb1b65226de0-config-data\") pod \"5dc2df72-1be4-4bad-8282-bb1b65226de0\" (UID: \"5dc2df72-1be4-4bad-8282-bb1b65226de0\") " Nov 28 15:12:29 crc kubenswrapper[4857]: I1128 15:12:29.951027 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5dc2df72-1be4-4bad-8282-bb1b65226de0-scripts\") pod \"5dc2df72-1be4-4bad-8282-bb1b65226de0\" (UID: \"5dc2df72-1be4-4bad-8282-bb1b65226de0\") " Nov 28 15:12:29 crc kubenswrapper[4857]: I1128 15:12:29.951099 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-logs\") pod \"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7\" (UID: \"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7\") " Nov 28 15:12:29 crc kubenswrapper[4857]: I1128 15:12:29.951119 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s8jd5\" (UniqueName: \"kubernetes.io/projected/5dc2df72-1be4-4bad-8282-bb1b65226de0-kube-api-access-s8jd5\") pod \"5dc2df72-1be4-4bad-8282-bb1b65226de0\" (UID: \"5dc2df72-1be4-4bad-8282-bb1b65226de0\") " Nov 28 15:12:29 crc kubenswrapper[4857]: I1128 15:12:29.951241 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-httpd-run\") pod \"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7\" (UID: \"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7\") " Nov 28 15:12:29 crc kubenswrapper[4857]: I1128 15:12:29.951298 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5dc2df72-1be4-4bad-8282-bb1b65226de0-logs\") pod \"5dc2df72-1be4-4bad-8282-bb1b65226de0\" (UID: \"5dc2df72-1be4-4bad-8282-bb1b65226de0\") " Nov 28 15:12:29 crc kubenswrapper[4857]: I1128 15:12:29.951357 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5dc2df72-1be4-4bad-8282-bb1b65226de0-httpd-run\") pod \"5dc2df72-1be4-4bad-8282-bb1b65226de0\" (UID: \"5dc2df72-1be4-4bad-8282-bb1b65226de0\") " Nov 28 15:12:29 crc 
kubenswrapper[4857]: I1128 15:12:29.951395 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2ml7v\" (UniqueName: \"kubernetes.io/projected/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-kube-api-access-2ml7v\") pod \"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7\" (UID: \"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7\") " Nov 28 15:12:29 crc kubenswrapper[4857]: I1128 15:12:29.951413 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-scripts\") pod \"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7\" (UID: \"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7\") " Nov 28 15:12:29 crc kubenswrapper[4857]: I1128 15:12:29.951458 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-ceph\") pod \"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7\" (UID: \"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7\") " Nov 28 15:12:29 crc kubenswrapper[4857]: I1128 15:12:29.951531 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-combined-ca-bundle\") pod \"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7\" (UID: \"cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7\") " Nov 28 15:12:29 crc kubenswrapper[4857]: I1128 15:12:29.954629 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5dc2df72-1be4-4bad-8282-bb1b65226de0-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "5dc2df72-1be4-4bad-8282-bb1b65226de0" (UID: "5dc2df72-1be4-4bad-8282-bb1b65226de0"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:12:29 crc kubenswrapper[4857]: I1128 15:12:29.955315 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7" (UID: "cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:12:29 crc kubenswrapper[4857]: I1128 15:12:29.956576 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5dc2df72-1be4-4bad-8282-bb1b65226de0-logs" (OuterVolumeSpecName: "logs") pod "5dc2df72-1be4-4bad-8282-bb1b65226de0" (UID: "5dc2df72-1be4-4bad-8282-bb1b65226de0"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:12:29 crc kubenswrapper[4857]: I1128 15:12:29.961545 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5dc2df72-1be4-4bad-8282-bb1b65226de0-ceph" (OuterVolumeSpecName: "ceph") pod "5dc2df72-1be4-4bad-8282-bb1b65226de0" (UID: "5dc2df72-1be4-4bad-8282-bb1b65226de0"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:12:29 crc kubenswrapper[4857]: I1128 15:12:29.968109 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5dc2df72-1be4-4bad-8282-bb1b65226de0-scripts" (OuterVolumeSpecName: "scripts") pod "5dc2df72-1be4-4bad-8282-bb1b65226de0" (UID: "5dc2df72-1be4-4bad-8282-bb1b65226de0"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:12:29 crc kubenswrapper[4857]: I1128 15:12:29.968190 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-ceph" (OuterVolumeSpecName: "ceph") pod "cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7" (UID: "cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:12:29 crc kubenswrapper[4857]: I1128 15:12:29.968228 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5dc2df72-1be4-4bad-8282-bb1b65226de0-kube-api-access-s8jd5" (OuterVolumeSpecName: "kube-api-access-s8jd5") pod "5dc2df72-1be4-4bad-8282-bb1b65226de0" (UID: "5dc2df72-1be4-4bad-8282-bb1b65226de0"). InnerVolumeSpecName "kube-api-access-s8jd5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:12:29 crc kubenswrapper[4857]: I1128 15:12:29.969184 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-kube-api-access-2ml7v" (OuterVolumeSpecName: "kube-api-access-2ml7v") pod "cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7" (UID: "cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7"). InnerVolumeSpecName "kube-api-access-2ml7v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:12:29 crc kubenswrapper[4857]: I1128 15:12:29.969837 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-logs" (OuterVolumeSpecName: "logs") pod "cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7" (UID: "cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:12:29 crc kubenswrapper[4857]: I1128 15:12:29.975320 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-scripts" (OuterVolumeSpecName: "scripts") pod "cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7" (UID: "cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.015242 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7" (UID: "cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.038022 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5dc2df72-1be4-4bad-8282-bb1b65226de0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5dc2df72-1be4-4bad-8282-bb1b65226de0" (UID: "5dc2df72-1be4-4bad-8282-bb1b65226de0"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.056457 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2ml7v\" (UniqueName: \"kubernetes.io/projected/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-kube-api-access-2ml7v\") on node \"crc\" DevicePath \"\"" Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.056578 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.056588 4857 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.056596 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.056605 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5dc2df72-1be4-4bad-8282-bb1b65226de0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.056614 4857 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/5dc2df72-1be4-4bad-8282-bb1b65226de0-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.056622 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5dc2df72-1be4-4bad-8282-bb1b65226de0-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.056629 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.056637 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s8jd5\" (UniqueName: \"kubernetes.io/projected/5dc2df72-1be4-4bad-8282-bb1b65226de0-kube-api-access-s8jd5\") on node \"crc\" DevicePath \"\"" Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.056645 4857 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.056653 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5dc2df72-1be4-4bad-8282-bb1b65226de0-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.056662 4857 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5dc2df72-1be4-4bad-8282-bb1b65226de0-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.071688 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5dc2df72-1be4-4bad-8282-bb1b65226de0-config-data" (OuterVolumeSpecName: "config-data") pod "5dc2df72-1be4-4bad-8282-bb1b65226de0" (UID: "5dc2df72-1be4-4bad-8282-bb1b65226de0"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.096177 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-config-data" (OuterVolumeSpecName: "config-data") pod "cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7" (UID: "cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.158504 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.158546 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5dc2df72-1be4-4bad-8282-bb1b65226de0-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.807401 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6c4cb88c9c-rqnkt" event={"ID":"a9996acb-df90-4cbb-9ef6-ab35be1f070d","Type":"ContainerStarted","Data":"2a4b0d3497a31c279b13aa994d6356b22618e07f161549c600ada0d2e498d02e"} Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.810854 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-66cdcfc869-s88hx" event={"ID":"26ad8963-2e63-4a75-9de9-90e28f9153d5","Type":"ContainerStarted","Data":"f58a5a2bcb297d26965ef57a0757d481ad8ad9333943bd5d5fa90f9fb903146d"} Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.813483 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-854678bdd5-rtdj4" event={"ID":"3f765626-ba83-4476-8183-af7018c79378","Type":"ContainerStarted","Data":"dcd0262e12092523c7b6336fd5ef75d1ea913079025cea5f6c701028194e24af"} Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.813599 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.813600 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.855683 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.866261 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.879226 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.896124 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.920209 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:12:30 crc kubenswrapper[4857]: E1128 15:12:30.920937 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5dc2df72-1be4-4bad-8282-bb1b65226de0" containerName="glance-log" Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.921091 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="5dc2df72-1be4-4bad-8282-bb1b65226de0" containerName="glance-log" Nov 28 15:12:30 crc kubenswrapper[4857]: E1128 15:12:30.921198 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7" containerName="glance-httpd" Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.921274 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7" containerName="glance-httpd" Nov 28 15:12:30 crc kubenswrapper[4857]: E1128 15:12:30.921366 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5dc2df72-1be4-4bad-8282-bb1b65226de0" containerName="glance-httpd" Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.921461 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="5dc2df72-1be4-4bad-8282-bb1b65226de0" containerName="glance-httpd" Nov 28 15:12:30 crc kubenswrapper[4857]: E1128 15:12:30.921564 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7" containerName="glance-log" Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.921652 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7" containerName="glance-log" Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.922164 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7" containerName="glance-httpd" Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.922275 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="5dc2df72-1be4-4bad-8282-bb1b65226de0" containerName="glance-log" Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.922369 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7" containerName="glance-log" Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.922450 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="5dc2df72-1be4-4bad-8282-bb1b65226de0" containerName="glance-httpd" Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.924354 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.927344 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.927518 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.928024 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-4992l" Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.933218 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.960128 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.961836 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.966613 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 28 15:12:30 crc kubenswrapper[4857]: I1128 15:12:30.983035 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.092372 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hshk6\" (UniqueName: \"kubernetes.io/projected/1db6fc24-0e64-4d28-b4c6-dd02be9f6b53-kube-api-access-hshk6\") pod \"glance-default-internal-api-0\" (UID: \"1db6fc24-0e64-4d28-b4c6-dd02be9f6b53\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.092417 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9d01547-142e-4829-b702-ba934ad013e9-config-data\") pod \"glance-default-external-api-0\" (UID: \"c9d01547-142e-4829-b702-ba934ad013e9\") " pod="openstack/glance-default-external-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.092444 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/1db6fc24-0e64-4d28-b4c6-dd02be9f6b53-ceph\") pod \"glance-default-internal-api-0\" (UID: \"1db6fc24-0e64-4d28-b4c6-dd02be9f6b53\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.092476 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c9d01547-142e-4829-b702-ba934ad013e9-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c9d01547-142e-4829-b702-ba934ad013e9\") " pod="openstack/glance-default-external-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.093033 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1db6fc24-0e64-4d28-b4c6-dd02be9f6b53-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"1db6fc24-0e64-4d28-b4c6-dd02be9f6b53\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.093126 4857 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1db6fc24-0e64-4d28-b4c6-dd02be9f6b53-config-data\") pod \"glance-default-internal-api-0\" (UID: \"1db6fc24-0e64-4d28-b4c6-dd02be9f6b53\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.093318 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9d01547-142e-4829-b702-ba934ad013e9-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c9d01547-142e-4829-b702-ba934ad013e9\") " pod="openstack/glance-default-external-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.093451 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-28b8c\" (UniqueName: \"kubernetes.io/projected/c9d01547-142e-4829-b702-ba934ad013e9-kube-api-access-28b8c\") pod \"glance-default-external-api-0\" (UID: \"c9d01547-142e-4829-b702-ba934ad013e9\") " pod="openstack/glance-default-external-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.093510 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c9d01547-142e-4829-b702-ba934ad013e9-logs\") pod \"glance-default-external-api-0\" (UID: \"c9d01547-142e-4829-b702-ba934ad013e9\") " pod="openstack/glance-default-external-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.093670 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c9d01547-142e-4829-b702-ba934ad013e9-scripts\") pod \"glance-default-external-api-0\" (UID: \"c9d01547-142e-4829-b702-ba934ad013e9\") " pod="openstack/glance-default-external-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.093719 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/c9d01547-142e-4829-b702-ba934ad013e9-ceph\") pod \"glance-default-external-api-0\" (UID: \"c9d01547-142e-4829-b702-ba934ad013e9\") " pod="openstack/glance-default-external-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.093806 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1db6fc24-0e64-4d28-b4c6-dd02be9f6b53-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"1db6fc24-0e64-4d28-b4c6-dd02be9f6b53\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.093915 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1db6fc24-0e64-4d28-b4c6-dd02be9f6b53-scripts\") pod \"glance-default-internal-api-0\" (UID: \"1db6fc24-0e64-4d28-b4c6-dd02be9f6b53\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.094025 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1db6fc24-0e64-4d28-b4c6-dd02be9f6b53-logs\") pod \"glance-default-internal-api-0\" (UID: \"1db6fc24-0e64-4d28-b4c6-dd02be9f6b53\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:12:31 crc 
kubenswrapper[4857]: I1128 15:12:31.196138 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9d01547-142e-4829-b702-ba934ad013e9-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c9d01547-142e-4829-b702-ba934ad013e9\") " pod="openstack/glance-default-external-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.196212 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-28b8c\" (UniqueName: \"kubernetes.io/projected/c9d01547-142e-4829-b702-ba934ad013e9-kube-api-access-28b8c\") pod \"glance-default-external-api-0\" (UID: \"c9d01547-142e-4829-b702-ba934ad013e9\") " pod="openstack/glance-default-external-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.196241 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c9d01547-142e-4829-b702-ba934ad013e9-logs\") pod \"glance-default-external-api-0\" (UID: \"c9d01547-142e-4829-b702-ba934ad013e9\") " pod="openstack/glance-default-external-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.196286 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c9d01547-142e-4829-b702-ba934ad013e9-scripts\") pod \"glance-default-external-api-0\" (UID: \"c9d01547-142e-4829-b702-ba934ad013e9\") " pod="openstack/glance-default-external-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.196311 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/c9d01547-142e-4829-b702-ba934ad013e9-ceph\") pod \"glance-default-external-api-0\" (UID: \"c9d01547-142e-4829-b702-ba934ad013e9\") " pod="openstack/glance-default-external-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.196339 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1db6fc24-0e64-4d28-b4c6-dd02be9f6b53-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"1db6fc24-0e64-4d28-b4c6-dd02be9f6b53\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.196370 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1db6fc24-0e64-4d28-b4c6-dd02be9f6b53-scripts\") pod \"glance-default-internal-api-0\" (UID: \"1db6fc24-0e64-4d28-b4c6-dd02be9f6b53\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.196399 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1db6fc24-0e64-4d28-b4c6-dd02be9f6b53-logs\") pod \"glance-default-internal-api-0\" (UID: \"1db6fc24-0e64-4d28-b4c6-dd02be9f6b53\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.196431 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hshk6\" (UniqueName: \"kubernetes.io/projected/1db6fc24-0e64-4d28-b4c6-dd02be9f6b53-kube-api-access-hshk6\") pod \"glance-default-internal-api-0\" (UID: \"1db6fc24-0e64-4d28-b4c6-dd02be9f6b53\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.196456 4857 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9d01547-142e-4829-b702-ba934ad013e9-config-data\") pod \"glance-default-external-api-0\" (UID: \"c9d01547-142e-4829-b702-ba934ad013e9\") " pod="openstack/glance-default-external-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.196476 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/1db6fc24-0e64-4d28-b4c6-dd02be9f6b53-ceph\") pod \"glance-default-internal-api-0\" (UID: \"1db6fc24-0e64-4d28-b4c6-dd02be9f6b53\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.196506 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c9d01547-142e-4829-b702-ba934ad013e9-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c9d01547-142e-4829-b702-ba934ad013e9\") " pod="openstack/glance-default-external-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.196646 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1db6fc24-0e64-4d28-b4c6-dd02be9f6b53-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"1db6fc24-0e64-4d28-b4c6-dd02be9f6b53\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.196674 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1db6fc24-0e64-4d28-b4c6-dd02be9f6b53-config-data\") pod \"glance-default-internal-api-0\" (UID: \"1db6fc24-0e64-4d28-b4c6-dd02be9f6b53\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.197672 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c9d01547-142e-4829-b702-ba934ad013e9-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c9d01547-142e-4829-b702-ba934ad013e9\") " pod="openstack/glance-default-external-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.197758 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c9d01547-142e-4829-b702-ba934ad013e9-logs\") pod \"glance-default-external-api-0\" (UID: \"c9d01547-142e-4829-b702-ba934ad013e9\") " pod="openstack/glance-default-external-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.208570 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1db6fc24-0e64-4d28-b4c6-dd02be9f6b53-logs\") pod \"glance-default-internal-api-0\" (UID: \"1db6fc24-0e64-4d28-b4c6-dd02be9f6b53\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.209677 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c9d01547-142e-4829-b702-ba934ad013e9-scripts\") pod \"glance-default-external-api-0\" (UID: \"c9d01547-142e-4829-b702-ba934ad013e9\") " pod="openstack/glance-default-external-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.209690 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1db6fc24-0e64-4d28-b4c6-dd02be9f6b53-combined-ca-bundle\") pod 
\"glance-default-internal-api-0\" (UID: \"1db6fc24-0e64-4d28-b4c6-dd02be9f6b53\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.210002 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1db6fc24-0e64-4d28-b4c6-dd02be9f6b53-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"1db6fc24-0e64-4d28-b4c6-dd02be9f6b53\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.219002 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c9d01547-142e-4829-b702-ba934ad013e9-config-data\") pod \"glance-default-external-api-0\" (UID: \"c9d01547-142e-4829-b702-ba934ad013e9\") " pod="openstack/glance-default-external-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.221480 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/c9d01547-142e-4829-b702-ba934ad013e9-ceph\") pod \"glance-default-external-api-0\" (UID: \"c9d01547-142e-4829-b702-ba934ad013e9\") " pod="openstack/glance-default-external-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.225015 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/1db6fc24-0e64-4d28-b4c6-dd02be9f6b53-ceph\") pod \"glance-default-internal-api-0\" (UID: \"1db6fc24-0e64-4d28-b4c6-dd02be9f6b53\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.225416 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1db6fc24-0e64-4d28-b4c6-dd02be9f6b53-scripts\") pod \"glance-default-internal-api-0\" (UID: \"1db6fc24-0e64-4d28-b4c6-dd02be9f6b53\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.226576 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-28b8c\" (UniqueName: \"kubernetes.io/projected/c9d01547-142e-4829-b702-ba934ad013e9-kube-api-access-28b8c\") pod \"glance-default-external-api-0\" (UID: \"c9d01547-142e-4829-b702-ba934ad013e9\") " pod="openstack/glance-default-external-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.226624 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1db6fc24-0e64-4d28-b4c6-dd02be9f6b53-config-data\") pod \"glance-default-internal-api-0\" (UID: \"1db6fc24-0e64-4d28-b4c6-dd02be9f6b53\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.227424 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hshk6\" (UniqueName: \"kubernetes.io/projected/1db6fc24-0e64-4d28-b4c6-dd02be9f6b53-kube-api-access-hshk6\") pod \"glance-default-internal-api-0\" (UID: \"1db6fc24-0e64-4d28-b4c6-dd02be9f6b53\") " pod="openstack/glance-default-internal-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.228235 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9d01547-142e-4829-b702-ba934ad013e9-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c9d01547-142e-4829-b702-ba934ad013e9\") " pod="openstack/glance-default-external-api-0" Nov 28 15:12:31 crc 
kubenswrapper[4857]: I1128 15:12:31.260317 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.287895 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.826343 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-854678bdd5-rtdj4" event={"ID":"3f765626-ba83-4476-8183-af7018c79378","Type":"ContainerStarted","Data":"61526b43eb3e5bb0e87f1a8344d05a00af667dc331e51474a297a7449c826e44"} Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.832385 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6c4cb88c9c-rqnkt" event={"ID":"a9996acb-df90-4cbb-9ef6-ab35be1f070d","Type":"ContainerStarted","Data":"92839079b25b1cfa8701b6754560288017eeebfbcc173e5ae0e4666159037ffb"} Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.832510 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-6c4cb88c9c-rqnkt" podUID="a9996acb-df90-4cbb-9ef6-ab35be1f070d" containerName="horizon-log" containerID="cri-o://2a4b0d3497a31c279b13aa994d6356b22618e07f161549c600ada0d2e498d02e" gracePeriod=30 Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.832575 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-6c4cb88c9c-rqnkt" podUID="a9996acb-df90-4cbb-9ef6-ab35be1f070d" containerName="horizon" containerID="cri-o://92839079b25b1cfa8701b6754560288017eeebfbcc173e5ae0e4666159037ffb" gracePeriod=30 Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.841567 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-66cdcfc869-s88hx" event={"ID":"26ad8963-2e63-4a75-9de9-90e28f9153d5","Type":"ContainerStarted","Data":"6ac547a69886adda1571b0f0306313b35d1ede3c084fd37ee7ab09b390cffc1d"} Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.875550 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.889630 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-854678bdd5-rtdj4" podStartSLOduration=3.936622894 podStartE2EDuration="11.889599783s" podCreationTimestamp="2025-11-28 15:12:20 +0000 UTC" firstStartedPulling="2025-11-28 15:12:21.835208804 +0000 UTC m=+6191.959150241" lastFinishedPulling="2025-11-28 15:12:29.788185683 +0000 UTC m=+6199.912127130" observedRunningTime="2025-11-28 15:12:31.875888636 +0000 UTC m=+6201.999830073" watchObservedRunningTime="2025-11-28 15:12:31.889599783 +0000 UTC m=+6202.013541210" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.920336 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-66cdcfc869-s88hx" podStartSLOduration=3.835478272 podStartE2EDuration="10.920299893s" podCreationTimestamp="2025-11-28 15:12:21 +0000 UTC" firstStartedPulling="2025-11-28 15:12:22.698369219 +0000 UTC m=+6192.822310656" lastFinishedPulling="2025-11-28 15:12:29.78319083 +0000 UTC m=+6199.907132277" observedRunningTime="2025-11-28 15:12:31.899812655 +0000 UTC m=+6202.023754092" watchObservedRunningTime="2025-11-28 15:12:31.920299893 +0000 UTC m=+6202.044241330" Nov 28 15:12:31 crc kubenswrapper[4857]: I1128 15:12:31.969519 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/horizon-6c4cb88c9c-rqnkt" podStartSLOduration=4.391423281 podStartE2EDuration="11.969484846s" podCreationTimestamp="2025-11-28 15:12:20 +0000 UTC" firstStartedPulling="2025-11-28 15:12:22.274242191 +0000 UTC m=+6192.398183628" lastFinishedPulling="2025-11-28 15:12:29.852303756 +0000 UTC m=+6199.976245193" observedRunningTime="2025-11-28 15:12:31.955231746 +0000 UTC m=+6202.079173183" watchObservedRunningTime="2025-11-28 15:12:31.969484846 +0000 UTC m=+6202.093426283" Nov 28 15:12:32 crc kubenswrapper[4857]: I1128 15:12:32.074545 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 28 15:12:32 crc kubenswrapper[4857]: I1128 15:12:32.102239 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-66cdcfc869-s88hx" Nov 28 15:12:32 crc kubenswrapper[4857]: I1128 15:12:32.102973 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-66cdcfc869-s88hx" Nov 28 15:12:32 crc kubenswrapper[4857]: I1128 15:12:32.251741 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5dc2df72-1be4-4bad-8282-bb1b65226de0" path="/var/lib/kubelet/pods/5dc2df72-1be4-4bad-8282-bb1b65226de0/volumes" Nov 28 15:12:32 crc kubenswrapper[4857]: I1128 15:12:32.253472 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7" path="/var/lib/kubelet/pods/cf2fe1ae-4f37-46dc-9d52-61b1df5f5fc7/volumes" Nov 28 15:12:32 crc kubenswrapper[4857]: I1128 15:12:32.860252 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c9d01547-142e-4829-b702-ba934ad013e9","Type":"ContainerStarted","Data":"fc09d54d9dda13c53f31e25b69209e50948891703a8186e90182b54b69e7918e"} Nov 28 15:12:32 crc kubenswrapper[4857]: I1128 15:12:32.866402 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"1db6fc24-0e64-4d28-b4c6-dd02be9f6b53","Type":"ContainerStarted","Data":"82da407fc5a4789ee414d6b237cde77fc8a6d083f9b613c9cb056ac2dffef10e"} Nov 28 15:12:33 crc kubenswrapper[4857]: I1128 15:12:33.879519 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"1db6fc24-0e64-4d28-b4c6-dd02be9f6b53","Type":"ContainerStarted","Data":"d6f59ede758bc36cd0f5edc8497c1b8f88c41a702e3947b8b23a3dc8dcc58407"} Nov 28 15:12:33 crc kubenswrapper[4857]: I1128 15:12:33.884256 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c9d01547-142e-4829-b702-ba934ad013e9","Type":"ContainerStarted","Data":"1ed54836f0a034fc1e2d5d50d6086289eaec8af37d9ea949b723f582d836e997"} Nov 28 15:12:34 crc kubenswrapper[4857]: I1128 15:12:34.907443 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"1db6fc24-0e64-4d28-b4c6-dd02be9f6b53","Type":"ContainerStarted","Data":"2c56d3d536f383ccef113ee52913cd4a0cd0114cbde6cdcf985ddd54ea5a140a"} Nov 28 15:12:34 crc kubenswrapper[4857]: I1128 15:12:34.914912 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c9d01547-142e-4829-b702-ba934ad013e9","Type":"ContainerStarted","Data":"4fb6b5c4e4bf98049c9d93e98e740613f7ee8299ba0ed42f709eb263a8386133"} Nov 28 15:12:34 crc kubenswrapper[4857]: I1128 15:12:34.930736 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/glance-default-internal-api-0" podStartSLOduration=4.930706592 podStartE2EDuration="4.930706592s" podCreationTimestamp="2025-11-28 15:12:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:12:34.929983813 +0000 UTC m=+6205.053925430" watchObservedRunningTime="2025-11-28 15:12:34.930706592 +0000 UTC m=+6205.054648029" Nov 28 15:12:34 crc kubenswrapper[4857]: I1128 15:12:34.962084 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.96206047 podStartE2EDuration="4.96206047s" podCreationTimestamp="2025-11-28 15:12:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:12:34.956742128 +0000 UTC m=+6205.080683565" watchObservedRunningTime="2025-11-28 15:12:34.96206047 +0000 UTC m=+6205.086001927" Nov 28 15:12:40 crc kubenswrapper[4857]: I1128 15:12:40.761726 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-854678bdd5-rtdj4" Nov 28 15:12:40 crc kubenswrapper[4857]: I1128 15:12:40.762412 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-854678bdd5-rtdj4" Nov 28 15:12:40 crc kubenswrapper[4857]: I1128 15:12:40.878961 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-6c4cb88c9c-rqnkt" Nov 28 15:12:41 crc kubenswrapper[4857]: I1128 15:12:41.262241 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 28 15:12:41 crc kubenswrapper[4857]: I1128 15:12:41.262318 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 28 15:12:41 crc kubenswrapper[4857]: I1128 15:12:41.289814 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 28 15:12:41 crc kubenswrapper[4857]: I1128 15:12:41.289895 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 28 15:12:41 crc kubenswrapper[4857]: I1128 15:12:41.315713 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 28 15:12:41 crc kubenswrapper[4857]: I1128 15:12:41.320408 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 28 15:12:41 crc kubenswrapper[4857]: I1128 15:12:41.332843 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 28 15:12:41 crc kubenswrapper[4857]: I1128 15:12:41.359763 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 28 15:12:42 crc kubenswrapper[4857]: I1128 15:12:42.056144 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 28 15:12:42 crc kubenswrapper[4857]: I1128 15:12:42.057786 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 28 15:12:42 crc kubenswrapper[4857]: I1128 15:12:42.057827 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 28 15:12:42 crc 
kubenswrapper[4857]: I1128 15:12:42.057854 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 28 15:12:42 crc kubenswrapper[4857]: I1128 15:12:42.103942 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-66cdcfc869-s88hx" podUID="26ad8963-2e63-4a75-9de9-90e28f9153d5" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.114:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.114:8080: connect: connection refused" Nov 28 15:12:44 crc kubenswrapper[4857]: I1128 15:12:44.080228 4857 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 15:12:44 crc kubenswrapper[4857]: I1128 15:12:44.080695 4857 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 15:12:44 crc kubenswrapper[4857]: I1128 15:12:44.080304 4857 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 15:12:44 crc kubenswrapper[4857]: I1128 15:12:44.080751 4857 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 28 15:12:44 crc kubenswrapper[4857]: I1128 15:12:44.385037 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 28 15:12:44 crc kubenswrapper[4857]: I1128 15:12:44.436768 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 28 15:12:44 crc kubenswrapper[4857]: I1128 15:12:44.606314 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 28 15:12:44 crc kubenswrapper[4857]: I1128 15:12:44.609789 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 28 15:12:50 crc kubenswrapper[4857]: I1128 15:12:50.764370 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-854678bdd5-rtdj4" podUID="3f765626-ba83-4476-8183-af7018c79378" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.112:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.112:8080: connect: connection refused" Nov 28 15:12:53 crc kubenswrapper[4857]: I1128 15:12:53.938941 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-66cdcfc869-s88hx" Nov 28 15:12:55 crc kubenswrapper[4857]: I1128 15:12:55.441379 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-66cdcfc869-s88hx" Nov 28 15:12:55 crc kubenswrapper[4857]: I1128 15:12:55.511853 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-854678bdd5-rtdj4"] Nov 28 15:12:55 crc kubenswrapper[4857]: I1128 15:12:55.512645 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-854678bdd5-rtdj4" podUID="3f765626-ba83-4476-8183-af7018c79378" containerName="horizon-log" containerID="cri-o://dcd0262e12092523c7b6336fd5ef75d1ea913079025cea5f6c701028194e24af" gracePeriod=30 Nov 28 15:12:55 crc kubenswrapper[4857]: I1128 15:12:55.512842 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-854678bdd5-rtdj4" podUID="3f765626-ba83-4476-8183-af7018c79378" containerName="horizon" containerID="cri-o://61526b43eb3e5bb0e87f1a8344d05a00af667dc331e51474a297a7449c826e44" gracePeriod=30 Nov 28 15:12:57 crc kubenswrapper[4857]: I1128 15:12:57.266778 4857 generic.go:334] "Generic 
(PLEG): container finished" podID="3f765626-ba83-4476-8183-af7018c79378" containerID="61526b43eb3e5bb0e87f1a8344d05a00af667dc331e51474a297a7449c826e44" exitCode=0 Nov 28 15:12:57 crc kubenswrapper[4857]: I1128 15:12:57.266861 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-854678bdd5-rtdj4" event={"ID":"3f765626-ba83-4476-8183-af7018c79378","Type":"ContainerDied","Data":"61526b43eb3e5bb0e87f1a8344d05a00af667dc331e51474a297a7449c826e44"} Nov 28 15:13:02 crc kubenswrapper[4857]: I1128 15:13:02.259683 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6c4cb88c9c-rqnkt" Nov 28 15:13:02 crc kubenswrapper[4857]: I1128 15:13:02.342353 4857 generic.go:334] "Generic (PLEG): container finished" podID="a9996acb-df90-4cbb-9ef6-ab35be1f070d" containerID="92839079b25b1cfa8701b6754560288017eeebfbcc173e5ae0e4666159037ffb" exitCode=137 Nov 28 15:13:02 crc kubenswrapper[4857]: I1128 15:13:02.342397 4857 generic.go:334] "Generic (PLEG): container finished" podID="a9996acb-df90-4cbb-9ef6-ab35be1f070d" containerID="2a4b0d3497a31c279b13aa994d6356b22618e07f161549c600ada0d2e498d02e" exitCode=137 Nov 28 15:13:02 crc kubenswrapper[4857]: I1128 15:13:02.342422 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6c4cb88c9c-rqnkt" event={"ID":"a9996acb-df90-4cbb-9ef6-ab35be1f070d","Type":"ContainerDied","Data":"92839079b25b1cfa8701b6754560288017eeebfbcc173e5ae0e4666159037ffb"} Nov 28 15:13:02 crc kubenswrapper[4857]: I1128 15:13:02.342458 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6c4cb88c9c-rqnkt" event={"ID":"a9996acb-df90-4cbb-9ef6-ab35be1f070d","Type":"ContainerDied","Data":"2a4b0d3497a31c279b13aa994d6356b22618e07f161549c600ada0d2e498d02e"} Nov 28 15:13:02 crc kubenswrapper[4857]: I1128 15:13:02.342474 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6c4cb88c9c-rqnkt" event={"ID":"a9996acb-df90-4cbb-9ef6-ab35be1f070d","Type":"ContainerDied","Data":"3cd4d565a88b2d007d0deadf3c3e2ae8236d4a97ff50c6ea482b44acb160355e"} Nov 28 15:13:02 crc kubenswrapper[4857]: I1128 15:13:02.342495 4857 scope.go:117] "RemoveContainer" containerID="92839079b25b1cfa8701b6754560288017eeebfbcc173e5ae0e4666159037ffb" Nov 28 15:13:02 crc kubenswrapper[4857]: I1128 15:13:02.342694 4857 util.go:48] "No ready sandbox for pod can be found. 
Nov 28 15:13:02 crc kubenswrapper[4857]: I1128 15:13:02.434928 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a9996acb-df90-4cbb-9ef6-ab35be1f070d-horizon-secret-key\") pod \"a9996acb-df90-4cbb-9ef6-ab35be1f070d\" (UID: \"a9996acb-df90-4cbb-9ef6-ab35be1f070d\") "
Nov 28 15:13:02 crc kubenswrapper[4857]: I1128 15:13:02.435258 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a9996acb-df90-4cbb-9ef6-ab35be1f070d-config-data\") pod \"a9996acb-df90-4cbb-9ef6-ab35be1f070d\" (UID: \"a9996acb-df90-4cbb-9ef6-ab35be1f070d\") "
Nov 28 15:13:02 crc kubenswrapper[4857]: I1128 15:13:02.435324 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a9996acb-df90-4cbb-9ef6-ab35be1f070d-scripts\") pod \"a9996acb-df90-4cbb-9ef6-ab35be1f070d\" (UID: \"a9996acb-df90-4cbb-9ef6-ab35be1f070d\") "
Nov 28 15:13:02 crc kubenswrapper[4857]: I1128 15:13:02.435396 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5nn5h\" (UniqueName: \"kubernetes.io/projected/a9996acb-df90-4cbb-9ef6-ab35be1f070d-kube-api-access-5nn5h\") pod \"a9996acb-df90-4cbb-9ef6-ab35be1f070d\" (UID: \"a9996acb-df90-4cbb-9ef6-ab35be1f070d\") "
Nov 28 15:13:02 crc kubenswrapper[4857]: I1128 15:13:02.435533 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a9996acb-df90-4cbb-9ef6-ab35be1f070d-logs\") pod \"a9996acb-df90-4cbb-9ef6-ab35be1f070d\" (UID: \"a9996acb-df90-4cbb-9ef6-ab35be1f070d\") "
Nov 28 15:13:02 crc kubenswrapper[4857]: I1128 15:13:02.436039 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a9996acb-df90-4cbb-9ef6-ab35be1f070d-logs" (OuterVolumeSpecName: "logs") pod "a9996acb-df90-4cbb-9ef6-ab35be1f070d" (UID: "a9996acb-df90-4cbb-9ef6-ab35be1f070d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:13:02 crc kubenswrapper[4857]: I1128 15:13:02.436536 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a9996acb-df90-4cbb-9ef6-ab35be1f070d-logs\") on node \"crc\" DevicePath \"\""
Nov 28 15:13:02 crc kubenswrapper[4857]: I1128 15:13:02.442127 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a9996acb-df90-4cbb-9ef6-ab35be1f070d-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "a9996acb-df90-4cbb-9ef6-ab35be1f070d" (UID: "a9996acb-df90-4cbb-9ef6-ab35be1f070d"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:13:02 crc kubenswrapper[4857]: I1128 15:13:02.444143 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a9996acb-df90-4cbb-9ef6-ab35be1f070d-kube-api-access-5nn5h" (OuterVolumeSpecName: "kube-api-access-5nn5h") pod "a9996acb-df90-4cbb-9ef6-ab35be1f070d" (UID: "a9996acb-df90-4cbb-9ef6-ab35be1f070d"). InnerVolumeSpecName "kube-api-access-5nn5h". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:13:02 crc kubenswrapper[4857]: I1128 15:13:02.470040 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a9996acb-df90-4cbb-9ef6-ab35be1f070d-scripts" (OuterVolumeSpecName: "scripts") pod "a9996acb-df90-4cbb-9ef6-ab35be1f070d" (UID: "a9996acb-df90-4cbb-9ef6-ab35be1f070d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:13:02 crc kubenswrapper[4857]: I1128 15:13:02.516501 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a9996acb-df90-4cbb-9ef6-ab35be1f070d-config-data" (OuterVolumeSpecName: "config-data") pod "a9996acb-df90-4cbb-9ef6-ab35be1f070d" (UID: "a9996acb-df90-4cbb-9ef6-ab35be1f070d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:13:02 crc kubenswrapper[4857]: I1128 15:13:02.528898 4857 scope.go:117] "RemoveContainer" containerID="2a4b0d3497a31c279b13aa994d6356b22618e07f161549c600ada0d2e498d02e" Nov 28 15:13:02 crc kubenswrapper[4857]: I1128 15:13:02.539317 4857 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a9996acb-df90-4cbb-9ef6-ab35be1f070d-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 28 15:13:02 crc kubenswrapper[4857]: I1128 15:13:02.539359 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a9996acb-df90-4cbb-9ef6-ab35be1f070d-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:13:02 crc kubenswrapper[4857]: I1128 15:13:02.539371 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a9996acb-df90-4cbb-9ef6-ab35be1f070d-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:13:02 crc kubenswrapper[4857]: I1128 15:13:02.539381 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5nn5h\" (UniqueName: \"kubernetes.io/projected/a9996acb-df90-4cbb-9ef6-ab35be1f070d-kube-api-access-5nn5h\") on node \"crc\" DevicePath \"\"" Nov 28 15:13:02 crc kubenswrapper[4857]: I1128 15:13:02.554593 4857 scope.go:117] "RemoveContainer" containerID="92839079b25b1cfa8701b6754560288017eeebfbcc173e5ae0e4666159037ffb" Nov 28 15:13:02 crc kubenswrapper[4857]: E1128 15:13:02.554992 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"92839079b25b1cfa8701b6754560288017eeebfbcc173e5ae0e4666159037ffb\": container with ID starting with 92839079b25b1cfa8701b6754560288017eeebfbcc173e5ae0e4666159037ffb not found: ID does not exist" containerID="92839079b25b1cfa8701b6754560288017eeebfbcc173e5ae0e4666159037ffb" Nov 28 15:13:02 crc kubenswrapper[4857]: I1128 15:13:02.555069 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"92839079b25b1cfa8701b6754560288017eeebfbcc173e5ae0e4666159037ffb"} err="failed to get container status \"92839079b25b1cfa8701b6754560288017eeebfbcc173e5ae0e4666159037ffb\": rpc error: code = NotFound desc = could not find container \"92839079b25b1cfa8701b6754560288017eeebfbcc173e5ae0e4666159037ffb\": container with ID starting with 92839079b25b1cfa8701b6754560288017eeebfbcc173e5ae0e4666159037ffb not found: ID does not exist" Nov 28 15:13:02 crc kubenswrapper[4857]: I1128 15:13:02.555102 4857 scope.go:117] "RemoveContainer" 
containerID="2a4b0d3497a31c279b13aa994d6356b22618e07f161549c600ada0d2e498d02e" Nov 28 15:13:02 crc kubenswrapper[4857]: E1128 15:13:02.555849 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2a4b0d3497a31c279b13aa994d6356b22618e07f161549c600ada0d2e498d02e\": container with ID starting with 2a4b0d3497a31c279b13aa994d6356b22618e07f161549c600ada0d2e498d02e not found: ID does not exist" containerID="2a4b0d3497a31c279b13aa994d6356b22618e07f161549c600ada0d2e498d02e" Nov 28 15:13:02 crc kubenswrapper[4857]: I1128 15:13:02.555888 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2a4b0d3497a31c279b13aa994d6356b22618e07f161549c600ada0d2e498d02e"} err="failed to get container status \"2a4b0d3497a31c279b13aa994d6356b22618e07f161549c600ada0d2e498d02e\": rpc error: code = NotFound desc = could not find container \"2a4b0d3497a31c279b13aa994d6356b22618e07f161549c600ada0d2e498d02e\": container with ID starting with 2a4b0d3497a31c279b13aa994d6356b22618e07f161549c600ada0d2e498d02e not found: ID does not exist" Nov 28 15:13:02 crc kubenswrapper[4857]: I1128 15:13:02.555918 4857 scope.go:117] "RemoveContainer" containerID="92839079b25b1cfa8701b6754560288017eeebfbcc173e5ae0e4666159037ffb" Nov 28 15:13:02 crc kubenswrapper[4857]: I1128 15:13:02.556264 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"92839079b25b1cfa8701b6754560288017eeebfbcc173e5ae0e4666159037ffb"} err="failed to get container status \"92839079b25b1cfa8701b6754560288017eeebfbcc173e5ae0e4666159037ffb\": rpc error: code = NotFound desc = could not find container \"92839079b25b1cfa8701b6754560288017eeebfbcc173e5ae0e4666159037ffb\": container with ID starting with 92839079b25b1cfa8701b6754560288017eeebfbcc173e5ae0e4666159037ffb not found: ID does not exist" Nov 28 15:13:02 crc kubenswrapper[4857]: I1128 15:13:02.556288 4857 scope.go:117] "RemoveContainer" containerID="2a4b0d3497a31c279b13aa994d6356b22618e07f161549c600ada0d2e498d02e" Nov 28 15:13:02 crc kubenswrapper[4857]: I1128 15:13:02.556528 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2a4b0d3497a31c279b13aa994d6356b22618e07f161549c600ada0d2e498d02e"} err="failed to get container status \"2a4b0d3497a31c279b13aa994d6356b22618e07f161549c600ada0d2e498d02e\": rpc error: code = NotFound desc = could not find container \"2a4b0d3497a31c279b13aa994d6356b22618e07f161549c600ada0d2e498d02e\": container with ID starting with 2a4b0d3497a31c279b13aa994d6356b22618e07f161549c600ada0d2e498d02e not found: ID does not exist" Nov 28 15:13:02 crc kubenswrapper[4857]: I1128 15:13:02.697630 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6c4cb88c9c-rqnkt"] Nov 28 15:13:02 crc kubenswrapper[4857]: I1128 15:13:02.707082 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-6c4cb88c9c-rqnkt"] Nov 28 15:13:04 crc kubenswrapper[4857]: I1128 15:13:04.242162 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a9996acb-df90-4cbb-9ef6-ab35be1f070d" path="/var/lib/kubelet/pods/a9996acb-df90-4cbb-9ef6-ab35be1f070d/volumes" Nov 28 15:13:06 crc kubenswrapper[4857]: I1128 15:13:06.068150 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-a08e-account-create-update-27n5p"] Nov 28 15:13:06 crc kubenswrapper[4857]: I1128 15:13:06.084224 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/glance-db-create-jp2pg"] Nov 28 15:13:06 crc kubenswrapper[4857]: I1128 15:13:06.097532 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-jp2pg"] Nov 28 15:13:06 crc kubenswrapper[4857]: I1128 15:13:06.109107 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-a08e-account-create-update-27n5p"] Nov 28 15:13:06 crc kubenswrapper[4857]: I1128 15:13:06.249867 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c98cbc3-a049-4544-b3c0-48a74ec08df4" path="/var/lib/kubelet/pods/6c98cbc3-a049-4544-b3c0-48a74ec08df4/volumes" Nov 28 15:13:06 crc kubenswrapper[4857]: I1128 15:13:06.251179 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cdb8944a-abc2-4787-949b-fea63d4eba70" path="/var/lib/kubelet/pods/cdb8944a-abc2-4787-949b-fea63d4eba70/volumes" Nov 28 15:13:14 crc kubenswrapper[4857]: I1128 15:13:14.082796 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-m2gp4"] Nov 28 15:13:14 crc kubenswrapper[4857]: I1128 15:13:14.103122 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-m2gp4"] Nov 28 15:13:14 crc kubenswrapper[4857]: I1128 15:13:14.248982 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ea639f28-3b3c-4789-ad0c-4ae92923cb07" path="/var/lib/kubelet/pods/ea639f28-3b3c-4789-ad0c-4ae92923cb07/volumes" Nov 28 15:13:20 crc kubenswrapper[4857]: I1128 15:13:20.758060 4857 scope.go:117] "RemoveContainer" containerID="d80414fdb19849f61b5dec754fb96f10a2b88338202789c3582b8678a86da348" Nov 28 15:13:20 crc kubenswrapper[4857]: I1128 15:13:20.823661 4857 scope.go:117] "RemoveContainer" containerID="16c1e790752c7280a64ad482220e23888921db72437b11a041d8fb9cfad8945c" Nov 28 15:13:20 crc kubenswrapper[4857]: I1128 15:13:20.963714 4857 scope.go:117] "RemoveContainer" containerID="7fc7b802ed3133a945d349157a959758c40721b83025ececa525f9df3d7bdc33" Nov 28 15:13:21 crc kubenswrapper[4857]: I1128 15:13:21.034812 4857 scope.go:117] "RemoveContainer" containerID="5d6fe1a0c601b448ca204da2df776fd586604ead34e0b8cdb4f9e2c73083753a" Nov 28 15:13:21 crc kubenswrapper[4857]: I1128 15:13:21.094402 4857 scope.go:117] "RemoveContainer" containerID="e037644e828981be12dc6bec2a5c9f6c6cc372d7127f22cff367c28cd052b1c8" Nov 28 15:13:21 crc kubenswrapper[4857]: I1128 15:13:21.141722 4857 scope.go:117] "RemoveContainer" containerID="1d0804c4663772502e2852805fbb2fb371c04769987f092f6024c50e6b59e167" Nov 28 15:13:21 crc kubenswrapper[4857]: I1128 15:13:21.207731 4857 scope.go:117] "RemoveContainer" containerID="fd53eaaab460ddc9dbffd5a200e30daab396566fb89befa6db79d514fad93ab7" Nov 28 15:13:21 crc kubenswrapper[4857]: I1128 15:13:21.242929 4857 scope.go:117] "RemoveContainer" containerID="e2e67b60862be42ab9c310508d71414a992de3cb1c03272009bbe6432e6cccd1" Nov 28 15:13:25 crc kubenswrapper[4857]: I1128 15:13:25.949462 4857 generic.go:334] "Generic (PLEG): container finished" podID="3f765626-ba83-4476-8183-af7018c79378" containerID="dcd0262e12092523c7b6336fd5ef75d1ea913079025cea5f6c701028194e24af" exitCode=137 Nov 28 15:13:25 crc kubenswrapper[4857]: I1128 15:13:25.949620 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-854678bdd5-rtdj4" event={"ID":"3f765626-ba83-4476-8183-af7018c79378","Type":"ContainerDied","Data":"dcd0262e12092523c7b6336fd5ef75d1ea913079025cea5f6c701028194e24af"} Nov 28 15:13:26 crc kubenswrapper[4857]: I1128 15:13:26.534705 4857 util.go:48] "No ready sandbox for pod can be found. 
Nov 28 15:13:26 crc kubenswrapper[4857]: I1128 15:13:26.666089 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3f765626-ba83-4476-8183-af7018c79378-config-data\") pod \"3f765626-ba83-4476-8183-af7018c79378\" (UID: \"3f765626-ba83-4476-8183-af7018c79378\") "
Nov 28 15:13:26 crc kubenswrapper[4857]: I1128 15:13:26.666347 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3f765626-ba83-4476-8183-af7018c79378-scripts\") pod \"3f765626-ba83-4476-8183-af7018c79378\" (UID: \"3f765626-ba83-4476-8183-af7018c79378\") "
Nov 28 15:13:26 crc kubenswrapper[4857]: I1128 15:13:26.666680 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3f765626-ba83-4476-8183-af7018c79378-horizon-secret-key\") pod \"3f765626-ba83-4476-8183-af7018c79378\" (UID: \"3f765626-ba83-4476-8183-af7018c79378\") "
Nov 28 15:13:26 crc kubenswrapper[4857]: I1128 15:13:26.666900 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vrdzz\" (UniqueName: \"kubernetes.io/projected/3f765626-ba83-4476-8183-af7018c79378-kube-api-access-vrdzz\") pod \"3f765626-ba83-4476-8183-af7018c79378\" (UID: \"3f765626-ba83-4476-8183-af7018c79378\") "
Nov 28 15:13:26 crc kubenswrapper[4857]: I1128 15:13:26.667005 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f765626-ba83-4476-8183-af7018c79378-logs\") pod \"3f765626-ba83-4476-8183-af7018c79378\" (UID: \"3f765626-ba83-4476-8183-af7018c79378\") "
Nov 28 15:13:26 crc kubenswrapper[4857]: I1128 15:13:26.667771 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3f765626-ba83-4476-8183-af7018c79378-logs" (OuterVolumeSpecName: "logs") pod "3f765626-ba83-4476-8183-af7018c79378" (UID: "3f765626-ba83-4476-8183-af7018c79378"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:13:26 crc kubenswrapper[4857]: I1128 15:13:26.673804 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f765626-ba83-4476-8183-af7018c79378-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "3f765626-ba83-4476-8183-af7018c79378" (UID: "3f765626-ba83-4476-8183-af7018c79378"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:13:26 crc kubenswrapper[4857]: I1128 15:13:26.674877 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f765626-ba83-4476-8183-af7018c79378-kube-api-access-vrdzz" (OuterVolumeSpecName: "kube-api-access-vrdzz") pod "3f765626-ba83-4476-8183-af7018c79378" (UID: "3f765626-ba83-4476-8183-af7018c79378"). InnerVolumeSpecName "kube-api-access-vrdzz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:13:26 crc kubenswrapper[4857]: I1128 15:13:26.697996 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3f765626-ba83-4476-8183-af7018c79378-scripts" (OuterVolumeSpecName: "scripts") pod "3f765626-ba83-4476-8183-af7018c79378" (UID: "3f765626-ba83-4476-8183-af7018c79378"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:13:26 crc kubenswrapper[4857]: I1128 15:13:26.717361 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3f765626-ba83-4476-8183-af7018c79378-config-data" (OuterVolumeSpecName: "config-data") pod "3f765626-ba83-4476-8183-af7018c79378" (UID: "3f765626-ba83-4476-8183-af7018c79378"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:13:26 crc kubenswrapper[4857]: I1128 15:13:26.769505 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vrdzz\" (UniqueName: \"kubernetes.io/projected/3f765626-ba83-4476-8183-af7018c79378-kube-api-access-vrdzz\") on node \"crc\" DevicePath \"\"" Nov 28 15:13:26 crc kubenswrapper[4857]: I1128 15:13:26.769544 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3f765626-ba83-4476-8183-af7018c79378-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:13:26 crc kubenswrapper[4857]: I1128 15:13:26.769558 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3f765626-ba83-4476-8183-af7018c79378-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:13:26 crc kubenswrapper[4857]: I1128 15:13:26.769572 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3f765626-ba83-4476-8183-af7018c79378-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:13:26 crc kubenswrapper[4857]: I1128 15:13:26.769586 4857 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/3f765626-ba83-4476-8183-af7018c79378-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 28 15:13:26 crc kubenswrapper[4857]: I1128 15:13:26.973626 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-854678bdd5-rtdj4" event={"ID":"3f765626-ba83-4476-8183-af7018c79378","Type":"ContainerDied","Data":"59e32b174ade69b1e7494867c68af181e66c039ee80ff7364084bf6eda93183d"} Nov 28 15:13:26 crc kubenswrapper[4857]: I1128 15:13:26.973709 4857 scope.go:117] "RemoveContainer" containerID="61526b43eb3e5bb0e87f1a8344d05a00af667dc331e51474a297a7449c826e44" Nov 28 15:13:26 crc kubenswrapper[4857]: I1128 15:13:26.974690 4857 util.go:48] "No ready sandbox for pod can be found. 
Nov 28 15:13:27 crc kubenswrapper[4857]: I1128 15:13:27.032780 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-854678bdd5-rtdj4"]
Nov 28 15:13:27 crc kubenswrapper[4857]: I1128 15:13:27.049649 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-854678bdd5-rtdj4"]
Nov 28 15:13:27 crc kubenswrapper[4857]: I1128 15:13:27.251272 4857 scope.go:117] "RemoveContainer" containerID="dcd0262e12092523c7b6336fd5ef75d1ea913079025cea5f6c701028194e24af"
Nov 28 15:13:28 crc kubenswrapper[4857]: I1128 15:13:28.263244 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f765626-ba83-4476-8183-af7018c79378" path="/var/lib/kubelet/pods/3f765626-ba83-4476-8183-af7018c79378/volumes"
Nov 28 15:13:38 crc kubenswrapper[4857]: I1128 15:13:38.509419 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-5669786d45-d96bq"]
Nov 28 15:13:38 crc kubenswrapper[4857]: E1128 15:13:38.510626 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f765626-ba83-4476-8183-af7018c79378" containerName="horizon-log"
Nov 28 15:13:38 crc kubenswrapper[4857]: I1128 15:13:38.510641 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f765626-ba83-4476-8183-af7018c79378" containerName="horizon-log"
Nov 28 15:13:38 crc kubenswrapper[4857]: E1128 15:13:38.510654 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9996acb-df90-4cbb-9ef6-ab35be1f070d" containerName="horizon"
Nov 28 15:13:38 crc kubenswrapper[4857]: I1128 15:13:38.510660 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9996acb-df90-4cbb-9ef6-ab35be1f070d" containerName="horizon"
Nov 28 15:13:38 crc kubenswrapper[4857]: E1128 15:13:38.510671 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f765626-ba83-4476-8183-af7018c79378" containerName="horizon"
Nov 28 15:13:38 crc kubenswrapper[4857]: I1128 15:13:38.510678 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f765626-ba83-4476-8183-af7018c79378" containerName="horizon"
Nov 28 15:13:38 crc kubenswrapper[4857]: E1128 15:13:38.510706 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9996acb-df90-4cbb-9ef6-ab35be1f070d" containerName="horizon-log"
Nov 28 15:13:38 crc kubenswrapper[4857]: I1128 15:13:38.510713 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9996acb-df90-4cbb-9ef6-ab35be1f070d" containerName="horizon-log"
Nov 28 15:13:38 crc kubenswrapper[4857]: I1128 15:13:38.510894 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f765626-ba83-4476-8183-af7018c79378" containerName="horizon"
Nov 28 15:13:38 crc kubenswrapper[4857]: I1128 15:13:38.510907 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9996acb-df90-4cbb-9ef6-ab35be1f070d" containerName="horizon-log"
Nov 28 15:13:38 crc kubenswrapper[4857]: I1128 15:13:38.510919 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9996acb-df90-4cbb-9ef6-ab35be1f070d" containerName="horizon"
Nov 28 15:13:38 crc kubenswrapper[4857]: I1128 15:13:38.510937 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f765626-ba83-4476-8183-af7018c79378" containerName="horizon-log"
Nov 28 15:13:38 crc kubenswrapper[4857]: I1128 15:13:38.512365 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5669786d45-d96bq"
Nov 28 15:13:38 crc kubenswrapper[4857]: I1128 15:13:38.528121 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5669786d45-d96bq"]
Nov 28 15:13:38 crc kubenswrapper[4857]: I1128 15:13:38.596567 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77-logs\") pod \"horizon-5669786d45-d96bq\" (UID: \"90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77\") " pod="openstack/horizon-5669786d45-d96bq"
Nov 28 15:13:38 crc kubenswrapper[4857]: I1128 15:13:38.596685 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77-config-data\") pod \"horizon-5669786d45-d96bq\" (UID: \"90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77\") " pod="openstack/horizon-5669786d45-d96bq"
Nov 28 15:13:38 crc kubenswrapper[4857]: I1128 15:13:38.596715 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77-horizon-secret-key\") pod \"horizon-5669786d45-d96bq\" (UID: \"90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77\") " pod="openstack/horizon-5669786d45-d96bq"
Nov 28 15:13:38 crc kubenswrapper[4857]: I1128 15:13:38.596741 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bw89w\" (UniqueName: \"kubernetes.io/projected/90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77-kube-api-access-bw89w\") pod \"horizon-5669786d45-d96bq\" (UID: \"90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77\") " pod="openstack/horizon-5669786d45-d96bq"
Nov 28 15:13:38 crc kubenswrapper[4857]: I1128 15:13:38.596837 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77-scripts\") pod \"horizon-5669786d45-d96bq\" (UID: \"90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77\") " pod="openstack/horizon-5669786d45-d96bq"
Nov 28 15:13:38 crc kubenswrapper[4857]: I1128 15:13:38.699440 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77-config-data\") pod \"horizon-5669786d45-d96bq\" (UID: \"90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77\") " pod="openstack/horizon-5669786d45-d96bq"
Nov 28 15:13:38 crc kubenswrapper[4857]: I1128 15:13:38.699514 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77-horizon-secret-key\") pod \"horizon-5669786d45-d96bq\" (UID: \"90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77\") " pod="openstack/horizon-5669786d45-d96bq"
Nov 28 15:13:38 crc kubenswrapper[4857]: I1128 15:13:38.699549 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bw89w\" (UniqueName: \"kubernetes.io/projected/90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77-kube-api-access-bw89w\") pod \"horizon-5669786d45-d96bq\" (UID: \"90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77\") " pod="openstack/horizon-5669786d45-d96bq"
Nov 28 15:13:38 crc kubenswrapper[4857]: I1128 15:13:38.699617 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77-scripts\") pod \"horizon-5669786d45-d96bq\" (UID: \"90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77\") " pod="openstack/horizon-5669786d45-d96bq"
\"kubernetes.io/configmap/90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77-scripts\") pod \"horizon-5669786d45-d96bq\" (UID: \"90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77\") " pod="openstack/horizon-5669786d45-d96bq" Nov 28 15:13:38 crc kubenswrapper[4857]: I1128 15:13:38.699723 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77-logs\") pod \"horizon-5669786d45-d96bq\" (UID: \"90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77\") " pod="openstack/horizon-5669786d45-d96bq" Nov 28 15:13:38 crc kubenswrapper[4857]: I1128 15:13:38.700339 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77-logs\") pod \"horizon-5669786d45-d96bq\" (UID: \"90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77\") " pod="openstack/horizon-5669786d45-d96bq" Nov 28 15:13:38 crc kubenswrapper[4857]: I1128 15:13:38.701152 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77-scripts\") pod \"horizon-5669786d45-d96bq\" (UID: \"90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77\") " pod="openstack/horizon-5669786d45-d96bq" Nov 28 15:13:38 crc kubenswrapper[4857]: I1128 15:13:38.702587 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77-config-data\") pod \"horizon-5669786d45-d96bq\" (UID: \"90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77\") " pod="openstack/horizon-5669786d45-d96bq" Nov 28 15:13:38 crc kubenswrapper[4857]: I1128 15:13:38.715874 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77-horizon-secret-key\") pod \"horizon-5669786d45-d96bq\" (UID: \"90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77\") " pod="openstack/horizon-5669786d45-d96bq" Nov 28 15:13:38 crc kubenswrapper[4857]: I1128 15:13:38.718734 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bw89w\" (UniqueName: \"kubernetes.io/projected/90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77-kube-api-access-bw89w\") pod \"horizon-5669786d45-d96bq\" (UID: \"90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77\") " pod="openstack/horizon-5669786d45-d96bq" Nov 28 15:13:38 crc kubenswrapper[4857]: I1128 15:13:38.831551 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5669786d45-d96bq" Nov 28 15:13:39 crc kubenswrapper[4857]: I1128 15:13:39.402853 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5669786d45-d96bq"] Nov 28 15:13:39 crc kubenswrapper[4857]: I1128 15:13:39.690475 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-create-5drfb"] Nov 28 15:13:39 crc kubenswrapper[4857]: I1128 15:13:39.692295 4857 util.go:30] "No sandbox for pod can be found. 
Nov 28 15:13:39 crc kubenswrapper[4857]: I1128 15:13:39.717204 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-5drfb"]
Nov 28 15:13:39 crc kubenswrapper[4857]: I1128 15:13:39.837231 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hwngw\" (UniqueName: \"kubernetes.io/projected/6d8d5035-d774-4c0d-a530-62382257bc8f-kube-api-access-hwngw\") pod \"heat-db-create-5drfb\" (UID: \"6d8d5035-d774-4c0d-a530-62382257bc8f\") " pod="openstack/heat-db-create-5drfb"
Nov 28 15:13:39 crc kubenswrapper[4857]: I1128 15:13:39.837761 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6d8d5035-d774-4c0d-a530-62382257bc8f-operator-scripts\") pod \"heat-db-create-5drfb\" (UID: \"6d8d5035-d774-4c0d-a530-62382257bc8f\") " pod="openstack/heat-db-create-5drfb"
Nov 28 15:13:39 crc kubenswrapper[4857]: I1128 15:13:39.898683 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-ca47-account-create-update-w72m7"]
Nov 28 15:13:39 crc kubenswrapper[4857]: I1128 15:13:39.900153 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-ca47-account-create-update-w72m7"
Nov 28 15:13:39 crc kubenswrapper[4857]: I1128 15:13:39.905004 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-db-secret"
Nov 28 15:13:39 crc kubenswrapper[4857]: I1128 15:13:39.913373 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-ca47-account-create-update-w72m7"]
Nov 28 15:13:39 crc kubenswrapper[4857]: I1128 15:13:39.941046 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hwngw\" (UniqueName: \"kubernetes.io/projected/6d8d5035-d774-4c0d-a530-62382257bc8f-kube-api-access-hwngw\") pod \"heat-db-create-5drfb\" (UID: \"6d8d5035-d774-4c0d-a530-62382257bc8f\") " pod="openstack/heat-db-create-5drfb"
Nov 28 15:13:39 crc kubenswrapper[4857]: I1128 15:13:39.941192 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6d8d5035-d774-4c0d-a530-62382257bc8f-operator-scripts\") pod \"heat-db-create-5drfb\" (UID: \"6d8d5035-d774-4c0d-a530-62382257bc8f\") " pod="openstack/heat-db-create-5drfb"
Nov 28 15:13:39 crc kubenswrapper[4857]: I1128 15:13:39.942069 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6d8d5035-d774-4c0d-a530-62382257bc8f-operator-scripts\") pod \"heat-db-create-5drfb\" (UID: \"6d8d5035-d774-4c0d-a530-62382257bc8f\") " pod="openstack/heat-db-create-5drfb"
Nov 28 15:13:39 crc kubenswrapper[4857]: I1128 15:13:39.973561 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hwngw\" (UniqueName: \"kubernetes.io/projected/6d8d5035-d774-4c0d-a530-62382257bc8f-kube-api-access-hwngw\") pod \"heat-db-create-5drfb\" (UID: \"6d8d5035-d774-4c0d-a530-62382257bc8f\") " pod="openstack/heat-db-create-5drfb"
Nov 28 15:13:40 crc kubenswrapper[4857]: I1128 15:13:40.044340 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cscxw\" (UniqueName: \"kubernetes.io/projected/ca679d3b-5662-4e16-a37d-991087f4857f-kube-api-access-cscxw\") pod \"heat-ca47-account-create-update-w72m7\" (UID: \"ca679d3b-5662-4e16-a37d-991087f4857f\") " pod="openstack/heat-ca47-account-create-update-w72m7"
\"heat-ca47-account-create-update-w72m7\" (UID: \"ca679d3b-5662-4e16-a37d-991087f4857f\") " pod="openstack/heat-ca47-account-create-update-w72m7" Nov 28 15:13:40 crc kubenswrapper[4857]: I1128 15:13:40.044641 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ca679d3b-5662-4e16-a37d-991087f4857f-operator-scripts\") pod \"heat-ca47-account-create-update-w72m7\" (UID: \"ca679d3b-5662-4e16-a37d-991087f4857f\") " pod="openstack/heat-ca47-account-create-update-w72m7" Nov 28 15:13:40 crc kubenswrapper[4857]: I1128 15:13:40.059863 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-5drfb" Nov 28 15:13:40 crc kubenswrapper[4857]: I1128 15:13:40.136527 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5669786d45-d96bq" event={"ID":"90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77","Type":"ContainerStarted","Data":"99a4246cb93b8e47979c4ca29691e6f6f839c5243b982432cdc6d0bdd320a779"} Nov 28 15:13:40 crc kubenswrapper[4857]: I1128 15:13:40.136578 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5669786d45-d96bq" event={"ID":"90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77","Type":"ContainerStarted","Data":"9a0f847244d1143846d19f7a9dd2e3a11fc31495e6d4e0f21059799b3a322e67"} Nov 28 15:13:40 crc kubenswrapper[4857]: I1128 15:13:40.136588 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5669786d45-d96bq" event={"ID":"90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77","Type":"ContainerStarted","Data":"8991d5f0a6c739e8e2d183d496792737b591ec8afd2b60014505076c75a30519"} Nov 28 15:13:40 crc kubenswrapper[4857]: I1128 15:13:40.146553 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ca679d3b-5662-4e16-a37d-991087f4857f-operator-scripts\") pod \"heat-ca47-account-create-update-w72m7\" (UID: \"ca679d3b-5662-4e16-a37d-991087f4857f\") " pod="openstack/heat-ca47-account-create-update-w72m7" Nov 28 15:13:40 crc kubenswrapper[4857]: I1128 15:13:40.147051 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cscxw\" (UniqueName: \"kubernetes.io/projected/ca679d3b-5662-4e16-a37d-991087f4857f-kube-api-access-cscxw\") pod \"heat-ca47-account-create-update-w72m7\" (UID: \"ca679d3b-5662-4e16-a37d-991087f4857f\") " pod="openstack/heat-ca47-account-create-update-w72m7" Nov 28 15:13:40 crc kubenswrapper[4857]: I1128 15:13:40.147670 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ca679d3b-5662-4e16-a37d-991087f4857f-operator-scripts\") pod \"heat-ca47-account-create-update-w72m7\" (UID: \"ca679d3b-5662-4e16-a37d-991087f4857f\") " pod="openstack/heat-ca47-account-create-update-w72m7" Nov 28 15:13:40 crc kubenswrapper[4857]: I1128 15:13:40.159049 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-5669786d45-d96bq" podStartSLOduration=2.159013605 podStartE2EDuration="2.159013605s" podCreationTimestamp="2025-11-28 15:13:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:13:40.157678189 +0000 UTC m=+6270.281619646" watchObservedRunningTime="2025-11-28 15:13:40.159013605 +0000 UTC m=+6270.282955052" Nov 28 15:13:40 crc kubenswrapper[4857]: I1128 15:13:40.174654 4857 
Nov 28 15:13:40 crc kubenswrapper[4857]: I1128 15:13:40.224099 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-ca47-account-create-update-w72m7"
Nov 28 15:13:40 crc kubenswrapper[4857]: I1128 15:13:40.704218 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-5drfb"]
Nov 28 15:13:40 crc kubenswrapper[4857]: I1128 15:13:40.951088 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-ca47-account-create-update-w72m7"]
Nov 28 15:13:40 crc kubenswrapper[4857]: W1128 15:13:40.954733 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podca679d3b_5662_4e16_a37d_991087f4857f.slice/crio-8048bd7e338f9015c698d613c82dfbeee52d1b7e48e6f7dd1853f34bf7e0edae WatchSource:0}: Error finding container 8048bd7e338f9015c698d613c82dfbeee52d1b7e48e6f7dd1853f34bf7e0edae: Status 404 returned error can't find the container with id 8048bd7e338f9015c698d613c82dfbeee52d1b7e48e6f7dd1853f34bf7e0edae
Nov 28 15:13:41 crc kubenswrapper[4857]: I1128 15:13:41.150781 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-ca47-account-create-update-w72m7" event={"ID":"ca679d3b-5662-4e16-a37d-991087f4857f","Type":"ContainerStarted","Data":"8048bd7e338f9015c698d613c82dfbeee52d1b7e48e6f7dd1853f34bf7e0edae"}
Nov 28 15:13:41 crc kubenswrapper[4857]: I1128 15:13:41.152064 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-5drfb" event={"ID":"6d8d5035-d774-4c0d-a530-62382257bc8f","Type":"ContainerStarted","Data":"153141947c8750b84fa183f2e95ba4209acd921494e537400d06bf308be0e767"}
Nov 28 15:13:41 crc kubenswrapper[4857]: I1128 15:13:41.152151 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-5drfb" event={"ID":"6d8d5035-d774-4c0d-a530-62382257bc8f","Type":"ContainerStarted","Data":"80801fb83f496e1c54ff7f6b575a3256348b92f6a2843b38807acaf45c76300e"}
Nov 28 15:13:41 crc kubenswrapper[4857]: I1128 15:13:41.170717 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-create-5drfb" podStartSLOduration=2.170689808 podStartE2EDuration="2.170689808s" podCreationTimestamp="2025-11-28 15:13:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:13:41.168832388 +0000 UTC m=+6271.292773825" watchObservedRunningTime="2025-11-28 15:13:41.170689808 +0000 UTC m=+6271.294631245"
Nov 28 15:13:41 crc kubenswrapper[4857]: I1128 15:13:41.308442 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 15:13:41 crc kubenswrapper[4857]: I1128 15:13:41.308730 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:13:42 crc kubenswrapper[4857]: I1128 15:13:42.167569 4857 generic.go:334] "Generic (PLEG): container finished" podID="6d8d5035-d774-4c0d-a530-62382257bc8f" containerID="153141947c8750b84fa183f2e95ba4209acd921494e537400d06bf308be0e767" exitCode=0 Nov 28 15:13:42 crc kubenswrapper[4857]: I1128 15:13:42.167853 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-5drfb" event={"ID":"6d8d5035-d774-4c0d-a530-62382257bc8f","Type":"ContainerDied","Data":"153141947c8750b84fa183f2e95ba4209acd921494e537400d06bf308be0e767"} Nov 28 15:13:42 crc kubenswrapper[4857]: I1128 15:13:42.182105 4857 generic.go:334] "Generic (PLEG): container finished" podID="ca679d3b-5662-4e16-a37d-991087f4857f" containerID="87ebd22c82949004d2ec9780a6dcaf7bb0c930227c13fac47716b29191785ca5" exitCode=0 Nov 28 15:13:42 crc kubenswrapper[4857]: I1128 15:13:42.182158 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-ca47-account-create-update-w72m7" event={"ID":"ca679d3b-5662-4e16-a37d-991087f4857f","Type":"ContainerDied","Data":"87ebd22c82949004d2ec9780a6dcaf7bb0c930227c13fac47716b29191785ca5"} Nov 28 15:13:43 crc kubenswrapper[4857]: I1128 15:13:43.716813 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-5drfb" Nov 28 15:13:43 crc kubenswrapper[4857]: I1128 15:13:43.726781 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-ca47-account-create-update-w72m7" Nov 28 15:13:43 crc kubenswrapper[4857]: I1128 15:13:43.894833 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cscxw\" (UniqueName: \"kubernetes.io/projected/ca679d3b-5662-4e16-a37d-991087f4857f-kube-api-access-cscxw\") pod \"ca679d3b-5662-4e16-a37d-991087f4857f\" (UID: \"ca679d3b-5662-4e16-a37d-991087f4857f\") " Nov 28 15:13:43 crc kubenswrapper[4857]: I1128 15:13:43.895569 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ca679d3b-5662-4e16-a37d-991087f4857f-operator-scripts\") pod \"ca679d3b-5662-4e16-a37d-991087f4857f\" (UID: \"ca679d3b-5662-4e16-a37d-991087f4857f\") " Nov 28 15:13:43 crc kubenswrapper[4857]: I1128 15:13:43.896047 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hwngw\" (UniqueName: \"kubernetes.io/projected/6d8d5035-d774-4c0d-a530-62382257bc8f-kube-api-access-hwngw\") pod \"6d8d5035-d774-4c0d-a530-62382257bc8f\" (UID: \"6d8d5035-d774-4c0d-a530-62382257bc8f\") " Nov 28 15:13:43 crc kubenswrapper[4857]: I1128 15:13:43.896348 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6d8d5035-d774-4c0d-a530-62382257bc8f-operator-scripts\") pod \"6d8d5035-d774-4c0d-a530-62382257bc8f\" (UID: \"6d8d5035-d774-4c0d-a530-62382257bc8f\") " Nov 28 15:13:43 crc kubenswrapper[4857]: I1128 15:13:43.896541 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ca679d3b-5662-4e16-a37d-991087f4857f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ca679d3b-5662-4e16-a37d-991087f4857f" (UID: "ca679d3b-5662-4e16-a37d-991087f4857f"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:13:43 crc kubenswrapper[4857]: I1128 15:13:43.896914 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d8d5035-d774-4c0d-a530-62382257bc8f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6d8d5035-d774-4c0d-a530-62382257bc8f" (UID: "6d8d5035-d774-4c0d-a530-62382257bc8f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:13:43 crc kubenswrapper[4857]: I1128 15:13:43.897872 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ca679d3b-5662-4e16-a37d-991087f4857f-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:13:43 crc kubenswrapper[4857]: I1128 15:13:43.898128 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6d8d5035-d774-4c0d-a530-62382257bc8f-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:13:43 crc kubenswrapper[4857]: I1128 15:13:43.906093 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ca679d3b-5662-4e16-a37d-991087f4857f-kube-api-access-cscxw" (OuterVolumeSpecName: "kube-api-access-cscxw") pod "ca679d3b-5662-4e16-a37d-991087f4857f" (UID: "ca679d3b-5662-4e16-a37d-991087f4857f"). InnerVolumeSpecName "kube-api-access-cscxw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:13:43 crc kubenswrapper[4857]: I1128 15:13:43.906171 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d8d5035-d774-4c0d-a530-62382257bc8f-kube-api-access-hwngw" (OuterVolumeSpecName: "kube-api-access-hwngw") pod "6d8d5035-d774-4c0d-a530-62382257bc8f" (UID: "6d8d5035-d774-4c0d-a530-62382257bc8f"). InnerVolumeSpecName "kube-api-access-hwngw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:13:44 crc kubenswrapper[4857]: I1128 15:13:44.001052 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hwngw\" (UniqueName: \"kubernetes.io/projected/6d8d5035-d774-4c0d-a530-62382257bc8f-kube-api-access-hwngw\") on node \"crc\" DevicePath \"\"" Nov 28 15:13:44 crc kubenswrapper[4857]: I1128 15:13:44.001111 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cscxw\" (UniqueName: \"kubernetes.io/projected/ca679d3b-5662-4e16-a37d-991087f4857f-kube-api-access-cscxw\") on node \"crc\" DevicePath \"\"" Nov 28 15:13:44 crc kubenswrapper[4857]: I1128 15:13:44.206027 4857 util.go:48] "No ready sandbox for pod can be found. 
Nov 28 15:13:44 crc kubenswrapper[4857]: I1128 15:13:44.206038 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-5drfb" event={"ID":"6d8d5035-d774-4c0d-a530-62382257bc8f","Type":"ContainerDied","Data":"80801fb83f496e1c54ff7f6b575a3256348b92f6a2843b38807acaf45c76300e"}
Nov 28 15:13:44 crc kubenswrapper[4857]: I1128 15:13:44.206485 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="80801fb83f496e1c54ff7f6b575a3256348b92f6a2843b38807acaf45c76300e"
Nov 28 15:13:44 crc kubenswrapper[4857]: I1128 15:13:44.208058 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-ca47-account-create-update-w72m7" event={"ID":"ca679d3b-5662-4e16-a37d-991087f4857f","Type":"ContainerDied","Data":"8048bd7e338f9015c698d613c82dfbeee52d1b7e48e6f7dd1853f34bf7e0edae"}
Nov 28 15:13:44 crc kubenswrapper[4857]: I1128 15:13:44.208205 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8048bd7e338f9015c698d613c82dfbeee52d1b7e48e6f7dd1853f34bf7e0edae"
Nov 28 15:13:44 crc kubenswrapper[4857]: I1128 15:13:44.208208 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-ca47-account-create-update-w72m7"
Nov 28 15:13:46 crc kubenswrapper[4857]: I1128 15:13:46.052364 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-j7v7h"]
Nov 28 15:13:46 crc kubenswrapper[4857]: I1128 15:13:46.061393 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-2f33-account-create-update-rbdtg"]
Nov 28 15:13:46 crc kubenswrapper[4857]: I1128 15:13:46.072207 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-j7v7h"]
Nov 28 15:13:46 crc kubenswrapper[4857]: I1128 15:13:46.081212 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-2f33-account-create-update-rbdtg"]
Nov 28 15:13:46 crc kubenswrapper[4857]: I1128 15:13:46.243771 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c1b914f-821e-49ac-854c-3bd5df04ae08" path="/var/lib/kubelet/pods/1c1b914f-821e-49ac-854c-3bd5df04ae08/volumes"
Nov 28 15:13:46 crc kubenswrapper[4857]: I1128 15:13:46.244665 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc6a025d-a031-4f1d-99fa-1aa711ec4cde" path="/var/lib/kubelet/pods/cc6a025d-a031-4f1d-99fa-1aa711ec4cde/volumes"
Nov 28 15:13:48 crc kubenswrapper[4857]: I1128 15:13:48.832354 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-5669786d45-d96bq"
Nov 28 15:13:48 crc kubenswrapper[4857]: I1128 15:13:48.832975 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-5669786d45-d96bq"
Nov 28 15:13:49 crc kubenswrapper[4857]: I1128 15:13:49.846496 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-95psf"]
Nov 28 15:13:49 crc kubenswrapper[4857]: E1128 15:13:49.848505 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d8d5035-d774-4c0d-a530-62382257bc8f" containerName="mariadb-database-create"
Nov 28 15:13:49 crc kubenswrapper[4857]: I1128 15:13:49.848531 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d8d5035-d774-4c0d-a530-62382257bc8f" containerName="mariadb-database-create"
Nov 28 15:13:49 crc kubenswrapper[4857]: E1128 15:13:49.848554 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ca679d3b-5662-4e16-a37d-991087f4857f" containerName="mariadb-account-create-update"
podUID="ca679d3b-5662-4e16-a37d-991087f4857f" containerName="mariadb-account-create-update" Nov 28 15:13:49 crc kubenswrapper[4857]: I1128 15:13:49.848564 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="ca679d3b-5662-4e16-a37d-991087f4857f" containerName="mariadb-account-create-update" Nov 28 15:13:49 crc kubenswrapper[4857]: I1128 15:13:49.848829 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="ca679d3b-5662-4e16-a37d-991087f4857f" containerName="mariadb-account-create-update" Nov 28 15:13:49 crc kubenswrapper[4857]: I1128 15:13:49.848861 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d8d5035-d774-4c0d-a530-62382257bc8f" containerName="mariadb-database-create" Nov 28 15:13:49 crc kubenswrapper[4857]: I1128 15:13:49.849778 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-95psf" Nov 28 15:13:49 crc kubenswrapper[4857]: I1128 15:13:49.859315 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-x4c4l" Nov 28 15:13:49 crc kubenswrapper[4857]: I1128 15:13:49.860109 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-95psf"] Nov 28 15:13:49 crc kubenswrapper[4857]: I1128 15:13:49.861539 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Nov 28 15:13:49 crc kubenswrapper[4857]: I1128 15:13:49.905594 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63a2503c-af94-42d7-b995-c2fe4018aed0-config-data\") pod \"heat-db-sync-95psf\" (UID: \"63a2503c-af94-42d7-b995-c2fe4018aed0\") " pod="openstack/heat-db-sync-95psf" Nov 28 15:13:49 crc kubenswrapper[4857]: I1128 15:13:49.905812 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63a2503c-af94-42d7-b995-c2fe4018aed0-combined-ca-bundle\") pod \"heat-db-sync-95psf\" (UID: \"63a2503c-af94-42d7-b995-c2fe4018aed0\") " pod="openstack/heat-db-sync-95psf" Nov 28 15:13:49 crc kubenswrapper[4857]: I1128 15:13:49.905830 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5gnh7\" (UniqueName: \"kubernetes.io/projected/63a2503c-af94-42d7-b995-c2fe4018aed0-kube-api-access-5gnh7\") pod \"heat-db-sync-95psf\" (UID: \"63a2503c-af94-42d7-b995-c2fe4018aed0\") " pod="openstack/heat-db-sync-95psf" Nov 28 15:13:50 crc kubenswrapper[4857]: I1128 15:13:50.007536 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63a2503c-af94-42d7-b995-c2fe4018aed0-combined-ca-bundle\") pod \"heat-db-sync-95psf\" (UID: \"63a2503c-af94-42d7-b995-c2fe4018aed0\") " pod="openstack/heat-db-sync-95psf" Nov 28 15:13:50 crc kubenswrapper[4857]: I1128 15:13:50.007871 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5gnh7\" (UniqueName: \"kubernetes.io/projected/63a2503c-af94-42d7-b995-c2fe4018aed0-kube-api-access-5gnh7\") pod \"heat-db-sync-95psf\" (UID: \"63a2503c-af94-42d7-b995-c2fe4018aed0\") " pod="openstack/heat-db-sync-95psf" Nov 28 15:13:50 crc kubenswrapper[4857]: I1128 15:13:50.008038 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/63a2503c-af94-42d7-b995-c2fe4018aed0-config-data\") pod \"heat-db-sync-95psf\" (UID: \"63a2503c-af94-42d7-b995-c2fe4018aed0\") " pod="openstack/heat-db-sync-95psf" Nov 28 15:13:50 crc kubenswrapper[4857]: I1128 15:13:50.015011 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63a2503c-af94-42d7-b995-c2fe4018aed0-combined-ca-bundle\") pod \"heat-db-sync-95psf\" (UID: \"63a2503c-af94-42d7-b995-c2fe4018aed0\") " pod="openstack/heat-db-sync-95psf" Nov 28 15:13:50 crc kubenswrapper[4857]: I1128 15:13:50.015274 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63a2503c-af94-42d7-b995-c2fe4018aed0-config-data\") pod \"heat-db-sync-95psf\" (UID: \"63a2503c-af94-42d7-b995-c2fe4018aed0\") " pod="openstack/heat-db-sync-95psf" Nov 28 15:13:50 crc kubenswrapper[4857]: I1128 15:13:50.032589 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5gnh7\" (UniqueName: \"kubernetes.io/projected/63a2503c-af94-42d7-b995-c2fe4018aed0-kube-api-access-5gnh7\") pod \"heat-db-sync-95psf\" (UID: \"63a2503c-af94-42d7-b995-c2fe4018aed0\") " pod="openstack/heat-db-sync-95psf" Nov 28 15:13:50 crc kubenswrapper[4857]: I1128 15:13:50.176801 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-95psf" Nov 28 15:13:50 crc kubenswrapper[4857]: I1128 15:13:50.775801 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-95psf"] Nov 28 15:13:51 crc kubenswrapper[4857]: I1128 15:13:51.356391 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-95psf" event={"ID":"63a2503c-af94-42d7-b995-c2fe4018aed0","Type":"ContainerStarted","Data":"eaa080996164e03635890624408266decd8dc11057f711c62bce84ef9a2368d9"} Nov 28 15:13:52 crc kubenswrapper[4857]: I1128 15:13:52.051241 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-d92fp"] Nov 28 15:13:52 crc kubenswrapper[4857]: I1128 15:13:52.064293 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-d92fp"] Nov 28 15:13:52 crc kubenswrapper[4857]: I1128 15:13:52.241685 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e9f8162a-0373-42d9-92ac-1089becb850e" path="/var/lib/kubelet/pods/e9f8162a-0373-42d9-92ac-1089becb850e/volumes" Nov 28 15:13:58 crc kubenswrapper[4857]: I1128 15:13:58.834544 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5669786d45-d96bq" podUID="90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.117:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.117:8080: connect: connection refused" Nov 28 15:14:00 crc kubenswrapper[4857]: I1128 15:14:00.493383 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-95psf" event={"ID":"63a2503c-af94-42d7-b995-c2fe4018aed0","Type":"ContainerStarted","Data":"0379c6b834498938a2603a2438b8a9d8528e8ae567af8a1d183dcc27a5cd902a"} Nov 28 15:14:01 crc kubenswrapper[4857]: I1128 15:14:01.510309 4857 generic.go:334] "Generic (PLEG): container finished" podID="63a2503c-af94-42d7-b995-c2fe4018aed0" containerID="0379c6b834498938a2603a2438b8a9d8528e8ae567af8a1d183dcc27a5cd902a" exitCode=0 Nov 28 15:14:01 crc kubenswrapper[4857]: I1128 15:14:01.510366 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/heat-db-sync-95psf" event={"ID":"63a2503c-af94-42d7-b995-c2fe4018aed0","Type":"ContainerDied","Data":"0379c6b834498938a2603a2438b8a9d8528e8ae567af8a1d183dcc27a5cd902a"} Nov 28 15:14:02 crc kubenswrapper[4857]: I1128 15:14:02.972062 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-95psf" Nov 28 15:14:03 crc kubenswrapper[4857]: I1128 15:14:03.055549 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5gnh7\" (UniqueName: \"kubernetes.io/projected/63a2503c-af94-42d7-b995-c2fe4018aed0-kube-api-access-5gnh7\") pod \"63a2503c-af94-42d7-b995-c2fe4018aed0\" (UID: \"63a2503c-af94-42d7-b995-c2fe4018aed0\") " Nov 28 15:14:03 crc kubenswrapper[4857]: I1128 15:14:03.056004 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63a2503c-af94-42d7-b995-c2fe4018aed0-config-data\") pod \"63a2503c-af94-42d7-b995-c2fe4018aed0\" (UID: \"63a2503c-af94-42d7-b995-c2fe4018aed0\") " Nov 28 15:14:03 crc kubenswrapper[4857]: I1128 15:14:03.056067 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63a2503c-af94-42d7-b995-c2fe4018aed0-combined-ca-bundle\") pod \"63a2503c-af94-42d7-b995-c2fe4018aed0\" (UID: \"63a2503c-af94-42d7-b995-c2fe4018aed0\") " Nov 28 15:14:03 crc kubenswrapper[4857]: I1128 15:14:03.061415 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63a2503c-af94-42d7-b995-c2fe4018aed0-kube-api-access-5gnh7" (OuterVolumeSpecName: "kube-api-access-5gnh7") pod "63a2503c-af94-42d7-b995-c2fe4018aed0" (UID: "63a2503c-af94-42d7-b995-c2fe4018aed0"). InnerVolumeSpecName "kube-api-access-5gnh7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:14:03 crc kubenswrapper[4857]: I1128 15:14:03.100939 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63a2503c-af94-42d7-b995-c2fe4018aed0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "63a2503c-af94-42d7-b995-c2fe4018aed0" (UID: "63a2503c-af94-42d7-b995-c2fe4018aed0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:14:03 crc kubenswrapper[4857]: I1128 15:14:03.135236 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/63a2503c-af94-42d7-b995-c2fe4018aed0-config-data" (OuterVolumeSpecName: "config-data") pod "63a2503c-af94-42d7-b995-c2fe4018aed0" (UID: "63a2503c-af94-42d7-b995-c2fe4018aed0"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:14:03 crc kubenswrapper[4857]: I1128 15:14:03.159547 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/63a2503c-af94-42d7-b995-c2fe4018aed0-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:14:03 crc kubenswrapper[4857]: I1128 15:14:03.159750 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/63a2503c-af94-42d7-b995-c2fe4018aed0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:14:03 crc kubenswrapper[4857]: I1128 15:14:03.159852 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5gnh7\" (UniqueName: \"kubernetes.io/projected/63a2503c-af94-42d7-b995-c2fe4018aed0-kube-api-access-5gnh7\") on node \"crc\" DevicePath \"\"" Nov 28 15:14:03 crc kubenswrapper[4857]: I1128 15:14:03.537507 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-95psf" event={"ID":"63a2503c-af94-42d7-b995-c2fe4018aed0","Type":"ContainerDied","Data":"eaa080996164e03635890624408266decd8dc11057f711c62bce84ef9a2368d9"} Nov 28 15:14:03 crc kubenswrapper[4857]: I1128 15:14:03.537561 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eaa080996164e03635890624408266decd8dc11057f711c62bce84ef9a2368d9" Nov 28 15:14:03 crc kubenswrapper[4857]: I1128 15:14:03.537648 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-95psf" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.521054 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-8685975d9c-mq499"] Nov 28 15:14:04 crc kubenswrapper[4857]: E1128 15:14:04.521863 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63a2503c-af94-42d7-b995-c2fe4018aed0" containerName="heat-db-sync" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.521877 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="63a2503c-af94-42d7-b995-c2fe4018aed0" containerName="heat-db-sync" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.522096 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="63a2503c-af94-42d7-b995-c2fe4018aed0" containerName="heat-db-sync" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.571788 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-8685975d9c-mq499"] Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.571982 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-8685975d9c-mq499" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.584494 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-engine-config-data" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.588892 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-x4c4l" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.596263 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.687235 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-55b678f96d-kk66g"] Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.688859 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-55b678f96d-kk66g" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.692664 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-api-config-data" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.699378 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-55b678f96d-kk66g"] Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.703812 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06b77386-fe69-408f-ba29-f70de0a73e6e-combined-ca-bundle\") pod \"heat-engine-8685975d9c-mq499\" (UID: \"06b77386-fe69-408f-ba29-f70de0a73e6e\") " pod="openstack/heat-engine-8685975d9c-mq499" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.703889 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-khcjx\" (UniqueName: \"kubernetes.io/projected/06b77386-fe69-408f-ba29-f70de0a73e6e-kube-api-access-khcjx\") pod \"heat-engine-8685975d9c-mq499\" (UID: \"06b77386-fe69-408f-ba29-f70de0a73e6e\") " pod="openstack/heat-engine-8685975d9c-mq499" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.704051 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/06b77386-fe69-408f-ba29-f70de0a73e6e-config-data-custom\") pod \"heat-engine-8685975d9c-mq499\" (UID: \"06b77386-fe69-408f-ba29-f70de0a73e6e\") " pod="openstack/heat-engine-8685975d9c-mq499" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.704204 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06b77386-fe69-408f-ba29-f70de0a73e6e-config-data\") pod \"heat-engine-8685975d9c-mq499\" (UID: \"06b77386-fe69-408f-ba29-f70de0a73e6e\") " pod="openstack/heat-engine-8685975d9c-mq499" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.747581 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-7c69887759-78cdn"] Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.749326 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-7c69887759-78cdn" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.752479 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-cfnapi-config-data" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.758637 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-7c69887759-78cdn"] Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.814180 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2jr7g\" (UniqueName: \"kubernetes.io/projected/9a42c08c-72d5-416f-8ee8-5cecfc36b4ab-kube-api-access-2jr7g\") pod \"heat-cfnapi-7c69887759-78cdn\" (UID: \"9a42c08c-72d5-416f-8ee8-5cecfc36b4ab\") " pod="openstack/heat-cfnapi-7c69887759-78cdn" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.814237 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06b77386-fe69-408f-ba29-f70de0a73e6e-combined-ca-bundle\") pod \"heat-engine-8685975d9c-mq499\" (UID: \"06b77386-fe69-408f-ba29-f70de0a73e6e\") " pod="openstack/heat-engine-8685975d9c-mq499" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.814282 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-khcjx\" (UniqueName: \"kubernetes.io/projected/06b77386-fe69-408f-ba29-f70de0a73e6e-kube-api-access-khcjx\") pod \"heat-engine-8685975d9c-mq499\" (UID: \"06b77386-fe69-408f-ba29-f70de0a73e6e\") " pod="openstack/heat-engine-8685975d9c-mq499" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.814465 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9a42c08c-72d5-416f-8ee8-5cecfc36b4ab-config-data\") pod \"heat-cfnapi-7c69887759-78cdn\" (UID: \"9a42c08c-72d5-416f-8ee8-5cecfc36b4ab\") " pod="openstack/heat-cfnapi-7c69887759-78cdn" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.814550 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/06b77386-fe69-408f-ba29-f70de0a73e6e-config-data-custom\") pod \"heat-engine-8685975d9c-mq499\" (UID: \"06b77386-fe69-408f-ba29-f70de0a73e6e\") " pod="openstack/heat-engine-8685975d9c-mq499" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.814634 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nhwhq\" (UniqueName: \"kubernetes.io/projected/3a7b5ab7-2a4b-4b94-bb2c-d4093febdb6c-kube-api-access-nhwhq\") pod \"heat-api-55b678f96d-kk66g\" (UID: \"3a7b5ab7-2a4b-4b94-bb2c-d4093febdb6c\") " pod="openstack/heat-api-55b678f96d-kk66g" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.814653 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a42c08c-72d5-416f-8ee8-5cecfc36b4ab-combined-ca-bundle\") pod \"heat-cfnapi-7c69887759-78cdn\" (UID: \"9a42c08c-72d5-416f-8ee8-5cecfc36b4ab\") " pod="openstack/heat-cfnapi-7c69887759-78cdn" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.814745 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06b77386-fe69-408f-ba29-f70de0a73e6e-config-data\") pod \"heat-engine-8685975d9c-mq499\" (UID: 
\"06b77386-fe69-408f-ba29-f70de0a73e6e\") " pod="openstack/heat-engine-8685975d9c-mq499" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.814779 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a7b5ab7-2a4b-4b94-bb2c-d4093febdb6c-config-data\") pod \"heat-api-55b678f96d-kk66g\" (UID: \"3a7b5ab7-2a4b-4b94-bb2c-d4093febdb6c\") " pod="openstack/heat-api-55b678f96d-kk66g" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.814827 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9a42c08c-72d5-416f-8ee8-5cecfc36b4ab-config-data-custom\") pod \"heat-cfnapi-7c69887759-78cdn\" (UID: \"9a42c08c-72d5-416f-8ee8-5cecfc36b4ab\") " pod="openstack/heat-cfnapi-7c69887759-78cdn" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.814895 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3a7b5ab7-2a4b-4b94-bb2c-d4093febdb6c-config-data-custom\") pod \"heat-api-55b678f96d-kk66g\" (UID: \"3a7b5ab7-2a4b-4b94-bb2c-d4093febdb6c\") " pod="openstack/heat-api-55b678f96d-kk66g" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.814991 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a7b5ab7-2a4b-4b94-bb2c-d4093febdb6c-combined-ca-bundle\") pod \"heat-api-55b678f96d-kk66g\" (UID: \"3a7b5ab7-2a4b-4b94-bb2c-d4093febdb6c\") " pod="openstack/heat-api-55b678f96d-kk66g" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.844531 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06b77386-fe69-408f-ba29-f70de0a73e6e-config-data\") pod \"heat-engine-8685975d9c-mq499\" (UID: \"06b77386-fe69-408f-ba29-f70de0a73e6e\") " pod="openstack/heat-engine-8685975d9c-mq499" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.845128 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06b77386-fe69-408f-ba29-f70de0a73e6e-combined-ca-bundle\") pod \"heat-engine-8685975d9c-mq499\" (UID: \"06b77386-fe69-408f-ba29-f70de0a73e6e\") " pod="openstack/heat-engine-8685975d9c-mq499" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.848025 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/06b77386-fe69-408f-ba29-f70de0a73e6e-config-data-custom\") pod \"heat-engine-8685975d9c-mq499\" (UID: \"06b77386-fe69-408f-ba29-f70de0a73e6e\") " pod="openstack/heat-engine-8685975d9c-mq499" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.860616 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-khcjx\" (UniqueName: \"kubernetes.io/projected/06b77386-fe69-408f-ba29-f70de0a73e6e-kube-api-access-khcjx\") pod \"heat-engine-8685975d9c-mq499\" (UID: \"06b77386-fe69-408f-ba29-f70de0a73e6e\") " pod="openstack/heat-engine-8685975d9c-mq499" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.918285 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3a7b5ab7-2a4b-4b94-bb2c-d4093febdb6c-config-data-custom\") pod \"heat-api-55b678f96d-kk66g\" (UID: 
\"3a7b5ab7-2a4b-4b94-bb2c-d4093febdb6c\") " pod="openstack/heat-api-55b678f96d-kk66g" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.918341 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a7b5ab7-2a4b-4b94-bb2c-d4093febdb6c-combined-ca-bundle\") pod \"heat-api-55b678f96d-kk66g\" (UID: \"3a7b5ab7-2a4b-4b94-bb2c-d4093febdb6c\") " pod="openstack/heat-api-55b678f96d-kk66g" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.918415 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2jr7g\" (UniqueName: \"kubernetes.io/projected/9a42c08c-72d5-416f-8ee8-5cecfc36b4ab-kube-api-access-2jr7g\") pod \"heat-cfnapi-7c69887759-78cdn\" (UID: \"9a42c08c-72d5-416f-8ee8-5cecfc36b4ab\") " pod="openstack/heat-cfnapi-7c69887759-78cdn" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.918464 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9a42c08c-72d5-416f-8ee8-5cecfc36b4ab-config-data\") pod \"heat-cfnapi-7c69887759-78cdn\" (UID: \"9a42c08c-72d5-416f-8ee8-5cecfc36b4ab\") " pod="openstack/heat-cfnapi-7c69887759-78cdn" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.918504 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nhwhq\" (UniqueName: \"kubernetes.io/projected/3a7b5ab7-2a4b-4b94-bb2c-d4093febdb6c-kube-api-access-nhwhq\") pod \"heat-api-55b678f96d-kk66g\" (UID: \"3a7b5ab7-2a4b-4b94-bb2c-d4093febdb6c\") " pod="openstack/heat-api-55b678f96d-kk66g" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.918521 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a42c08c-72d5-416f-8ee8-5cecfc36b4ab-combined-ca-bundle\") pod \"heat-cfnapi-7c69887759-78cdn\" (UID: \"9a42c08c-72d5-416f-8ee8-5cecfc36b4ab\") " pod="openstack/heat-cfnapi-7c69887759-78cdn" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.918551 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a7b5ab7-2a4b-4b94-bb2c-d4093febdb6c-config-data\") pod \"heat-api-55b678f96d-kk66g\" (UID: \"3a7b5ab7-2a4b-4b94-bb2c-d4093febdb6c\") " pod="openstack/heat-api-55b678f96d-kk66g" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.918569 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9a42c08c-72d5-416f-8ee8-5cecfc36b4ab-config-data-custom\") pod \"heat-cfnapi-7c69887759-78cdn\" (UID: \"9a42c08c-72d5-416f-8ee8-5cecfc36b4ab\") " pod="openstack/heat-cfnapi-7c69887759-78cdn" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.922107 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9a42c08c-72d5-416f-8ee8-5cecfc36b4ab-config-data-custom\") pod \"heat-cfnapi-7c69887759-78cdn\" (UID: \"9a42c08c-72d5-416f-8ee8-5cecfc36b4ab\") " pod="openstack/heat-cfnapi-7c69887759-78cdn" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.923231 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9a42c08c-72d5-416f-8ee8-5cecfc36b4ab-config-data\") pod \"heat-cfnapi-7c69887759-78cdn\" (UID: \"9a42c08c-72d5-416f-8ee8-5cecfc36b4ab\") " 
pod="openstack/heat-cfnapi-7c69887759-78cdn" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.924532 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3a7b5ab7-2a4b-4b94-bb2c-d4093febdb6c-config-data-custom\") pod \"heat-api-55b678f96d-kk66g\" (UID: \"3a7b5ab7-2a4b-4b94-bb2c-d4093febdb6c\") " pod="openstack/heat-api-55b678f96d-kk66g" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.931345 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3a7b5ab7-2a4b-4b94-bb2c-d4093febdb6c-config-data\") pod \"heat-api-55b678f96d-kk66g\" (UID: \"3a7b5ab7-2a4b-4b94-bb2c-d4093febdb6c\") " pod="openstack/heat-api-55b678f96d-kk66g" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.937492 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-8685975d9c-mq499" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.939787 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3a7b5ab7-2a4b-4b94-bb2c-d4093febdb6c-combined-ca-bundle\") pod \"heat-api-55b678f96d-kk66g\" (UID: \"3a7b5ab7-2a4b-4b94-bb2c-d4093febdb6c\") " pod="openstack/heat-api-55b678f96d-kk66g" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.939807 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9a42c08c-72d5-416f-8ee8-5cecfc36b4ab-combined-ca-bundle\") pod \"heat-cfnapi-7c69887759-78cdn\" (UID: \"9a42c08c-72d5-416f-8ee8-5cecfc36b4ab\") " pod="openstack/heat-cfnapi-7c69887759-78cdn" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.939976 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2jr7g\" (UniqueName: \"kubernetes.io/projected/9a42c08c-72d5-416f-8ee8-5cecfc36b4ab-kube-api-access-2jr7g\") pod \"heat-cfnapi-7c69887759-78cdn\" (UID: \"9a42c08c-72d5-416f-8ee8-5cecfc36b4ab\") " pod="openstack/heat-cfnapi-7c69887759-78cdn" Nov 28 15:14:04 crc kubenswrapper[4857]: I1128 15:14:04.944010 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nhwhq\" (UniqueName: \"kubernetes.io/projected/3a7b5ab7-2a4b-4b94-bb2c-d4093febdb6c-kube-api-access-nhwhq\") pod \"heat-api-55b678f96d-kk66g\" (UID: \"3a7b5ab7-2a4b-4b94-bb2c-d4093febdb6c\") " pod="openstack/heat-api-55b678f96d-kk66g" Nov 28 15:14:05 crc kubenswrapper[4857]: I1128 15:14:05.029524 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-55b678f96d-kk66g" Nov 28 15:14:05 crc kubenswrapper[4857]: I1128 15:14:05.096397 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-7c69887759-78cdn" Nov 28 15:14:05 crc kubenswrapper[4857]: I1128 15:14:05.487518 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-8685975d9c-mq499"] Nov 28 15:14:05 crc kubenswrapper[4857]: I1128 15:14:05.582102 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-8685975d9c-mq499" event={"ID":"06b77386-fe69-408f-ba29-f70de0a73e6e","Type":"ContainerStarted","Data":"a79b57b896da25bf1f0feceb7c415175f6e98217c73c0b892258412dfc7b2186"} Nov 28 15:14:05 crc kubenswrapper[4857]: I1128 15:14:05.596146 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-55b678f96d-kk66g"] Nov 28 15:14:05 crc kubenswrapper[4857]: I1128 15:14:05.626334 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-7c69887759-78cdn"] Nov 28 15:14:05 crc kubenswrapper[4857]: W1128 15:14:05.635085 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9a42c08c_72d5_416f_8ee8_5cecfc36b4ab.slice/crio-e5645c41e02fc17c207f5f4240839d35891d18ae94501b8c8a60e41efa0c4973 WatchSource:0}: Error finding container e5645c41e02fc17c207f5f4240839d35891d18ae94501b8c8a60e41efa0c4973: Status 404 returned error can't find the container with id e5645c41e02fc17c207f5f4240839d35891d18ae94501b8c8a60e41efa0c4973 Nov 28 15:14:06 crc kubenswrapper[4857]: I1128 15:14:06.597890 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-8685975d9c-mq499" event={"ID":"06b77386-fe69-408f-ba29-f70de0a73e6e","Type":"ContainerStarted","Data":"1a7da0060282c9c9bbe2cc023128a7a4bee7bbb094112afec4a88be2d7cfee98"} Nov 28 15:14:06 crc kubenswrapper[4857]: I1128 15:14:06.598372 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-8685975d9c-mq499" Nov 28 15:14:06 crc kubenswrapper[4857]: I1128 15:14:06.601750 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7c69887759-78cdn" event={"ID":"9a42c08c-72d5-416f-8ee8-5cecfc36b4ab","Type":"ContainerStarted","Data":"e5645c41e02fc17c207f5f4240839d35891d18ae94501b8c8a60e41efa0c4973"} Nov 28 15:14:06 crc kubenswrapper[4857]: I1128 15:14:06.603438 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-55b678f96d-kk66g" event={"ID":"3a7b5ab7-2a4b-4b94-bb2c-d4093febdb6c","Type":"ContainerStarted","Data":"62319749c7f287595d308053c2b158ac9b2983094cbccd3130e3b1f450cfadb7"} Nov 28 15:14:06 crc kubenswrapper[4857]: I1128 15:14:06.621214 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-8685975d9c-mq499" podStartSLOduration=2.621188917 podStartE2EDuration="2.621188917s" podCreationTimestamp="2025-11-28 15:14:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:14:06.614711924 +0000 UTC m=+6296.738653361" watchObservedRunningTime="2025-11-28 15:14:06.621188917 +0000 UTC m=+6296.745130354" Nov 28 15:14:10 crc kubenswrapper[4857]: I1128 15:14:10.660925 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7c69887759-78cdn" event={"ID":"9a42c08c-72d5-416f-8ee8-5cecfc36b4ab","Type":"ContainerStarted","Data":"51bb908116a995f7922b231fa7dcf95b1a15a9bc70b6ea01ca5ba96c16469c35"} Nov 28 15:14:10 crc kubenswrapper[4857]: I1128 15:14:10.662276 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/heat-cfnapi-7c69887759-78cdn" Nov 28 15:14:10 crc kubenswrapper[4857]: I1128 15:14:10.664712 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-55b678f96d-kk66g" event={"ID":"3a7b5ab7-2a4b-4b94-bb2c-d4093febdb6c","Type":"ContainerStarted","Data":"244fe981c89e5717da146dd4c810a4a9de7fbae127a3b13efffdc7bc00263164"} Nov 28 15:14:10 crc kubenswrapper[4857]: I1128 15:14:10.664854 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-55b678f96d-kk66g" Nov 28 15:14:10 crc kubenswrapper[4857]: I1128 15:14:10.700634 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-7c69887759-78cdn" podStartSLOduration=2.946269989 podStartE2EDuration="6.70060514s" podCreationTimestamp="2025-11-28 15:14:04 +0000 UTC" firstStartedPulling="2025-11-28 15:14:05.638300743 +0000 UTC m=+6295.762242180" lastFinishedPulling="2025-11-28 15:14:09.392635894 +0000 UTC m=+6299.516577331" observedRunningTime="2025-11-28 15:14:10.695792021 +0000 UTC m=+6300.819733478" watchObservedRunningTime="2025-11-28 15:14:10.70060514 +0000 UTC m=+6300.824546577" Nov 28 15:14:10 crc kubenswrapper[4857]: I1128 15:14:10.734082 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-55b678f96d-kk66g" podStartSLOduration=2.970188578 podStartE2EDuration="6.734055403s" podCreationTimestamp="2025-11-28 15:14:04 +0000 UTC" firstStartedPulling="2025-11-28 15:14:05.622314996 +0000 UTC m=+6295.746256433" lastFinishedPulling="2025-11-28 15:14:09.386181821 +0000 UTC m=+6299.510123258" observedRunningTime="2025-11-28 15:14:10.727899849 +0000 UTC m=+6300.851841286" watchObservedRunningTime="2025-11-28 15:14:10.734055403 +0000 UTC m=+6300.857996840" Nov 28 15:14:11 crc kubenswrapper[4857]: I1128 15:14:11.010048 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-5669786d45-d96bq" Nov 28 15:14:11 crc kubenswrapper[4857]: I1128 15:14:11.307973 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:14:11 crc kubenswrapper[4857]: I1128 15:14:11.308256 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:14:12 crc kubenswrapper[4857]: I1128 15:14:12.695377 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-5669786d45-d96bq" Nov 28 15:14:12 crc kubenswrapper[4857]: I1128 15:14:12.758175 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-66cdcfc869-s88hx"] Nov 28 15:14:12 crc kubenswrapper[4857]: I1128 15:14:12.758458 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-66cdcfc869-s88hx" podUID="26ad8963-2e63-4a75-9de9-90e28f9153d5" containerName="horizon-log" containerID="cri-o://f58a5a2bcb297d26965ef57a0757d481ad8ad9333943bd5d5fa90f9fb903146d" gracePeriod=30 Nov 28 15:14:12 crc kubenswrapper[4857]: I1128 15:14:12.758531 4857 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/horizon-66cdcfc869-s88hx" podUID="26ad8963-2e63-4a75-9de9-90e28f9153d5" containerName="horizon" containerID="cri-o://6ac547a69886adda1571b0f0306313b35d1ede3c084fd37ee7ab09b390cffc1d" gracePeriod=30 Nov 28 15:14:16 crc kubenswrapper[4857]: I1128 15:14:16.669463 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-55b678f96d-kk66g" Nov 28 15:14:16 crc kubenswrapper[4857]: I1128 15:14:16.735768 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-7c69887759-78cdn" Nov 28 15:14:16 crc kubenswrapper[4857]: I1128 15:14:16.738728 4857 generic.go:334] "Generic (PLEG): container finished" podID="26ad8963-2e63-4a75-9de9-90e28f9153d5" containerID="6ac547a69886adda1571b0f0306313b35d1ede3c084fd37ee7ab09b390cffc1d" exitCode=0 Nov 28 15:14:16 crc kubenswrapper[4857]: I1128 15:14:16.738759 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-66cdcfc869-s88hx" event={"ID":"26ad8963-2e63-4a75-9de9-90e28f9153d5","Type":"ContainerDied","Data":"6ac547a69886adda1571b0f0306313b35d1ede3c084fd37ee7ab09b390cffc1d"} Nov 28 15:14:21 crc kubenswrapper[4857]: I1128 15:14:21.478515 4857 scope.go:117] "RemoveContainer" containerID="0a20cefbcbc36698209bd7c7348b6cb95cddf39ca0c2cf3ee24dcc1398f3ec01" Nov 28 15:14:21 crc kubenswrapper[4857]: I1128 15:14:21.506729 4857 scope.go:117] "RemoveContainer" containerID="eaaa6179ec6222b2ddb8c376a23f8fb4d957fc7b0fbfbf1e068804bbaf8b9cdb" Nov 28 15:14:21 crc kubenswrapper[4857]: I1128 15:14:21.566859 4857 scope.go:117] "RemoveContainer" containerID="dbe0531944fad816df0057021f927d051f121d14277fec85793b4feecb4dde3f" Nov 28 15:14:22 crc kubenswrapper[4857]: I1128 15:14:22.103225 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-66cdcfc869-s88hx" podUID="26ad8963-2e63-4a75-9de9-90e28f9153d5" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.114:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.114:8080: connect: connection refused" Nov 28 15:14:24 crc kubenswrapper[4857]: I1128 15:14:24.976044 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-8685975d9c-mq499" Nov 28 15:14:32 crc kubenswrapper[4857]: I1128 15:14:32.102360 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-66cdcfc869-s88hx" podUID="26ad8963-2e63-4a75-9de9-90e28f9153d5" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.114:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.114:8080: connect: connection refused" Nov 28 15:14:41 crc kubenswrapper[4857]: I1128 15:14:41.308386 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:14:41 crc kubenswrapper[4857]: I1128 15:14:41.309119 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:14:41 crc kubenswrapper[4857]: I1128 15:14:41.309289 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" 
pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 15:14:41 crc kubenswrapper[4857]: I1128 15:14:41.310081 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"cd65dc19359f44ea74d1e09fca4c6fc276670fdbfc5a599a66977a1d2ab62015"} pod="openshift-machine-config-operator/machine-config-daemon-dshsf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 15:14:41 crc kubenswrapper[4857]: I1128 15:14:41.310151 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" containerID="cri-o://cd65dc19359f44ea74d1e09fca4c6fc276670fdbfc5a599a66977a1d2ab62015" gracePeriod=600 Nov 28 15:14:42 crc kubenswrapper[4857]: I1128 15:14:42.028492 4857 generic.go:334] "Generic (PLEG): container finished" podID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerID="cd65dc19359f44ea74d1e09fca4c6fc276670fdbfc5a599a66977a1d2ab62015" exitCode=0 Nov 28 15:14:42 crc kubenswrapper[4857]: I1128 15:14:42.028581 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerDied","Data":"cd65dc19359f44ea74d1e09fca4c6fc276670fdbfc5a599a66977a1d2ab62015"} Nov 28 15:14:42 crc kubenswrapper[4857]: I1128 15:14:42.028838 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerStarted","Data":"5ef5285bd197ee35f66becdfa3ecde61afe107f66575e8853fe09967aa8011e3"} Nov 28 15:14:42 crc kubenswrapper[4857]: I1128 15:14:42.028872 4857 scope.go:117] "RemoveContainer" containerID="241be049ff6ed3f89c9bfd340177c2b5b57d6cca2ed8fa8917c7f312eb8c7ce1" Nov 28 15:14:42 crc kubenswrapper[4857]: I1128 15:14:42.103620 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-66cdcfc869-s88hx" podUID="26ad8963-2e63-4a75-9de9-90e28f9153d5" containerName="horizon" probeResult="failure" output="Get \"http://10.217.1.114:8080/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.114:8080: connect: connection refused" Nov 28 15:14:42 crc kubenswrapper[4857]: I1128 15:14:42.103778 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-66cdcfc869-s88hx" Nov 28 15:14:42 crc kubenswrapper[4857]: I1128 15:14:42.544424 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210mjsgt"] Nov 28 15:14:42 crc kubenswrapper[4857]: I1128 15:14:42.547534 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210mjsgt" Nov 28 15:14:42 crc kubenswrapper[4857]: I1128 15:14:42.550976 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 28 15:14:42 crc kubenswrapper[4857]: I1128 15:14:42.573154 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210mjsgt"] Nov 28 15:14:42 crc kubenswrapper[4857]: I1128 15:14:42.701743 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xnzrn\" (UniqueName: \"kubernetes.io/projected/acf7905a-757e-4f16-a8bc-6ecbce935582-kube-api-access-xnzrn\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210mjsgt\" (UID: \"acf7905a-757e-4f16-a8bc-6ecbce935582\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210mjsgt" Nov 28 15:14:42 crc kubenswrapper[4857]: I1128 15:14:42.701847 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/acf7905a-757e-4f16-a8bc-6ecbce935582-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210mjsgt\" (UID: \"acf7905a-757e-4f16-a8bc-6ecbce935582\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210mjsgt" Nov 28 15:14:42 crc kubenswrapper[4857]: I1128 15:14:42.701924 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/acf7905a-757e-4f16-a8bc-6ecbce935582-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210mjsgt\" (UID: \"acf7905a-757e-4f16-a8bc-6ecbce935582\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210mjsgt" Nov 28 15:14:42 crc kubenswrapper[4857]: I1128 15:14:42.805766 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xnzrn\" (UniqueName: \"kubernetes.io/projected/acf7905a-757e-4f16-a8bc-6ecbce935582-kube-api-access-xnzrn\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210mjsgt\" (UID: \"acf7905a-757e-4f16-a8bc-6ecbce935582\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210mjsgt" Nov 28 15:14:42 crc kubenswrapper[4857]: I1128 15:14:42.806545 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/acf7905a-757e-4f16-a8bc-6ecbce935582-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210mjsgt\" (UID: \"acf7905a-757e-4f16-a8bc-6ecbce935582\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210mjsgt" Nov 28 15:14:42 crc kubenswrapper[4857]: I1128 15:14:42.806864 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/acf7905a-757e-4f16-a8bc-6ecbce935582-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210mjsgt\" (UID: \"acf7905a-757e-4f16-a8bc-6ecbce935582\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210mjsgt" Nov 28 15:14:42 crc kubenswrapper[4857]: I1128 15:14:42.807505 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/acf7905a-757e-4f16-a8bc-6ecbce935582-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210mjsgt\" (UID: \"acf7905a-757e-4f16-a8bc-6ecbce935582\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210mjsgt" Nov 28 15:14:42 crc kubenswrapper[4857]: I1128 15:14:42.807729 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/acf7905a-757e-4f16-a8bc-6ecbce935582-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210mjsgt\" (UID: \"acf7905a-757e-4f16-a8bc-6ecbce935582\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210mjsgt" Nov 28 15:14:42 crc kubenswrapper[4857]: I1128 15:14:42.832475 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xnzrn\" (UniqueName: \"kubernetes.io/projected/acf7905a-757e-4f16-a8bc-6ecbce935582-kube-api-access-xnzrn\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210mjsgt\" (UID: \"acf7905a-757e-4f16-a8bc-6ecbce935582\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210mjsgt" Nov 28 15:14:42 crc kubenswrapper[4857]: I1128 15:14:42.877653 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210mjsgt" Nov 28 15:14:43 crc kubenswrapper[4857]: I1128 15:14:43.047068 4857 generic.go:334] "Generic (PLEG): container finished" podID="26ad8963-2e63-4a75-9de9-90e28f9153d5" containerID="f58a5a2bcb297d26965ef57a0757d481ad8ad9333943bd5d5fa90f9fb903146d" exitCode=137 Nov 28 15:14:43 crc kubenswrapper[4857]: I1128 15:14:43.047160 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-66cdcfc869-s88hx" event={"ID":"26ad8963-2e63-4a75-9de9-90e28f9153d5","Type":"ContainerDied","Data":"f58a5a2bcb297d26965ef57a0757d481ad8ad9333943bd5d5fa90f9fb903146d"} Nov 28 15:14:43 crc kubenswrapper[4857]: I1128 15:14:43.270940 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-66cdcfc869-s88hx" Nov 28 15:14:43 crc kubenswrapper[4857]: I1128 15:14:43.402976 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210mjsgt"] Nov 28 15:14:43 crc kubenswrapper[4857]: I1128 15:14:43.423991 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/26ad8963-2e63-4a75-9de9-90e28f9153d5-horizon-secret-key\") pod \"26ad8963-2e63-4a75-9de9-90e28f9153d5\" (UID: \"26ad8963-2e63-4a75-9de9-90e28f9153d5\") " Nov 28 15:14:43 crc kubenswrapper[4857]: I1128 15:14:43.424120 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/26ad8963-2e63-4a75-9de9-90e28f9153d5-scripts\") pod \"26ad8963-2e63-4a75-9de9-90e28f9153d5\" (UID: \"26ad8963-2e63-4a75-9de9-90e28f9153d5\") " Nov 28 15:14:43 crc kubenswrapper[4857]: I1128 15:14:43.424157 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/26ad8963-2e63-4a75-9de9-90e28f9153d5-config-data\") pod \"26ad8963-2e63-4a75-9de9-90e28f9153d5\" (UID: \"26ad8963-2e63-4a75-9de9-90e28f9153d5\") " Nov 28 15:14:43 crc kubenswrapper[4857]: I1128 15:14:43.424260 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-twxbk\" (UniqueName: \"kubernetes.io/projected/26ad8963-2e63-4a75-9de9-90e28f9153d5-kube-api-access-twxbk\") pod \"26ad8963-2e63-4a75-9de9-90e28f9153d5\" (UID: \"26ad8963-2e63-4a75-9de9-90e28f9153d5\") " Nov 28 15:14:43 crc kubenswrapper[4857]: I1128 15:14:43.424342 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/26ad8963-2e63-4a75-9de9-90e28f9153d5-logs\") pod \"26ad8963-2e63-4a75-9de9-90e28f9153d5\" (UID: \"26ad8963-2e63-4a75-9de9-90e28f9153d5\") " Nov 28 15:14:43 crc kubenswrapper[4857]: I1128 15:14:43.425770 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/26ad8963-2e63-4a75-9de9-90e28f9153d5-logs" (OuterVolumeSpecName: "logs") pod "26ad8963-2e63-4a75-9de9-90e28f9153d5" (UID: "26ad8963-2e63-4a75-9de9-90e28f9153d5"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:14:43 crc kubenswrapper[4857]: I1128 15:14:43.429892 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/26ad8963-2e63-4a75-9de9-90e28f9153d5-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "26ad8963-2e63-4a75-9de9-90e28f9153d5" (UID: "26ad8963-2e63-4a75-9de9-90e28f9153d5"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:14:43 crc kubenswrapper[4857]: I1128 15:14:43.430874 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/26ad8963-2e63-4a75-9de9-90e28f9153d5-kube-api-access-twxbk" (OuterVolumeSpecName: "kube-api-access-twxbk") pod "26ad8963-2e63-4a75-9de9-90e28f9153d5" (UID: "26ad8963-2e63-4a75-9de9-90e28f9153d5"). InnerVolumeSpecName "kube-api-access-twxbk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:14:43 crc kubenswrapper[4857]: I1128 15:14:43.457409 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/26ad8963-2e63-4a75-9de9-90e28f9153d5-scripts" (OuterVolumeSpecName: "scripts") pod "26ad8963-2e63-4a75-9de9-90e28f9153d5" (UID: "26ad8963-2e63-4a75-9de9-90e28f9153d5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:14:43 crc kubenswrapper[4857]: I1128 15:14:43.467810 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/26ad8963-2e63-4a75-9de9-90e28f9153d5-config-data" (OuterVolumeSpecName: "config-data") pod "26ad8963-2e63-4a75-9de9-90e28f9153d5" (UID: "26ad8963-2e63-4a75-9de9-90e28f9153d5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:14:43 crc kubenswrapper[4857]: I1128 15:14:43.527329 4857 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/26ad8963-2e63-4a75-9de9-90e28f9153d5-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 28 15:14:43 crc kubenswrapper[4857]: I1128 15:14:43.527379 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/26ad8963-2e63-4a75-9de9-90e28f9153d5-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:14:43 crc kubenswrapper[4857]: I1128 15:14:43.527394 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/26ad8963-2e63-4a75-9de9-90e28f9153d5-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:14:43 crc kubenswrapper[4857]: I1128 15:14:43.527407 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-twxbk\" (UniqueName: \"kubernetes.io/projected/26ad8963-2e63-4a75-9de9-90e28f9153d5-kube-api-access-twxbk\") on node \"crc\" DevicePath \"\"" Nov 28 15:14:43 crc kubenswrapper[4857]: I1128 15:14:43.527422 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/26ad8963-2e63-4a75-9de9-90e28f9153d5-logs\") on node \"crc\" DevicePath \"\"" Nov 28 15:14:44 crc kubenswrapper[4857]: I1128 15:14:44.070989 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-66cdcfc869-s88hx" event={"ID":"26ad8963-2e63-4a75-9de9-90e28f9153d5","Type":"ContainerDied","Data":"0b7574c77bdd057708879337575f001f53967e076e1295e61dd70d6f9fa8cc8a"} Nov 28 15:14:44 crc kubenswrapper[4857]: I1128 15:14:44.071364 4857 scope.go:117] "RemoveContainer" containerID="6ac547a69886adda1571b0f0306313b35d1ede3c084fd37ee7ab09b390cffc1d" Nov 28 15:14:44 crc kubenswrapper[4857]: I1128 15:14:44.071067 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-66cdcfc869-s88hx" Nov 28 15:14:44 crc kubenswrapper[4857]: I1128 15:14:44.075761 4857 generic.go:334] "Generic (PLEG): container finished" podID="acf7905a-757e-4f16-a8bc-6ecbce935582" containerID="99e772499aa0cfa05907860645176f3f7c7064c403e635145854e89e20c0fb1d" exitCode=0 Nov 28 15:14:44 crc kubenswrapper[4857]: I1128 15:14:44.075869 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210mjsgt" event={"ID":"acf7905a-757e-4f16-a8bc-6ecbce935582","Type":"ContainerDied","Data":"99e772499aa0cfa05907860645176f3f7c7064c403e635145854e89e20c0fb1d"} Nov 28 15:14:44 crc kubenswrapper[4857]: I1128 15:14:44.075933 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210mjsgt" event={"ID":"acf7905a-757e-4f16-a8bc-6ecbce935582","Type":"ContainerStarted","Data":"aee0a9de228a57bb70127b0f3d04e87b5daa62ee2c2c46103b9f93e5efb9a975"} Nov 28 15:14:44 crc kubenswrapper[4857]: I1128 15:14:44.150076 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-66cdcfc869-s88hx"] Nov 28 15:14:44 crc kubenswrapper[4857]: I1128 15:14:44.162400 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-66cdcfc869-s88hx"] Nov 28 15:14:44 crc kubenswrapper[4857]: I1128 15:14:44.243540 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="26ad8963-2e63-4a75-9de9-90e28f9153d5" path="/var/lib/kubelet/pods/26ad8963-2e63-4a75-9de9-90e28f9153d5/volumes" Nov 28 15:14:44 crc kubenswrapper[4857]: I1128 15:14:44.298094 4857 scope.go:117] "RemoveContainer" containerID="f58a5a2bcb297d26965ef57a0757d481ad8ad9333943bd5d5fa90f9fb903146d" Nov 28 15:14:46 crc kubenswrapper[4857]: I1128 15:14:46.102536 4857 generic.go:334] "Generic (PLEG): container finished" podID="acf7905a-757e-4f16-a8bc-6ecbce935582" containerID="dbd97aa9e265678477327a41019766825d5c9f8f4807f0d8312a98e7f0a81015" exitCode=0 Nov 28 15:14:46 crc kubenswrapper[4857]: I1128 15:14:46.103114 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210mjsgt" event={"ID":"acf7905a-757e-4f16-a8bc-6ecbce935582","Type":"ContainerDied","Data":"dbd97aa9e265678477327a41019766825d5c9f8f4807f0d8312a98e7f0a81015"} Nov 28 15:14:47 crc kubenswrapper[4857]: I1128 15:14:47.118216 4857 generic.go:334] "Generic (PLEG): container finished" podID="acf7905a-757e-4f16-a8bc-6ecbce935582" containerID="2c9cc1b3d917cb8c29955cda84c938fbfe7c48571af1a730fef9b001a0451a2c" exitCode=0 Nov 28 15:14:47 crc kubenswrapper[4857]: I1128 15:14:47.118265 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210mjsgt" event={"ID":"acf7905a-757e-4f16-a8bc-6ecbce935582","Type":"ContainerDied","Data":"2c9cc1b3d917cb8c29955cda84c938fbfe7c48571af1a730fef9b001a0451a2c"} Nov 28 15:14:48 crc kubenswrapper[4857]: I1128 15:14:48.552436 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210mjsgt" Nov 28 15:14:48 crc kubenswrapper[4857]: I1128 15:14:48.636067 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xnzrn\" (UniqueName: \"kubernetes.io/projected/acf7905a-757e-4f16-a8bc-6ecbce935582-kube-api-access-xnzrn\") pod \"acf7905a-757e-4f16-a8bc-6ecbce935582\" (UID: \"acf7905a-757e-4f16-a8bc-6ecbce935582\") " Nov 28 15:14:48 crc kubenswrapper[4857]: I1128 15:14:48.636279 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/acf7905a-757e-4f16-a8bc-6ecbce935582-bundle\") pod \"acf7905a-757e-4f16-a8bc-6ecbce935582\" (UID: \"acf7905a-757e-4f16-a8bc-6ecbce935582\") " Nov 28 15:14:48 crc kubenswrapper[4857]: I1128 15:14:48.636364 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/acf7905a-757e-4f16-a8bc-6ecbce935582-util\") pod \"acf7905a-757e-4f16-a8bc-6ecbce935582\" (UID: \"acf7905a-757e-4f16-a8bc-6ecbce935582\") " Nov 28 15:14:48 crc kubenswrapper[4857]: I1128 15:14:48.638634 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/acf7905a-757e-4f16-a8bc-6ecbce935582-bundle" (OuterVolumeSpecName: "bundle") pod "acf7905a-757e-4f16-a8bc-6ecbce935582" (UID: "acf7905a-757e-4f16-a8bc-6ecbce935582"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:14:48 crc kubenswrapper[4857]: I1128 15:14:48.641507 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/acf7905a-757e-4f16-a8bc-6ecbce935582-kube-api-access-xnzrn" (OuterVolumeSpecName: "kube-api-access-xnzrn") pod "acf7905a-757e-4f16-a8bc-6ecbce935582" (UID: "acf7905a-757e-4f16-a8bc-6ecbce935582"). InnerVolumeSpecName "kube-api-access-xnzrn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:14:48 crc kubenswrapper[4857]: I1128 15:14:48.646876 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/acf7905a-757e-4f16-a8bc-6ecbce935582-util" (OuterVolumeSpecName: "util") pod "acf7905a-757e-4f16-a8bc-6ecbce935582" (UID: "acf7905a-757e-4f16-a8bc-6ecbce935582"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:14:48 crc kubenswrapper[4857]: I1128 15:14:48.739598 4857 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/acf7905a-757e-4f16-a8bc-6ecbce935582-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:14:48 crc kubenswrapper[4857]: I1128 15:14:48.739638 4857 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/acf7905a-757e-4f16-a8bc-6ecbce935582-util\") on node \"crc\" DevicePath \"\"" Nov 28 15:14:48 crc kubenswrapper[4857]: I1128 15:14:48.739648 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xnzrn\" (UniqueName: \"kubernetes.io/projected/acf7905a-757e-4f16-a8bc-6ecbce935582-kube-api-access-xnzrn\") on node \"crc\" DevicePath \"\"" Nov 28 15:14:49 crc kubenswrapper[4857]: I1128 15:14:49.140200 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210mjsgt" event={"ID":"acf7905a-757e-4f16-a8bc-6ecbce935582","Type":"ContainerDied","Data":"aee0a9de228a57bb70127b0f3d04e87b5daa62ee2c2c46103b9f93e5efb9a975"} Nov 28 15:14:49 crc kubenswrapper[4857]: I1128 15:14:49.140524 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aee0a9de228a57bb70127b0f3d04e87b5daa62ee2c2c46103b9f93e5efb9a975" Nov 28 15:14:49 crc kubenswrapper[4857]: I1128 15:14:49.140288 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210mjsgt" Nov 28 15:14:53 crc kubenswrapper[4857]: I1128 15:14:53.047592 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-jpt89"] Nov 28 15:14:53 crc kubenswrapper[4857]: I1128 15:14:53.064822 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-6h6vh"] Nov 28 15:14:53 crc kubenswrapper[4857]: I1128 15:14:53.078392 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-jpt89"] Nov 28 15:14:53 crc kubenswrapper[4857]: I1128 15:14:53.087755 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-6h6vh"] Nov 28 15:14:54 crc kubenswrapper[4857]: I1128 15:14:54.056829 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-04d3-account-create-update-5cm48"] Nov 28 15:14:54 crc kubenswrapper[4857]: I1128 15:14:54.068322 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-04d3-account-create-update-5cm48"] Nov 28 15:14:54 crc kubenswrapper[4857]: I1128 15:14:54.081200 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-r2q5q"] Nov 28 15:14:54 crc kubenswrapper[4857]: I1128 15:14:54.094296 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-f25a-account-create-update-hzd8b"] Nov 28 15:14:54 crc kubenswrapper[4857]: I1128 15:14:54.105603 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-903a-account-create-update-x8xm5"] Nov 28 15:14:54 crc kubenswrapper[4857]: I1128 15:14:54.118567 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-f25a-account-create-update-hzd8b"] Nov 28 15:14:54 crc kubenswrapper[4857]: I1128 15:14:54.131290 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-903a-account-create-update-x8xm5"] Nov 28 15:14:54 crc 
kubenswrapper[4857]: I1128 15:14:54.143365 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-r2q5q"] Nov 28 15:14:54 crc kubenswrapper[4857]: I1128 15:14:54.241252 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="19d127ed-bf35-493e-aeea-1103a89be2e7" path="/var/lib/kubelet/pods/19d127ed-bf35-493e-aeea-1103a89be2e7/volumes" Nov 28 15:14:54 crc kubenswrapper[4857]: I1128 15:14:54.241845 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="357ad1be-4369-4bbd-89ac-57cc7eefcc78" path="/var/lib/kubelet/pods/357ad1be-4369-4bbd-89ac-57cc7eefcc78/volumes" Nov 28 15:14:54 crc kubenswrapper[4857]: I1128 15:14:54.242508 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="705d6dbe-fab7-4f36-be33-c361ae31b8fb" path="/var/lib/kubelet/pods/705d6dbe-fab7-4f36-be33-c361ae31b8fb/volumes" Nov 28 15:14:54 crc kubenswrapper[4857]: I1128 15:14:54.243269 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7140a79d-253c-4ac2-b9b2-4df19f81750d" path="/var/lib/kubelet/pods/7140a79d-253c-4ac2-b9b2-4df19f81750d/volumes" Nov 28 15:14:54 crc kubenswrapper[4857]: I1128 15:14:54.244574 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a4e90d47-02ed-49b5-8911-9d9aa2850884" path="/var/lib/kubelet/pods/a4e90d47-02ed-49b5-8911-9d9aa2850884/volumes" Nov 28 15:14:54 crc kubenswrapper[4857]: I1128 15:14:54.245938 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db5538ba-ffc9-488c-99f2-025ac358a4f6" path="/var/lib/kubelet/pods/db5538ba-ffc9-488c-99f2-025ac358a4f6/volumes" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.091035 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-lcqs2"] Nov 28 15:15:00 crc kubenswrapper[4857]: E1128 15:15:00.092125 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26ad8963-2e63-4a75-9de9-90e28f9153d5" containerName="horizon-log" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.092141 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="26ad8963-2e63-4a75-9de9-90e28f9153d5" containerName="horizon-log" Nov 28 15:15:00 crc kubenswrapper[4857]: E1128 15:15:00.092153 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="acf7905a-757e-4f16-a8bc-6ecbce935582" containerName="extract" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.092159 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="acf7905a-757e-4f16-a8bc-6ecbce935582" containerName="extract" Nov 28 15:15:00 crc kubenswrapper[4857]: E1128 15:15:00.092169 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="acf7905a-757e-4f16-a8bc-6ecbce935582" containerName="util" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.092176 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="acf7905a-757e-4f16-a8bc-6ecbce935582" containerName="util" Nov 28 15:15:00 crc kubenswrapper[4857]: E1128 15:15:00.092195 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="acf7905a-757e-4f16-a8bc-6ecbce935582" containerName="pull" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.092200 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="acf7905a-757e-4f16-a8bc-6ecbce935582" containerName="pull" Nov 28 15:15:00 crc kubenswrapper[4857]: E1128 15:15:00.092237 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26ad8963-2e63-4a75-9de9-90e28f9153d5" containerName="horizon" Nov 28 
15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.092243 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="26ad8963-2e63-4a75-9de9-90e28f9153d5" containerName="horizon" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.092476 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="26ad8963-2e63-4a75-9de9-90e28f9153d5" containerName="horizon-log" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.092507 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="26ad8963-2e63-4a75-9de9-90e28f9153d5" containerName="horizon" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.092523 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="acf7905a-757e-4f16-a8bc-6ecbce935582" containerName="extract" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.093291 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-lcqs2" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.095567 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-t4tgz" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.095737 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.097887 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.121783 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-lcqs2"] Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.149514 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-567d869bcf-gq4nn"] Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.151035 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-567d869bcf-gq4nn" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.154597 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-rxnl7" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.156875 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.160247 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-567d869bcf-dnb4j"] Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.162032 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-567d869bcf-dnb4j" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.207870 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/64b605ec-ff9f-4050-b8ae-37d88dec247e-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-567d869bcf-dnb4j\" (UID: \"64b605ec-ff9f-4050-b8ae-37d88dec247e\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-567d869bcf-dnb4j" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.207918 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5c92g\" (UniqueName: \"kubernetes.io/projected/84754d36-7aff-4640-920a-a85f8af97445-kube-api-access-5c92g\") pod \"obo-prometheus-operator-668cf9dfbb-lcqs2\" (UID: \"84754d36-7aff-4640-920a-a85f8af97445\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-lcqs2" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.207960 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1ece95ae-d2f5-4c91-904a-945060d180a1-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-567d869bcf-gq4nn\" (UID: \"1ece95ae-d2f5-4c91-904a-945060d180a1\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-567d869bcf-gq4nn" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.208026 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1ece95ae-d2f5-4c91-904a-945060d180a1-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-567d869bcf-gq4nn\" (UID: \"1ece95ae-d2f5-4c91-904a-945060d180a1\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-567d869bcf-gq4nn" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.208066 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/64b605ec-ff9f-4050-b8ae-37d88dec247e-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-567d869bcf-dnb4j\" (UID: \"64b605ec-ff9f-4050-b8ae-37d88dec247e\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-567d869bcf-dnb4j" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.209526 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-567d869bcf-gq4nn"] Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.220334 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405715-mvlx9"] Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.225830 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-mvlx9" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.236876 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.261465 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.431777 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/64b605ec-ff9f-4050-b8ae-37d88dec247e-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-567d869bcf-dnb4j\" (UID: \"64b605ec-ff9f-4050-b8ae-37d88dec247e\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-567d869bcf-dnb4j" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.432048 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5c92g\" (UniqueName: \"kubernetes.io/projected/84754d36-7aff-4640-920a-a85f8af97445-kube-api-access-5c92g\") pod \"obo-prometheus-operator-668cf9dfbb-lcqs2\" (UID: \"84754d36-7aff-4640-920a-a85f8af97445\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-lcqs2" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.432143 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1ece95ae-d2f5-4c91-904a-945060d180a1-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-567d869bcf-gq4nn\" (UID: \"1ece95ae-d2f5-4c91-904a-945060d180a1\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-567d869bcf-gq4nn" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.432374 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1ece95ae-d2f5-4c91-904a-945060d180a1-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-567d869bcf-gq4nn\" (UID: \"1ece95ae-d2f5-4c91-904a-945060d180a1\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-567d869bcf-gq4nn" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.432582 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/64b605ec-ff9f-4050-b8ae-37d88dec247e-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-567d869bcf-dnb4j\" (UID: \"64b605ec-ff9f-4050-b8ae-37d88dec247e\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-567d869bcf-dnb4j" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.434098 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-567d869bcf-dnb4j"] Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.434145 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405715-mvlx9"] Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.464193 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/64b605ec-ff9f-4050-b8ae-37d88dec247e-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-567d869bcf-dnb4j\" (UID: \"64b605ec-ff9f-4050-b8ae-37d88dec247e\") " 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-567d869bcf-dnb4j" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.494544 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5c92g\" (UniqueName: \"kubernetes.io/projected/84754d36-7aff-4640-920a-a85f8af97445-kube-api-access-5c92g\") pod \"obo-prometheus-operator-668cf9dfbb-lcqs2\" (UID: \"84754d36-7aff-4640-920a-a85f8af97445\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-lcqs2" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.499831 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/64b605ec-ff9f-4050-b8ae-37d88dec247e-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-567d869bcf-dnb4j\" (UID: \"64b605ec-ff9f-4050-b8ae-37d88dec247e\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-567d869bcf-dnb4j" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.508800 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/1ece95ae-d2f5-4c91-904a-945060d180a1-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-567d869bcf-gq4nn\" (UID: \"1ece95ae-d2f5-4c91-904a-945060d180a1\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-567d869bcf-gq4nn" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.512422 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-567d869bcf-dnb4j" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.512509 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/1ece95ae-d2f5-4c91-904a-945060d180a1-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-567d869bcf-gq4nn\" (UID: \"1ece95ae-d2f5-4c91-904a-945060d180a1\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-567d869bcf-gq4nn" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.514286 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-6cskd"] Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.518659 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-6cskd" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.525516 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-5xpzb" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.526233 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.532799 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-6cskd"] Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.535491 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f30be136-8e3c-486b-af61-239b0ade4181-config-volume\") pod \"collect-profiles-29405715-mvlx9\" (UID: \"f30be136-8e3c-486b-af61-239b0ade4181\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-mvlx9" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.535579 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f30be136-8e3c-486b-af61-239b0ade4181-secret-volume\") pod \"collect-profiles-29405715-mvlx9\" (UID: \"f30be136-8e3c-486b-af61-239b0ade4181\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-mvlx9" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.535710 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7c5hw\" (UniqueName: \"kubernetes.io/projected/f30be136-8e3c-486b-af61-239b0ade4181-kube-api-access-7c5hw\") pod \"collect-profiles-29405715-mvlx9\" (UID: \"f30be136-8e3c-486b-af61-239b0ade4181\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-mvlx9" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.584590 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-5446b9c989-ldvcd"] Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.586246 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-ldvcd" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.603093 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-cp5vd" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.647191 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f30be136-8e3c-486b-af61-239b0ade4181-config-volume\") pod \"collect-profiles-29405715-mvlx9\" (UID: \"f30be136-8e3c-486b-af61-239b0ade4181\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-mvlx9" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.647463 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/fc85d7fb-0ce3-43c2-ae33-706045cfd36d-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-6cskd\" (UID: \"fc85d7fb-0ce3-43c2-ae33-706045cfd36d\") " pod="openshift-operators/observability-operator-d8bb48f5d-6cskd" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.647528 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f30be136-8e3c-486b-af61-239b0ade4181-secret-volume\") pod \"collect-profiles-29405715-mvlx9\" (UID: \"f30be136-8e3c-486b-af61-239b0ade4181\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-mvlx9" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.647638 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7c5hw\" (UniqueName: \"kubernetes.io/projected/f30be136-8e3c-486b-af61-239b0ade4181-kube-api-access-7c5hw\") pod \"collect-profiles-29405715-mvlx9\" (UID: \"f30be136-8e3c-486b-af61-239b0ade4181\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-mvlx9" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.647662 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cnphp\" (UniqueName: \"kubernetes.io/projected/fc85d7fb-0ce3-43c2-ae33-706045cfd36d-kube-api-access-cnphp\") pod \"observability-operator-d8bb48f5d-6cskd\" (UID: \"fc85d7fb-0ce3-43c2-ae33-706045cfd36d\") " pod="openshift-operators/observability-operator-d8bb48f5d-6cskd" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.648653 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f30be136-8e3c-486b-af61-239b0ade4181-config-volume\") pod \"collect-profiles-29405715-mvlx9\" (UID: \"f30be136-8e3c-486b-af61-239b0ade4181\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-mvlx9" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.663195 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f30be136-8e3c-486b-af61-239b0ade4181-secret-volume\") pod \"collect-profiles-29405715-mvlx9\" (UID: \"f30be136-8e3c-486b-af61-239b0ade4181\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-mvlx9" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.678294 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7c5hw\" (UniqueName: \"kubernetes.io/projected/f30be136-8e3c-486b-af61-239b0ade4181-kube-api-access-7c5hw\") pod 
\"collect-profiles-29405715-mvlx9\" (UID: \"f30be136-8e3c-486b-af61-239b0ade4181\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-mvlx9" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.685647 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-ldvcd"] Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.726736 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-lcqs2" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.750368 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hwzgs\" (UniqueName: \"kubernetes.io/projected/aeec4c3d-13d1-4feb-8b06-6867804d14f1-kube-api-access-hwzgs\") pod \"perses-operator-5446b9c989-ldvcd\" (UID: \"aeec4c3d-13d1-4feb-8b06-6867804d14f1\") " pod="openshift-operators/perses-operator-5446b9c989-ldvcd" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.750455 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/aeec4c3d-13d1-4feb-8b06-6867804d14f1-openshift-service-ca\") pod \"perses-operator-5446b9c989-ldvcd\" (UID: \"aeec4c3d-13d1-4feb-8b06-6867804d14f1\") " pod="openshift-operators/perses-operator-5446b9c989-ldvcd" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.750507 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cnphp\" (UniqueName: \"kubernetes.io/projected/fc85d7fb-0ce3-43c2-ae33-706045cfd36d-kube-api-access-cnphp\") pod \"observability-operator-d8bb48f5d-6cskd\" (UID: \"fc85d7fb-0ce3-43c2-ae33-706045cfd36d\") " pod="openshift-operators/observability-operator-d8bb48f5d-6cskd" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.750556 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/fc85d7fb-0ce3-43c2-ae33-706045cfd36d-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-6cskd\" (UID: \"fc85d7fb-0ce3-43c2-ae33-706045cfd36d\") " pod="openshift-operators/observability-operator-d8bb48f5d-6cskd" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.760659 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/fc85d7fb-0ce3-43c2-ae33-706045cfd36d-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-6cskd\" (UID: \"fc85d7fb-0ce3-43c2-ae33-706045cfd36d\") " pod="openshift-operators/observability-operator-d8bb48f5d-6cskd" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.781895 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cnphp\" (UniqueName: \"kubernetes.io/projected/fc85d7fb-0ce3-43c2-ae33-706045cfd36d-kube-api-access-cnphp\") pod \"observability-operator-d8bb48f5d-6cskd\" (UID: \"fc85d7fb-0ce3-43c2-ae33-706045cfd36d\") " pod="openshift-operators/observability-operator-d8bb48f5d-6cskd" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.792631 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-567d869bcf-gq4nn" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.853453 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hwzgs\" (UniqueName: \"kubernetes.io/projected/aeec4c3d-13d1-4feb-8b06-6867804d14f1-kube-api-access-hwzgs\") pod \"perses-operator-5446b9c989-ldvcd\" (UID: \"aeec4c3d-13d1-4feb-8b06-6867804d14f1\") " pod="openshift-operators/perses-operator-5446b9c989-ldvcd" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.853937 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/aeec4c3d-13d1-4feb-8b06-6867804d14f1-openshift-service-ca\") pod \"perses-operator-5446b9c989-ldvcd\" (UID: \"aeec4c3d-13d1-4feb-8b06-6867804d14f1\") " pod="openshift-operators/perses-operator-5446b9c989-ldvcd" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.857261 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/aeec4c3d-13d1-4feb-8b06-6867804d14f1-openshift-service-ca\") pod \"perses-operator-5446b9c989-ldvcd\" (UID: \"aeec4c3d-13d1-4feb-8b06-6867804d14f1\") " pod="openshift-operators/perses-operator-5446b9c989-ldvcd" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.885594 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hwzgs\" (UniqueName: \"kubernetes.io/projected/aeec4c3d-13d1-4feb-8b06-6867804d14f1-kube-api-access-hwzgs\") pod \"perses-operator-5446b9c989-ldvcd\" (UID: \"aeec4c3d-13d1-4feb-8b06-6867804d14f1\") " pod="openshift-operators/perses-operator-5446b9c989-ldvcd" Nov 28 15:15:00 crc kubenswrapper[4857]: I1128 15:15:00.907612 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-mvlx9" Nov 28 15:15:01 crc kubenswrapper[4857]: I1128 15:15:01.036736 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-6cskd" Nov 28 15:15:01 crc kubenswrapper[4857]: I1128 15:15:01.045254 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-ldvcd" Nov 28 15:15:01 crc kubenswrapper[4857]: W1128 15:15:01.267671 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod64b605ec_ff9f_4050_b8ae_37d88dec247e.slice/crio-d44bbfcbc75328f6c5a5e5aab35f82a5ef5c11c52941fece803e0993371d502a WatchSource:0}: Error finding container d44bbfcbc75328f6c5a5e5aab35f82a5ef5c11c52941fece803e0993371d502a: Status 404 returned error can't find the container with id d44bbfcbc75328f6c5a5e5aab35f82a5ef5c11c52941fece803e0993371d502a Nov 28 15:15:01 crc kubenswrapper[4857]: I1128 15:15:01.273177 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-567d869bcf-dnb4j"] Nov 28 15:15:01 crc kubenswrapper[4857]: I1128 15:15:01.322834 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-567d869bcf-dnb4j" event={"ID":"64b605ec-ff9f-4050-b8ae-37d88dec247e","Type":"ContainerStarted","Data":"d44bbfcbc75328f6c5a5e5aab35f82a5ef5c11c52941fece803e0993371d502a"} Nov 28 15:15:01 crc kubenswrapper[4857]: I1128 15:15:01.416349 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-lcqs2"] Nov 28 15:15:01 crc kubenswrapper[4857]: I1128 15:15:01.430852 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-567d869bcf-gq4nn"] Nov 28 15:15:01 crc kubenswrapper[4857]: W1128 15:15:01.619058 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf30be136_8e3c_486b_af61_239b0ade4181.slice/crio-b4b218e1aec139b3fe93ca869b3310ba9ad9c256d366f0c3db023c5d3fe2f801 WatchSource:0}: Error finding container b4b218e1aec139b3fe93ca869b3310ba9ad9c256d366f0c3db023c5d3fe2f801: Status 404 returned error can't find the container with id b4b218e1aec139b3fe93ca869b3310ba9ad9c256d366f0c3db023c5d3fe2f801 Nov 28 15:15:01 crc kubenswrapper[4857]: I1128 15:15:01.620800 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405715-mvlx9"] Nov 28 15:15:01 crc kubenswrapper[4857]: I1128 15:15:01.668375 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-6cskd"] Nov 28 15:15:01 crc kubenswrapper[4857]: I1128 15:15:01.692081 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-ldvcd"] Nov 28 15:15:02 crc kubenswrapper[4857]: I1128 15:15:02.342994 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-6cskd" event={"ID":"fc85d7fb-0ce3-43c2-ae33-706045cfd36d","Type":"ContainerStarted","Data":"d4dc589da46d04a677fe07b073fdfabccb5ac9978616adad53ade89f9f4da7b6"} Nov 28 15:15:02 crc kubenswrapper[4857]: I1128 15:15:02.353936 4857 generic.go:334] "Generic (PLEG): container finished" podID="f30be136-8e3c-486b-af61-239b0ade4181" containerID="7958381c3e9a0163e98916a4fb0ea5e926ec06a0a7de1d59eaf717df431354e6" exitCode=0 Nov 28 15:15:02 crc kubenswrapper[4857]: I1128 15:15:02.354098 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-mvlx9" 
event={"ID":"f30be136-8e3c-486b-af61-239b0ade4181","Type":"ContainerDied","Data":"7958381c3e9a0163e98916a4fb0ea5e926ec06a0a7de1d59eaf717df431354e6"} Nov 28 15:15:02 crc kubenswrapper[4857]: I1128 15:15:02.354148 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-mvlx9" event={"ID":"f30be136-8e3c-486b-af61-239b0ade4181","Type":"ContainerStarted","Data":"b4b218e1aec139b3fe93ca869b3310ba9ad9c256d366f0c3db023c5d3fe2f801"} Nov 28 15:15:02 crc kubenswrapper[4857]: I1128 15:15:02.364106 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-567d869bcf-gq4nn" event={"ID":"1ece95ae-d2f5-4c91-904a-945060d180a1","Type":"ContainerStarted","Data":"3755538dde07dabb80321273cf806f10b043ef4f4107cc1c64179f849cd8671a"} Nov 28 15:15:02 crc kubenswrapper[4857]: I1128 15:15:02.365613 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-lcqs2" event={"ID":"84754d36-7aff-4640-920a-a85f8af97445","Type":"ContainerStarted","Data":"fdbbc43218ba189199b13701a6a3d767f70022fed20df0e605f1e21d65b251ce"} Nov 28 15:15:02 crc kubenswrapper[4857]: I1128 15:15:02.368490 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-ldvcd" event={"ID":"aeec4c3d-13d1-4feb-8b06-6867804d14f1","Type":"ContainerStarted","Data":"f453b11a7735bd497e4e7cff539dc694059982175c23dec8425c8f8fac299e27"} Nov 28 15:15:03 crc kubenswrapper[4857]: I1128 15:15:03.799509 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-mvlx9" Nov 28 15:15:03 crc kubenswrapper[4857]: I1128 15:15:03.956386 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f30be136-8e3c-486b-af61-239b0ade4181-secret-volume\") pod \"f30be136-8e3c-486b-af61-239b0ade4181\" (UID: \"f30be136-8e3c-486b-af61-239b0ade4181\") " Nov 28 15:15:03 crc kubenswrapper[4857]: I1128 15:15:03.956505 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f30be136-8e3c-486b-af61-239b0ade4181-config-volume\") pod \"f30be136-8e3c-486b-af61-239b0ade4181\" (UID: \"f30be136-8e3c-486b-af61-239b0ade4181\") " Nov 28 15:15:03 crc kubenswrapper[4857]: I1128 15:15:03.956612 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c5hw\" (UniqueName: \"kubernetes.io/projected/f30be136-8e3c-486b-af61-239b0ade4181-kube-api-access-7c5hw\") pod \"f30be136-8e3c-486b-af61-239b0ade4181\" (UID: \"f30be136-8e3c-486b-af61-239b0ade4181\") " Nov 28 15:15:03 crc kubenswrapper[4857]: I1128 15:15:03.957763 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f30be136-8e3c-486b-af61-239b0ade4181-config-volume" (OuterVolumeSpecName: "config-volume") pod "f30be136-8e3c-486b-af61-239b0ade4181" (UID: "f30be136-8e3c-486b-af61-239b0ade4181"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:15:03 crc kubenswrapper[4857]: I1128 15:15:03.965086 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f30be136-8e3c-486b-af61-239b0ade4181-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "f30be136-8e3c-486b-af61-239b0ade4181" (UID: "f30be136-8e3c-486b-af61-239b0ade4181"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:15:03 crc kubenswrapper[4857]: I1128 15:15:03.966418 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f30be136-8e3c-486b-af61-239b0ade4181-kube-api-access-7c5hw" (OuterVolumeSpecName: "kube-api-access-7c5hw") pod "f30be136-8e3c-486b-af61-239b0ade4181" (UID: "f30be136-8e3c-486b-af61-239b0ade4181"). InnerVolumeSpecName "kube-api-access-7c5hw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:15:04 crc kubenswrapper[4857]: I1128 15:15:04.059735 4857 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f30be136-8e3c-486b-af61-239b0ade4181-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 15:15:04 crc kubenswrapper[4857]: I1128 15:15:04.059784 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c5hw\" (UniqueName: \"kubernetes.io/projected/f30be136-8e3c-486b-af61-239b0ade4181-kube-api-access-7c5hw\") on node \"crc\" DevicePath \"\"" Nov 28 15:15:04 crc kubenswrapper[4857]: I1128 15:15:04.059800 4857 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f30be136-8e3c-486b-af61-239b0ade4181-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 15:15:04 crc kubenswrapper[4857]: I1128 15:15:04.389457 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-mvlx9" event={"ID":"f30be136-8e3c-486b-af61-239b0ade4181","Type":"ContainerDied","Data":"b4b218e1aec139b3fe93ca869b3310ba9ad9c256d366f0c3db023c5d3fe2f801"} Nov 28 15:15:04 crc kubenswrapper[4857]: I1128 15:15:04.389959 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b4b218e1aec139b3fe93ca869b3310ba9ad9c256d366f0c3db023c5d3fe2f801" Nov 28 15:15:04 crc kubenswrapper[4857]: I1128 15:15:04.389518 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405715-mvlx9" Nov 28 15:15:04 crc kubenswrapper[4857]: I1128 15:15:04.887505 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405670-nc94d"] Nov 28 15:15:04 crc kubenswrapper[4857]: I1128 15:15:04.903306 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405670-nc94d"] Nov 28 15:15:06 crc kubenswrapper[4857]: I1128 15:15:06.255479 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f0747847-802a-4a95-831a-301e1529a41b" path="/var/lib/kubelet/pods/f0747847-802a-4a95-831a-301e1529a41b/volumes" Nov 28 15:15:09 crc kubenswrapper[4857]: I1128 15:15:09.048669 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-ctcbx"] Nov 28 15:15:09 crc kubenswrapper[4857]: I1128 15:15:09.064933 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-ctcbx"] Nov 28 15:15:10 crc kubenswrapper[4857]: I1128 15:15:10.271044 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a9e701d2-f81d-475e-8cda-c4a598fd2032" path="/var/lib/kubelet/pods/a9e701d2-f81d-475e-8cda-c4a598fd2032/volumes" Nov 28 15:15:14 crc kubenswrapper[4857]: I1128 15:15:14.531076 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-567d869bcf-dnb4j" event={"ID":"64b605ec-ff9f-4050-b8ae-37d88dec247e","Type":"ContainerStarted","Data":"6b923d720da30fbca07b4320a905c30320d1bea9cb3f8dd9b0da60237328fdd1"} Nov 28 15:15:14 crc kubenswrapper[4857]: I1128 15:15:14.570654 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-567d869bcf-dnb4j" podStartSLOduration=1.8155272340000002 podStartE2EDuration="14.570631861s" podCreationTimestamp="2025-11-28 15:15:00 +0000 UTC" firstStartedPulling="2025-11-28 15:15:01.273334851 +0000 UTC m=+6351.397276288" lastFinishedPulling="2025-11-28 15:15:14.028439488 +0000 UTC m=+6364.152380915" observedRunningTime="2025-11-28 15:15:14.567758304 +0000 UTC m=+6364.691699761" watchObservedRunningTime="2025-11-28 15:15:14.570631861 +0000 UTC m=+6364.694573308" Nov 28 15:15:15 crc kubenswrapper[4857]: I1128 15:15:15.612072 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-6cskd" event={"ID":"fc85d7fb-0ce3-43c2-ae33-706045cfd36d","Type":"ContainerStarted","Data":"2b6bec219f6afde5f2f3f9a0ed69c3891a647fb1569c4f593bf0254f85c1c680"} Nov 28 15:15:15 crc kubenswrapper[4857]: I1128 15:15:15.612919 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-d8bb48f5d-6cskd" Nov 28 15:15:15 crc kubenswrapper[4857]: I1128 15:15:15.623993 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-d8bb48f5d-6cskd" Nov 28 15:15:15 crc kubenswrapper[4857]: I1128 15:15:15.624317 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-567d869bcf-gq4nn" event={"ID":"1ece95ae-d2f5-4c91-904a-945060d180a1","Type":"ContainerStarted","Data":"b7acbd2529fc37c658d4d64b8ee8277a137cfb741554b5301c92c9640115a3fb"} Nov 28 15:15:15 crc kubenswrapper[4857]: I1128 15:15:15.654100 4857 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-lcqs2" event={"ID":"84754d36-7aff-4640-920a-a85f8af97445","Type":"ContainerStarted","Data":"14f00f77f77db0f4ad7f50902f0d04ad4d01ae645bb7728f5090a82fad770e35"} Nov 28 15:15:15 crc kubenswrapper[4857]: I1128 15:15:15.679814 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-ldvcd" event={"ID":"aeec4c3d-13d1-4feb-8b06-6867804d14f1","Type":"ContainerStarted","Data":"c9d2be08cf242575763babd4c1e53bcd56f8e0d85d5428d9dd9f327b3ab3c126"} Nov 28 15:15:15 crc kubenswrapper[4857]: I1128 15:15:15.681005 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5446b9c989-ldvcd" Nov 28 15:15:15 crc kubenswrapper[4857]: I1128 15:15:15.725922 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-567d869bcf-gq4nn" podStartSLOduration=3.229323156 podStartE2EDuration="15.725891028s" podCreationTimestamp="2025-11-28 15:15:00 +0000 UTC" firstStartedPulling="2025-11-28 15:15:01.409899069 +0000 UTC m=+6351.533840516" lastFinishedPulling="2025-11-28 15:15:13.906466951 +0000 UTC m=+6364.030408388" observedRunningTime="2025-11-28 15:15:15.706731056 +0000 UTC m=+6365.830672523" watchObservedRunningTime="2025-11-28 15:15:15.725891028 +0000 UTC m=+6365.849832465" Nov 28 15:15:15 crc kubenswrapper[4857]: I1128 15:15:15.736939 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-d8bb48f5d-6cskd" podStartSLOduration=3.379674433 podStartE2EDuration="15.736915913s" podCreationTimestamp="2025-11-28 15:15:00 +0000 UTC" firstStartedPulling="2025-11-28 15:15:01.727607845 +0000 UTC m=+6351.851549282" lastFinishedPulling="2025-11-28 15:15:14.084849325 +0000 UTC m=+6364.208790762" observedRunningTime="2025-11-28 15:15:15.659749411 +0000 UTC m=+6365.783690848" watchObservedRunningTime="2025-11-28 15:15:15.736915913 +0000 UTC m=+6365.860857350" Nov 28 15:15:15 crc kubenswrapper[4857]: I1128 15:15:15.803213 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-lcqs2" podStartSLOduration=3.138921382 podStartE2EDuration="15.803183593s" podCreationTimestamp="2025-11-28 15:15:00 +0000 UTC" firstStartedPulling="2025-11-28 15:15:01.420277776 +0000 UTC m=+6351.544219213" lastFinishedPulling="2025-11-28 15:15:14.084539987 +0000 UTC m=+6364.208481424" observedRunningTime="2025-11-28 15:15:15.786846296 +0000 UTC m=+6365.910787733" watchObservedRunningTime="2025-11-28 15:15:15.803183593 +0000 UTC m=+6365.927125030" Nov 28 15:15:21 crc kubenswrapper[4857]: I1128 15:15:21.052427 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5446b9c989-ldvcd" Nov 28 15:15:21 crc kubenswrapper[4857]: I1128 15:15:21.083044 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-5446b9c989-ldvcd" podStartSLOduration=8.700267669 podStartE2EDuration="21.083022401s" podCreationTimestamp="2025-11-28 15:15:00 +0000 UTC" firstStartedPulling="2025-11-28 15:15:01.654913223 +0000 UTC m=+6351.778854660" lastFinishedPulling="2025-11-28 15:15:14.037667955 +0000 UTC m=+6364.161609392" observedRunningTime="2025-11-28 15:15:15.833381699 +0000 UTC m=+6365.957323136" watchObservedRunningTime="2025-11-28 15:15:21.083022401 +0000 UTC m=+6371.206963838" Nov 28 
15:15:21 crc kubenswrapper[4857]: I1128 15:15:21.710215 4857 scope.go:117] "RemoveContainer" containerID="7d6bb09b5267e210f68b7288cab4761134713bfc172beef353db4c1e6aa5a18c" Nov 28 15:15:21 crc kubenswrapper[4857]: I1128 15:15:21.753742 4857 scope.go:117] "RemoveContainer" containerID="56474241f936993ef7e3a6e7b786e08337c4bef296b4797c0d36684df071a854" Nov 28 15:15:21 crc kubenswrapper[4857]: I1128 15:15:21.809121 4857 scope.go:117] "RemoveContainer" containerID="34309413075ac307ead93b48426fa847fedb3f47aa1521f92cbb3b87a992e16a" Nov 28 15:15:21 crc kubenswrapper[4857]: I1128 15:15:21.849187 4857 scope.go:117] "RemoveContainer" containerID="515cc291c680a8a395fc2cffdaf10e466d90e00c85eca6e2f772af8d3a0a94cd" Nov 28 15:15:21 crc kubenswrapper[4857]: I1128 15:15:21.892337 4857 scope.go:117] "RemoveContainer" containerID="3b8c4435d954477bed19b69cee5ff200b79a8620111a52046c1cf2082711f02c" Nov 28 15:15:21 crc kubenswrapper[4857]: I1128 15:15:21.955207 4857 scope.go:117] "RemoveContainer" containerID="959dec720acf8f4fc7b69c5f8a94f17d83b80c8aee3a5189fac23782e7ccadf0" Nov 28 15:15:21 crc kubenswrapper[4857]: I1128 15:15:21.998220 4857 scope.go:117] "RemoveContainer" containerID="29a8a2f4e184a8bb9e0ba03adbd89dafbc988e030b4b3a6226a7f13c0fdd91db" Nov 28 15:15:22 crc kubenswrapper[4857]: I1128 15:15:22.034870 4857 scope.go:117] "RemoveContainer" containerID="c72364aff2492138903cd90d5e46cd7989fe733d5810f9654591dbd733d8f814" Nov 28 15:15:24 crc kubenswrapper[4857]: I1128 15:15:24.067607 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Nov 28 15:15:24 crc kubenswrapper[4857]: I1128 15:15:24.068255 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="04ee383b-d1a2-4c75-8482-10ed3d034049" containerName="openstackclient" containerID="cri-o://4de5c3066710df6d76e7d00374c0159f7a2011f2a1c077231d5f07c172ca860c" gracePeriod=2 Nov 28 15:15:24 crc kubenswrapper[4857]: I1128 15:15:24.097923 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Nov 28 15:15:24 crc kubenswrapper[4857]: I1128 15:15:24.196026 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 28 15:15:24 crc kubenswrapper[4857]: E1128 15:15:24.210308 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f30be136-8e3c-486b-af61-239b0ade4181" containerName="collect-profiles" Nov 28 15:15:24 crc kubenswrapper[4857]: I1128 15:15:24.210330 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f30be136-8e3c-486b-af61-239b0ade4181" containerName="collect-profiles" Nov 28 15:15:24 crc kubenswrapper[4857]: E1128 15:15:24.210368 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04ee383b-d1a2-4c75-8482-10ed3d034049" containerName="openstackclient" Nov 28 15:15:24 crc kubenswrapper[4857]: I1128 15:15:24.210376 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="04ee383b-d1a2-4c75-8482-10ed3d034049" containerName="openstackclient" Nov 28 15:15:24 crc kubenswrapper[4857]: I1128 15:15:24.210924 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f30be136-8e3c-486b-af61-239b0ade4181" containerName="collect-profiles" Nov 28 15:15:24 crc kubenswrapper[4857]: I1128 15:15:24.210986 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="04ee383b-d1a2-4c75-8482-10ed3d034049" containerName="openstackclient" Nov 28 15:15:24 crc kubenswrapper[4857]: I1128 15:15:24.212448 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 28 15:15:24 crc kubenswrapper[4857]: I1128 15:15:24.259467 4857 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="04ee383b-d1a2-4c75-8482-10ed3d034049" podUID="2571cbd5-f5c5-4525-a259-05351f59b9bb" Nov 28 15:15:24 crc kubenswrapper[4857]: I1128 15:15:24.340721 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 28 15:15:24 crc kubenswrapper[4857]: I1128 15:15:24.388537 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vqlsn\" (UniqueName: \"kubernetes.io/projected/2571cbd5-f5c5-4525-a259-05351f59b9bb-kube-api-access-vqlsn\") pod \"openstackclient\" (UID: \"2571cbd5-f5c5-4525-a259-05351f59b9bb\") " pod="openstack/openstackclient" Nov 28 15:15:24 crc kubenswrapper[4857]: I1128 15:15:24.388636 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/2571cbd5-f5c5-4525-a259-05351f59b9bb-openstack-config-secret\") pod \"openstackclient\" (UID: \"2571cbd5-f5c5-4525-a259-05351f59b9bb\") " pod="openstack/openstackclient" Nov 28 15:15:24 crc kubenswrapper[4857]: I1128 15:15:24.388698 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2571cbd5-f5c5-4525-a259-05351f59b9bb-openstack-config\") pod \"openstackclient\" (UID: \"2571cbd5-f5c5-4525-a259-05351f59b9bb\") " pod="openstack/openstackclient" Nov 28 15:15:24 crc kubenswrapper[4857]: I1128 15:15:24.442042 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 15:15:24 crc kubenswrapper[4857]: I1128 15:15:24.444218 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 15:15:24 crc kubenswrapper[4857]: I1128 15:15:24.450020 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-qkns4" Nov 28 15:15:24 crc kubenswrapper[4857]: I1128 15:15:24.473164 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 15:15:24 crc kubenswrapper[4857]: I1128 15:15:24.491070 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vqlsn\" (UniqueName: \"kubernetes.io/projected/2571cbd5-f5c5-4525-a259-05351f59b9bb-kube-api-access-vqlsn\") pod \"openstackclient\" (UID: \"2571cbd5-f5c5-4525-a259-05351f59b9bb\") " pod="openstack/openstackclient" Nov 28 15:15:24 crc kubenswrapper[4857]: I1128 15:15:24.491136 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/2571cbd5-f5c5-4525-a259-05351f59b9bb-openstack-config-secret\") pod \"openstackclient\" (UID: \"2571cbd5-f5c5-4525-a259-05351f59b9bb\") " pod="openstack/openstackclient" Nov 28 15:15:24 crc kubenswrapper[4857]: I1128 15:15:24.491184 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2571cbd5-f5c5-4525-a259-05351f59b9bb-openstack-config\") pod \"openstackclient\" (UID: \"2571cbd5-f5c5-4525-a259-05351f59b9bb\") " pod="openstack/openstackclient" Nov 28 15:15:24 crc kubenswrapper[4857]: I1128 15:15:24.492154 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2571cbd5-f5c5-4525-a259-05351f59b9bb-openstack-config\") pod \"openstackclient\" (UID: \"2571cbd5-f5c5-4525-a259-05351f59b9bb\") " pod="openstack/openstackclient" Nov 28 15:15:24 crc kubenswrapper[4857]: I1128 15:15:24.506823 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/2571cbd5-f5c5-4525-a259-05351f59b9bb-openstack-config-secret\") pod \"openstackclient\" (UID: \"2571cbd5-f5c5-4525-a259-05351f59b9bb\") " pod="openstack/openstackclient" Nov 28 15:15:24 crc kubenswrapper[4857]: I1128 15:15:24.552413 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vqlsn\" (UniqueName: \"kubernetes.io/projected/2571cbd5-f5c5-4525-a259-05351f59b9bb-kube-api-access-vqlsn\") pod \"openstackclient\" (UID: \"2571cbd5-f5c5-4525-a259-05351f59b9bb\") " pod="openstack/openstackclient" Nov 28 15:15:24 crc kubenswrapper[4857]: I1128 15:15:24.595811 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xq7g9\" (UniqueName: \"kubernetes.io/projected/f174e778-1bd6-4685-a6de-af54e6fc3329-kube-api-access-xq7g9\") pod \"kube-state-metrics-0\" (UID: \"f174e778-1bd6-4685-a6de-af54e6fc3329\") " pod="openstack/kube-state-metrics-0" Nov 28 15:15:24 crc kubenswrapper[4857]: I1128 15:15:24.615059 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 28 15:15:24 crc kubenswrapper[4857]: I1128 15:15:24.700480 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xq7g9\" (UniqueName: \"kubernetes.io/projected/f174e778-1bd6-4685-a6de-af54e6fc3329-kube-api-access-xq7g9\") pod \"kube-state-metrics-0\" (UID: \"f174e778-1bd6-4685-a6de-af54e6fc3329\") " pod="openstack/kube-state-metrics-0" Nov 28 15:15:24 crc kubenswrapper[4857]: I1128 15:15:24.730366 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xq7g9\" (UniqueName: \"kubernetes.io/projected/f174e778-1bd6-4685-a6de-af54e6fc3329-kube-api-access-xq7g9\") pod \"kube-state-metrics-0\" (UID: \"f174e778-1bd6-4685-a6de-af54e6fc3329\") " pod="openstack/kube-state-metrics-0" Nov 28 15:15:24 crc kubenswrapper[4857]: I1128 15:15:24.765525 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 28 15:15:25 crc kubenswrapper[4857]: I1128 15:15:25.670175 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 28 15:15:25 crc kubenswrapper[4857]: I1128 15:15:25.856361 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 28 15:15:25 crc kubenswrapper[4857]: I1128 15:15:25.923198 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/alertmanager-metric-storage-0"] Nov 28 15:15:25 crc kubenswrapper[4857]: I1128 15:15:25.940271 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"2571cbd5-f5c5-4525-a259-05351f59b9bb","Type":"ContainerStarted","Data":"45f344bc4584f1b40f2aaf9b1b02843eeb3c29e16ee29da9a84e31408961ebb0"} Nov 28 15:15:25 crc kubenswrapper[4857]: I1128 15:15:25.940408 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/alertmanager-metric-storage-0" Nov 28 15:15:25 crc kubenswrapper[4857]: W1128 15:15:25.947118 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf174e778_1bd6_4685_a6de_af54e6fc3329.slice/crio-f9be101a74579ed8684f1d90faf7d63ee1b45588aa97434f5081dbcca7e702cb WatchSource:0}: Error finding container f9be101a74579ed8684f1d90faf7d63ee1b45588aa97434f5081dbcca7e702cb: Status 404 returned error can't find the container with id f9be101a74579ed8684f1d90faf7d63ee1b45588aa97434f5081dbcca7e702cb Nov 28 15:15:25 crc kubenswrapper[4857]: I1128 15:15:25.962324 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-tls-assets-0" Nov 28 15:15:25 crc kubenswrapper[4857]: I1128 15:15:25.962516 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-generated" Nov 28 15:15:25 crc kubenswrapper[4857]: I1128 15:15:25.962592 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-web-config" Nov 28 15:15:25 crc kubenswrapper[4857]: I1128 15:15:25.962790 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-alertmanager-dockercfg-dwwnk" Nov 28 15:15:25 crc kubenswrapper[4857]: I1128 15:15:25.964339 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-cluster-tls-config" Nov 28 15:15:25 crc kubenswrapper[4857]: I1128 15:15:25.977425 4857 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 15:15:25 crc kubenswrapper[4857]: I1128 15:15:25.979637 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/alertmanager-metric-storage-0"] Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.074966 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/95a5f00a-164f-43ea-8d38-0f9763cedfa4-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"95a5f00a-164f-43ea-8d38-0f9763cedfa4\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.075038 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/95a5f00a-164f-43ea-8d38-0f9763cedfa4-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"95a5f00a-164f-43ea-8d38-0f9763cedfa4\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.075737 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/95a5f00a-164f-43ea-8d38-0f9763cedfa4-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"95a5f00a-164f-43ea-8d38-0f9763cedfa4\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.075810 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/95a5f00a-164f-43ea-8d38-0f9763cedfa4-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"95a5f00a-164f-43ea-8d38-0f9763cedfa4\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 15:15:26 crc 
kubenswrapper[4857]: I1128 15:15:26.075851 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/95a5f00a-164f-43ea-8d38-0f9763cedfa4-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"95a5f00a-164f-43ea-8d38-0f9763cedfa4\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.075913 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-st62d\" (UniqueName: \"kubernetes.io/projected/95a5f00a-164f-43ea-8d38-0f9763cedfa4-kube-api-access-st62d\") pod \"alertmanager-metric-storage-0\" (UID: \"95a5f00a-164f-43ea-8d38-0f9763cedfa4\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.075995 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/95a5f00a-164f-43ea-8d38-0f9763cedfa4-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"95a5f00a-164f-43ea-8d38-0f9763cedfa4\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.093359 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.096241 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.098035 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.106932 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-cn256" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.107080 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.107201 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.107337 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.120511 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.148822 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.183302 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/95a5f00a-164f-43ea-8d38-0f9763cedfa4-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"95a5f00a-164f-43ea-8d38-0f9763cedfa4\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.183777 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/95a5f00a-164f-43ea-8d38-0f9763cedfa4-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"95a5f00a-164f-43ea-8d38-0f9763cedfa4\") 
" pod="openstack/alertmanager-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.183816 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/95a5f00a-164f-43ea-8d38-0f9763cedfa4-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"95a5f00a-164f-43ea-8d38-0f9763cedfa4\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.183852 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/95a5f00a-164f-43ea-8d38-0f9763cedfa4-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"95a5f00a-164f-43ea-8d38-0f9763cedfa4\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.183907 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-st62d\" (UniqueName: \"kubernetes.io/projected/95a5f00a-164f-43ea-8d38-0f9763cedfa4-kube-api-access-st62d\") pod \"alertmanager-metric-storage-0\" (UID: \"95a5f00a-164f-43ea-8d38-0f9763cedfa4\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.183960 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/95a5f00a-164f-43ea-8d38-0f9763cedfa4-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"95a5f00a-164f-43ea-8d38-0f9763cedfa4\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.184028 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/95a5f00a-164f-43ea-8d38-0f9763cedfa4-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"95a5f00a-164f-43ea-8d38-0f9763cedfa4\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.184553 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/95a5f00a-164f-43ea-8d38-0f9763cedfa4-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"95a5f00a-164f-43ea-8d38-0f9763cedfa4\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.194900 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/95a5f00a-164f-43ea-8d38-0f9763cedfa4-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"95a5f00a-164f-43ea-8d38-0f9763cedfa4\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.196527 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/95a5f00a-164f-43ea-8d38-0f9763cedfa4-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"95a5f00a-164f-43ea-8d38-0f9763cedfa4\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.197077 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/95a5f00a-164f-43ea-8d38-0f9763cedfa4-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"95a5f00a-164f-43ea-8d38-0f9763cedfa4\") " 
pod="openstack/alertmanager-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.200600 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/95a5f00a-164f-43ea-8d38-0f9763cedfa4-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"95a5f00a-164f-43ea-8d38-0f9763cedfa4\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.208739 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/95a5f00a-164f-43ea-8d38-0f9763cedfa4-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"95a5f00a-164f-43ea-8d38-0f9763cedfa4\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.233799 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-st62d\" (UniqueName: \"kubernetes.io/projected/95a5f00a-164f-43ea-8d38-0f9763cedfa4-kube-api-access-st62d\") pod \"alertmanager-metric-storage-0\" (UID: \"95a5f00a-164f-43ea-8d38-0f9763cedfa4\") " pod="openstack/alertmanager-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.283565 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/alertmanager-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.288523 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/04369fad-6e83-4f50-be3e-e37f8c2d6b60-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"04369fad-6e83-4f50-be3e-e37f8c2d6b60\") " pod="openstack/prometheus-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.288578 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/04369fad-6e83-4f50-be3e-e37f8c2d6b60-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"04369fad-6e83-4f50-be3e-e37f8c2d6b60\") " pod="openstack/prometheus-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.288599 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/04369fad-6e83-4f50-be3e-e37f8c2d6b60-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"04369fad-6e83-4f50-be3e-e37f8c2d6b60\") " pod="openstack/prometheus-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.288646 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rwx6r\" (UniqueName: \"kubernetes.io/projected/04369fad-6e83-4f50-be3e-e37f8c2d6b60-kube-api-access-rwx6r\") pod \"prometheus-metric-storage-0\" (UID: \"04369fad-6e83-4f50-be3e-e37f8c2d6b60\") " pod="openstack/prometheus-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.288748 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/04369fad-6e83-4f50-be3e-e37f8c2d6b60-config\") pod \"prometheus-metric-storage-0\" (UID: \"04369fad-6e83-4f50-be3e-e37f8c2d6b60\") " pod="openstack/prometheus-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.288778 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/04369fad-6e83-4f50-be3e-e37f8c2d6b60-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"04369fad-6e83-4f50-be3e-e37f8c2d6b60\") " pod="openstack/prometheus-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.288811 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/04369fad-6e83-4f50-be3e-e37f8c2d6b60-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"04369fad-6e83-4f50-be3e-e37f8c2d6b60\") " pod="openstack/prometheus-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.288841 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-c415ec0d-d607-43be-8396-d7497446797a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c415ec0d-d607-43be-8396-d7497446797a\") pod \"prometheus-metric-storage-0\" (UID: \"04369fad-6e83-4f50-be3e-e37f8c2d6b60\") " pod="openstack/prometheus-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.391168 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-c415ec0d-d607-43be-8396-d7497446797a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c415ec0d-d607-43be-8396-d7497446797a\") pod \"prometheus-metric-storage-0\" (UID: \"04369fad-6e83-4f50-be3e-e37f8c2d6b60\") " pod="openstack/prometheus-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.391324 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/04369fad-6e83-4f50-be3e-e37f8c2d6b60-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"04369fad-6e83-4f50-be3e-e37f8c2d6b60\") " pod="openstack/prometheus-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.391365 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/04369fad-6e83-4f50-be3e-e37f8c2d6b60-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"04369fad-6e83-4f50-be3e-e37f8c2d6b60\") " pod="openstack/prometheus-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.391400 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/04369fad-6e83-4f50-be3e-e37f8c2d6b60-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"04369fad-6e83-4f50-be3e-e37f8c2d6b60\") " pod="openstack/prometheus-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.391463 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rwx6r\" (UniqueName: \"kubernetes.io/projected/04369fad-6e83-4f50-be3e-e37f8c2d6b60-kube-api-access-rwx6r\") pod \"prometheus-metric-storage-0\" (UID: \"04369fad-6e83-4f50-be3e-e37f8c2d6b60\") " pod="openstack/prometheus-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.391643 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/04369fad-6e83-4f50-be3e-e37f8c2d6b60-config\") pod \"prometheus-metric-storage-0\" (UID: \"04369fad-6e83-4f50-be3e-e37f8c2d6b60\") " pod="openstack/prometheus-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: 
I1128 15:15:26.391703 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/04369fad-6e83-4f50-be3e-e37f8c2d6b60-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"04369fad-6e83-4f50-be3e-e37f8c2d6b60\") " pod="openstack/prometheus-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.391759 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/04369fad-6e83-4f50-be3e-e37f8c2d6b60-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"04369fad-6e83-4f50-be3e-e37f8c2d6b60\") " pod="openstack/prometheus-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.406034 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/04369fad-6e83-4f50-be3e-e37f8c2d6b60-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"04369fad-6e83-4f50-be3e-e37f8c2d6b60\") " pod="openstack/prometheus-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.408841 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/04369fad-6e83-4f50-be3e-e37f8c2d6b60-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"04369fad-6e83-4f50-be3e-e37f8c2d6b60\") " pod="openstack/prometheus-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.410235 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/04369fad-6e83-4f50-be3e-e37f8c2d6b60-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"04369fad-6e83-4f50-be3e-e37f8c2d6b60\") " pod="openstack/prometheus-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.413400 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/04369fad-6e83-4f50-be3e-e37f8c2d6b60-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"04369fad-6e83-4f50-be3e-e37f8c2d6b60\") " pod="openstack/prometheus-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.413810 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/04369fad-6e83-4f50-be3e-e37f8c2d6b60-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"04369fad-6e83-4f50-be3e-e37f8c2d6b60\") " pod="openstack/prometheus-metric-storage-0" Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.450139 4857 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.450196 4857 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-c415ec0d-d607-43be-8396-d7497446797a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c415ec0d-d607-43be-8396-d7497446797a\") pod \"prometheus-metric-storage-0\" (UID: \"04369fad-6e83-4f50-be3e-e37f8c2d6b60\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/d59bc7a0acd8bd6397975a8ccf07d7dfd4541474412ad4c27db5a1b617b7fd6c/globalmount\"" pod="openstack/prometheus-metric-storage-0"
Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.466482 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rwx6r\" (UniqueName: \"kubernetes.io/projected/04369fad-6e83-4f50-be3e-e37f8c2d6b60-kube-api-access-rwx6r\") pod \"prometheus-metric-storage-0\" (UID: \"04369fad-6e83-4f50-be3e-e37f8c2d6b60\") " pod="openstack/prometheus-metric-storage-0"
Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.467186 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/04369fad-6e83-4f50-be3e-e37f8c2d6b60-config\") pod \"prometheus-metric-storage-0\" (UID: \"04369fad-6e83-4f50-be3e-e37f8c2d6b60\") " pod="openstack/prometheus-metric-storage-0"
Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.567700 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-c415ec0d-d607-43be-8396-d7497446797a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-c415ec0d-d607-43be-8396-d7497446797a\") pod \"prometheus-metric-storage-0\" (UID: \"04369fad-6e83-4f50-be3e-e37f8c2d6b60\") " pod="openstack/prometheus-metric-storage-0"
Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.724049 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.893688 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.956893 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f174e778-1bd6-4685-a6de-af54e6fc3329","Type":"ContainerStarted","Data":"b71f23a2c013bff9748ea795a76b0d4dca3a194cc670e3817142a6697067456e"}
Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.956963 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"f174e778-1bd6-4685-a6de-af54e6fc3329","Type":"ContainerStarted","Data":"f9be101a74579ed8684f1d90faf7d63ee1b45588aa97434f5081dbcca7e702cb"}
Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.958943 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0"
Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.963239 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"2571cbd5-f5c5-4525-a259-05351f59b9bb","Type":"ContainerStarted","Data":"a24342079dce2b7827961535380ff4121382d2086d5170fd44354766ee8ba61d"}
Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.971460 4857 generic.go:334] "Generic (PLEG): container finished" podID="04ee383b-d1a2-4c75-8482-10ed3d034049" containerID="4de5c3066710df6d76e7d00374c0159f7a2011f2a1c077231d5f07c172ca860c" exitCode=137
Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.971526 4857 scope.go:117] "RemoveContainer" containerID="4de5c3066710df6d76e7d00374c0159f7a2011f2a1c077231d5f07c172ca860c"
Nov 28 15:15:26 crc kubenswrapper[4857]: I1128 15:15:26.971630 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Nov 28 15:15:27 crc kubenswrapper[4857]: I1128 15:15:27.002103 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/alertmanager-metric-storage-0"]
Nov 28 15:15:27 crc kubenswrapper[4857]: I1128 15:15:27.002927 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.452827342 podStartE2EDuration="3.002904795s" podCreationTimestamp="2025-11-28 15:15:24 +0000 UTC" firstStartedPulling="2025-11-28 15:15:25.977149376 +0000 UTC m=+6376.101090813" lastFinishedPulling="2025-11-28 15:15:26.527226829 +0000 UTC m=+6376.651168266" observedRunningTime="2025-11-28 15:15:26.976303484 +0000 UTC m=+6377.100244921" watchObservedRunningTime="2025-11-28 15:15:27.002904795 +0000 UTC m=+6377.126846232"
Nov 28 15:15:27 crc kubenswrapper[4857]: I1128 15:15:27.004971 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8xx6x\" (UniqueName: \"kubernetes.io/projected/04ee383b-d1a2-4c75-8482-10ed3d034049-kube-api-access-8xx6x\") pod \"04ee383b-d1a2-4c75-8482-10ed3d034049\" (UID: \"04ee383b-d1a2-4c75-8482-10ed3d034049\") "
Nov 28 15:15:27 crc kubenswrapper[4857]: I1128 15:15:27.005092 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/04ee383b-d1a2-4c75-8482-10ed3d034049-openstack-config\") pod \"04ee383b-d1a2-4c75-8482-10ed3d034049\" (UID: \"04ee383b-d1a2-4c75-8482-10ed3d034049\") "
Nov 28 15:15:27 crc kubenswrapper[4857]: I1128 15:15:27.005280 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/04ee383b-d1a2-4c75-8482-10ed3d034049-openstack-config-secret\") pod \"04ee383b-d1a2-4c75-8482-10ed3d034049\" (UID: \"04ee383b-d1a2-4c75-8482-10ed3d034049\") "
Nov 28 15:15:27 crc kubenswrapper[4857]: I1128 15:15:27.018071 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/04ee383b-d1a2-4c75-8482-10ed3d034049-kube-api-access-8xx6x" (OuterVolumeSpecName: "kube-api-access-8xx6x") pod "04ee383b-d1a2-4c75-8482-10ed3d034049" (UID: "04ee383b-d1a2-4c75-8482-10ed3d034049"). InnerVolumeSpecName "kube-api-access-8xx6x". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:15:27 crc kubenswrapper[4857]: I1128 15:15:27.046377 4857 scope.go:117] "RemoveContainer" containerID="4de5c3066710df6d76e7d00374c0159f7a2011f2a1c077231d5f07c172ca860c"
Nov 28 15:15:27 crc kubenswrapper[4857]: E1128 15:15:27.049275 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4de5c3066710df6d76e7d00374c0159f7a2011f2a1c077231d5f07c172ca860c\": container with ID starting with 4de5c3066710df6d76e7d00374c0159f7a2011f2a1c077231d5f07c172ca860c not found: ID does not exist" containerID="4de5c3066710df6d76e7d00374c0159f7a2011f2a1c077231d5f07c172ca860c"
Nov 28 15:15:27 crc kubenswrapper[4857]: I1128 15:15:27.049373 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4de5c3066710df6d76e7d00374c0159f7a2011f2a1c077231d5f07c172ca860c"} err="failed to get container status \"4de5c3066710df6d76e7d00374c0159f7a2011f2a1c077231d5f07c172ca860c\": rpc error: code = NotFound desc = could not find container \"4de5c3066710df6d76e7d00374c0159f7a2011f2a1c077231d5f07c172ca860c\": container with ID starting with 4de5c3066710df6d76e7d00374c0159f7a2011f2a1c077231d5f07c172ca860c not found: ID does not exist"
Nov 28 15:15:27 crc kubenswrapper[4857]: I1128 15:15:27.079514 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=3.07948462 podStartE2EDuration="3.07948462s" podCreationTimestamp="2025-11-28 15:15:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:15:27.000824799 +0000 UTC m=+6377.124766246" watchObservedRunningTime="2025-11-28 15:15:27.07948462 +0000 UTC m=+6377.203426057"
Nov 28 15:15:27 crc kubenswrapper[4857]: I1128 15:15:27.110916 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8xx6x\" (UniqueName: \"kubernetes.io/projected/04ee383b-d1a2-4c75-8482-10ed3d034049-kube-api-access-8xx6x\") on node \"crc\" DevicePath \"\""
Nov 28 15:15:27 crc kubenswrapper[4857]: I1128 15:15:27.122990 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/04ee383b-d1a2-4c75-8482-10ed3d034049-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "04ee383b-d1a2-4c75-8482-10ed3d034049" (UID: "04ee383b-d1a2-4c75-8482-10ed3d034049"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:15:27 crc kubenswrapper[4857]: I1128 15:15:27.132022 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/04ee383b-d1a2-4c75-8482-10ed3d034049-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "04ee383b-d1a2-4c75-8482-10ed3d034049" (UID: "04ee383b-d1a2-4c75-8482-10ed3d034049"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:15:27 crc kubenswrapper[4857]: I1128 15:15:27.213469 4857 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/04ee383b-d1a2-4c75-8482-10ed3d034049-openstack-config-secret\") on node \"crc\" DevicePath \"\""
Nov 28 15:15:27 crc kubenswrapper[4857]: I1128 15:15:27.213506 4857 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/04ee383b-d1a2-4c75-8482-10ed3d034049-openstack-config\") on node \"crc\" DevicePath \"\""
Nov 28 15:15:27 crc kubenswrapper[4857]: I1128 15:15:27.315076 4857 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="04ee383b-d1a2-4c75-8482-10ed3d034049" podUID="2571cbd5-f5c5-4525-a259-05351f59b9bb"
Nov 28 15:15:27 crc kubenswrapper[4857]: I1128 15:15:27.446705 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Nov 28 15:15:28 crc kubenswrapper[4857]: I1128 15:15:28.009990 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"04369fad-6e83-4f50-be3e-e37f8c2d6b60","Type":"ContainerStarted","Data":"1f9140c57411384855c6c004d07f79d60e536b0906f8b8f40765ed9ca12ee470"}
Nov 28 15:15:28 crc kubenswrapper[4857]: I1128 15:15:28.024711 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"95a5f00a-164f-43ea-8d38-0f9763cedfa4","Type":"ContainerStarted","Data":"dc87aa4b989c4753acda0c50af1b034cf4ba969da55fffa7b2041c62fe3552e4"}
Nov 28 15:15:28 crc kubenswrapper[4857]: I1128 15:15:28.049720 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-99ghx"]
Nov 28 15:15:28 crc kubenswrapper[4857]: I1128 15:15:28.068022 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-99ghx"]
Nov 28 15:15:28 crc kubenswrapper[4857]: I1128 15:15:28.244623 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="04ee383b-d1a2-4c75-8482-10ed3d034049" path="/var/lib/kubelet/pods/04ee383b-d1a2-4c75-8482-10ed3d034049/volumes"
Nov 28 15:15:28 crc kubenswrapper[4857]: I1128 15:15:28.246439 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03" path="/var/lib/kubelet/pods/7e2d8fa3-8a07-4a67-b4ec-5fcd991afd03/volumes"
Nov 28 15:15:29 crc kubenswrapper[4857]: I1128 15:15:29.120031 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-27hfs"]
Nov 28 15:15:29 crc kubenswrapper[4857]: I1128 15:15:29.151683 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-27hfs"]
Nov 28 15:15:30 crc kubenswrapper[4857]: I1128 15:15:30.252408 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c3aaa36-829f-4244-a40a-73810966b9ea" path="/var/lib/kubelet/pods/2c3aaa36-829f-4244-a40a-73810966b9ea/volumes"
Nov 28 15:15:34 crc kubenswrapper[4857]: I1128 15:15:34.772494 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0"
Nov 28 15:15:35 crc kubenswrapper[4857]: I1128 15:15:35.134366 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"04369fad-6e83-4f50-be3e-e37f8c2d6b60","Type":"ContainerStarted","Data":"c4c350794b2c88ff914a85bbc80a7ea3d794bf4f645e587c7577430c13f57204"}
Nov 28 15:15:35 crc kubenswrapper[4857]: I1128 15:15:35.136809 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"95a5f00a-164f-43ea-8d38-0f9763cedfa4","Type":"ContainerStarted","Data":"cd1fd8038a4e42445015242ed4b5d449f78d530ba64364624ef34e718c48ee58"}
Nov 28 15:15:44 crc kubenswrapper[4857]: I1128 15:15:44.265723 4857 generic.go:334] "Generic (PLEG): container finished" podID="95a5f00a-164f-43ea-8d38-0f9763cedfa4" containerID="cd1fd8038a4e42445015242ed4b5d449f78d530ba64364624ef34e718c48ee58" exitCode=0
Nov 28 15:15:44 crc kubenswrapper[4857]: I1128 15:15:44.266076 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"95a5f00a-164f-43ea-8d38-0f9763cedfa4","Type":"ContainerDied","Data":"cd1fd8038a4e42445015242ed4b5d449f78d530ba64364624ef34e718c48ee58"}
Nov 28 15:15:45 crc kubenswrapper[4857]: I1128 15:15:45.280537 4857 generic.go:334] "Generic (PLEG): container finished" podID="04369fad-6e83-4f50-be3e-e37f8c2d6b60" containerID="c4c350794b2c88ff914a85bbc80a7ea3d794bf4f645e587c7577430c13f57204" exitCode=0
Nov 28 15:15:45 crc kubenswrapper[4857]: I1128 15:15:45.280895 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"04369fad-6e83-4f50-be3e-e37f8c2d6b60","Type":"ContainerDied","Data":"c4c350794b2c88ff914a85bbc80a7ea3d794bf4f645e587c7577430c13f57204"}
Nov 28 15:15:47 crc kubenswrapper[4857]: I1128 15:15:47.042932 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-nnqbh"]
Nov 28 15:15:47 crc kubenswrapper[4857]: I1128 15:15:47.053313 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-nnqbh"]
Nov 28 15:15:47 crc kubenswrapper[4857]: I1128 15:15:47.303402 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"95a5f00a-164f-43ea-8d38-0f9763cedfa4","Type":"ContainerStarted","Data":"2c20fca729d9ae2469733a222cb31154b4801be63988538cbe076990c097dc3e"}
Nov 28 15:15:48 crc kubenswrapper[4857]: I1128 15:15:48.244466 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a1424332-a4bd-46ca-9ca6-a2f9a0af9238" path="/var/lib/kubelet/pods/a1424332-a4bd-46ca-9ca6-a2f9a0af9238/volumes"
Nov 28 15:15:51 crc kubenswrapper[4857]: I1128 15:15:51.347602 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"95a5f00a-164f-43ea-8d38-0f9763cedfa4","Type":"ContainerStarted","Data":"b5517888767295573a08ca1562b6e5bf7486af6960d8165c57fde2b6fd82aa0f"}
Nov 28 15:15:51 crc kubenswrapper[4857]: I1128 15:15:51.348467 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/alertmanager-metric-storage-0"
Nov 28 15:15:51 crc kubenswrapper[4857]: I1128 15:15:51.352567 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/alertmanager-metric-storage-0"
Nov 28 15:15:51 crc kubenswrapper[4857]: I1128 15:15:51.373534 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/alertmanager-metric-storage-0" podStartSLOduration=6.833010699 podStartE2EDuration="26.373512869s" podCreationTimestamp="2025-11-28 15:15:25 +0000 UTC" firstStartedPulling="2025-11-28 15:15:27.059732353 +0000 UTC m=+6377.183673790" lastFinishedPulling="2025-11-28 15:15:46.600234523 +0000 UTC m=+6396.724175960" observedRunningTime="2025-11-28 15:15:51.368501596 +0000 UTC m=+6401.492443053" watchObservedRunningTime="2025-11-28 15:15:51.373512869 +0000 UTC m=+6401.497454306"
Nov 28 15:15:55 crc kubenswrapper[4857]: I1128 15:15:55.403278 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"04369fad-6e83-4f50-be3e-e37f8c2d6b60","Type":"ContainerStarted","Data":"2bc73d51803cf579c289d77bebb252df27972638d7d682b7b70cff916dade54b"}
Nov 28 15:16:00 crc kubenswrapper[4857]: I1128 15:16:00.526105 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"04369fad-6e83-4f50-be3e-e37f8c2d6b60","Type":"ContainerStarted","Data":"faeae1ed9d8a6a07ac00bc9f448a2b660217add129ceacbbe648124e393ed8ee"}
Nov 28 15:16:03 crc kubenswrapper[4857]: I1128 15:16:03.565204 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"04369fad-6e83-4f50-be3e-e37f8c2d6b60","Type":"ContainerStarted","Data":"ce48c991a81922a64c8ed518e884f793b2b4df5ef9eaf7b71b6a34e16362cc19"}
Nov 28 15:16:03 crc kubenswrapper[4857]: I1128 15:16:03.605111 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=3.190762684 podStartE2EDuration="38.605077723s" podCreationTimestamp="2025-11-28 15:15:25 +0000 UTC" firstStartedPulling="2025-11-28 15:15:27.458402562 +0000 UTC m=+6377.582343999" lastFinishedPulling="2025-11-28 15:16:02.872717601 +0000 UTC m=+6412.996659038" observedRunningTime="2025-11-28 15:16:03.601890608 +0000 UTC m=+6413.725832085" watchObservedRunningTime="2025-11-28 15:16:03.605077723 +0000 UTC m=+6413.729019200"
Nov 28 15:16:06 crc kubenswrapper[4857]: I1128 15:16:06.725184 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0"
Nov 28 15:16:09 crc kubenswrapper[4857]: I1128 15:16:09.384701 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 28 15:16:09 crc kubenswrapper[4857]: I1128 15:16:09.388237 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 15:16:09 crc kubenswrapper[4857]: I1128 15:16:09.392601 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 28 15:16:09 crc kubenswrapper[4857]: I1128 15:16:09.393685 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 28 15:16:09 crc kubenswrapper[4857]: I1128 15:16:09.396205 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 15:16:09 crc kubenswrapper[4857]: I1128 15:16:09.499885 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-run-httpd\") pod \"ceilometer-0\" (UID: \"a845b9db-9faa-4cce-81ac-bbc703d4a9dc\") " pod="openstack/ceilometer-0"
Nov 28 15:16:09 crc kubenswrapper[4857]: I1128 15:16:09.500010 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-config-data\") pod \"ceilometer-0\" (UID: \"a845b9db-9faa-4cce-81ac-bbc703d4a9dc\") " pod="openstack/ceilometer-0"
Nov 28 15:16:09 crc kubenswrapper[4857]: I1128 15:16:09.500076 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a845b9db-9faa-4cce-81ac-bbc703d4a9dc\") " pod="openstack/ceilometer-0"
Nov 28 15:16:09 crc kubenswrapper[4857]: I1128 15:16:09.500114 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jfsc5\" (UniqueName: \"kubernetes.io/projected/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-kube-api-access-jfsc5\") pod \"ceilometer-0\" (UID: \"a845b9db-9faa-4cce-81ac-bbc703d4a9dc\") " pod="openstack/ceilometer-0"
Nov 28 15:16:09 crc kubenswrapper[4857]: I1128 15:16:09.500170 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-log-httpd\") pod \"ceilometer-0\" (UID: \"a845b9db-9faa-4cce-81ac-bbc703d4a9dc\") " pod="openstack/ceilometer-0"
Nov 28 15:16:09 crc kubenswrapper[4857]: I1128 15:16:09.500426 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a845b9db-9faa-4cce-81ac-bbc703d4a9dc\") " pod="openstack/ceilometer-0"
Nov 28 15:16:09 crc kubenswrapper[4857]: I1128 15:16:09.500523 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-scripts\") pod \"ceilometer-0\" (UID: \"a845b9db-9faa-4cce-81ac-bbc703d4a9dc\") " pod="openstack/ceilometer-0"
Nov 28 15:16:09 crc kubenswrapper[4857]: I1128 15:16:09.602324 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-run-httpd\") pod \"ceilometer-0\" (UID: \"a845b9db-9faa-4cce-81ac-bbc703d4a9dc\") " pod="openstack/ceilometer-0"
Nov 28 15:16:09 crc kubenswrapper[4857]: I1128 15:16:09.602388 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-config-data\") pod \"ceilometer-0\" (UID: \"a845b9db-9faa-4cce-81ac-bbc703d4a9dc\") " pod="openstack/ceilometer-0"
Nov 28 15:16:09 crc kubenswrapper[4857]: I1128 15:16:09.602429 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a845b9db-9faa-4cce-81ac-bbc703d4a9dc\") " pod="openstack/ceilometer-0"
Nov 28 15:16:09 crc kubenswrapper[4857]: I1128 15:16:09.602465 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jfsc5\" (UniqueName: \"kubernetes.io/projected/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-kube-api-access-jfsc5\") pod \"ceilometer-0\" (UID: \"a845b9db-9faa-4cce-81ac-bbc703d4a9dc\") " pod="openstack/ceilometer-0"
Nov 28 15:16:09 crc kubenswrapper[4857]: I1128 15:16:09.602522 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-log-httpd\") pod \"ceilometer-0\" (UID: \"a845b9db-9faa-4cce-81ac-bbc703d4a9dc\") " pod="openstack/ceilometer-0"
Nov 28 15:16:09 crc kubenswrapper[4857]: I1128 15:16:09.602590 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a845b9db-9faa-4cce-81ac-bbc703d4a9dc\") " pod="openstack/ceilometer-0"
Nov 28 15:16:09 crc kubenswrapper[4857]: I1128 15:16:09.602618 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-scripts\") pod \"ceilometer-0\" (UID: \"a845b9db-9faa-4cce-81ac-bbc703d4a9dc\") " pod="openstack/ceilometer-0"
Nov 28 15:16:09 crc kubenswrapper[4857]: I1128 15:16:09.603151 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-run-httpd\") pod \"ceilometer-0\" (UID: \"a845b9db-9faa-4cce-81ac-bbc703d4a9dc\") " pod="openstack/ceilometer-0"
Nov 28 15:16:09 crc kubenswrapper[4857]: I1128 15:16:09.603270 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-log-httpd\") pod \"ceilometer-0\" (UID: \"a845b9db-9faa-4cce-81ac-bbc703d4a9dc\") " pod="openstack/ceilometer-0"
Nov 28 15:16:09 crc kubenswrapper[4857]: I1128 15:16:09.611359 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-config-data\") pod \"ceilometer-0\" (UID: \"a845b9db-9faa-4cce-81ac-bbc703d4a9dc\") " pod="openstack/ceilometer-0"
Nov 28 15:16:09 crc kubenswrapper[4857]: I1128 15:16:09.611427 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-scripts\") pod \"ceilometer-0\" (UID: \"a845b9db-9faa-4cce-81ac-bbc703d4a9dc\") " pod="openstack/ceilometer-0"
Nov 28 15:16:09 crc kubenswrapper[4857]: I1128 15:16:09.611466 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a845b9db-9faa-4cce-81ac-bbc703d4a9dc\") " pod="openstack/ceilometer-0"
Nov 28 15:16:09 crc kubenswrapper[4857]: I1128 15:16:09.623335 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jfsc5\" (UniqueName: \"kubernetes.io/projected/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-kube-api-access-jfsc5\") pod \"ceilometer-0\" (UID: \"a845b9db-9faa-4cce-81ac-bbc703d4a9dc\") " pod="openstack/ceilometer-0"
Nov 28 15:16:09 crc kubenswrapper[4857]: I1128 15:16:09.626201 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a845b9db-9faa-4cce-81ac-bbc703d4a9dc\") " pod="openstack/ceilometer-0"
Nov 28 15:16:09 crc kubenswrapper[4857]: I1128 15:16:09.724154 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 15:16:10 crc kubenswrapper[4857]: I1128 15:16:10.303768 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 28 15:16:10 crc kubenswrapper[4857]: I1128 15:16:10.668094 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a845b9db-9faa-4cce-81ac-bbc703d4a9dc","Type":"ContainerStarted","Data":"e89e5ea5b506a94dd9582d548e739b96d5e5f88f05c34b02671f29e8d6995232"}
Nov 28 15:16:11 crc kubenswrapper[4857]: I1128 15:16:11.725492 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0"
Nov 28 15:16:11 crc kubenswrapper[4857]: I1128 15:16:11.729348 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0"
Nov 28 15:16:12 crc kubenswrapper[4857]: I1128 15:16:12.693456 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a845b9db-9faa-4cce-81ac-bbc703d4a9dc","Type":"ContainerStarted","Data":"3cf37b68ae118404e303a8ea13765f0330017b39ede37ec3471199e4292e0086"}
Nov 28 15:16:12 crc kubenswrapper[4857]: I1128 15:16:12.695373 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0"
Nov 28 15:16:13 crc kubenswrapper[4857]: I1128 15:16:13.708638 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a845b9db-9faa-4cce-81ac-bbc703d4a9dc","Type":"ContainerStarted","Data":"900c563193bbd99eb8bedd21e00ed5e6bc5550a67cf048624dfd4f08a8a5e06b"}
Nov 28 15:16:16 crc kubenswrapper[4857]: I1128 15:16:16.753651 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a845b9db-9faa-4cce-81ac-bbc703d4a9dc","Type":"ContainerStarted","Data":"9a9c66feaf593d3f58f0754ed79d458ffe23c7b06ae238cea81a11ef897bb57c"}
Nov 28 15:16:17 crc kubenswrapper[4857]: I1128 15:16:17.767260 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a845b9db-9faa-4cce-81ac-bbc703d4a9dc","Type":"ContainerStarted","Data":"03bf10235d37f4ec05bdfe7b9acc70d19041c144f495972846b127abd33bb3f3"}
Nov 28 15:16:17 crc kubenswrapper[4857]: I1128 15:16:17.769381 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 28 15:16:17 crc kubenswrapper[4857]: I1128 15:16:17.795372 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.7494386039999998 podStartE2EDuration="8.795348575s" podCreationTimestamp="2025-11-28 15:16:09 +0000 UTC" firstStartedPulling="2025-11-28 15:16:10.322610903 +0000 UTC m=+6420.446552340" lastFinishedPulling="2025-11-28 15:16:17.368520854 +0000 UTC m=+6427.492462311" observedRunningTime="2025-11-28 15:16:17.791100541 +0000 UTC m=+6427.915041978" watchObservedRunningTime="2025-11-28 15:16:17.795348575 +0000 UTC m=+6427.919290012"
Nov 28 15:16:22 crc kubenswrapper[4857]: I1128 15:16:22.162808 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-create-qpksf"]
Nov 28 15:16:22 crc kubenswrapper[4857]: I1128 15:16:22.166224 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-qpksf"
Nov 28 15:16:22 crc kubenswrapper[4857]: I1128 15:16:22.176260 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-qpksf"]
Nov 28 15:16:22 crc kubenswrapper[4857]: I1128 15:16:22.256784 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-3ed0-account-create-update-wtjjl"]
Nov 28 15:16:22 crc kubenswrapper[4857]: I1128 15:16:22.258229 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-3ed0-account-create-update-wtjjl"]
Nov 28 15:16:22 crc kubenswrapper[4857]: I1128 15:16:22.258332 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-3ed0-account-create-update-wtjjl"
Nov 28 15:16:22 crc kubenswrapper[4857]: I1128 15:16:22.261533 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-db-secret"
Nov 28 15:16:22 crc kubenswrapper[4857]: I1128 15:16:22.264106 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f89f2fe7-8c04-4877-9ccd-7d94509fa1b5-operator-scripts\") pod \"aodh-db-create-qpksf\" (UID: \"f89f2fe7-8c04-4877-9ccd-7d94509fa1b5\") " pod="openstack/aodh-db-create-qpksf"
Nov 28 15:16:22 crc kubenswrapper[4857]: I1128 15:16:22.264185 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/46705f3c-1ca3-46fd-9cd2-dcf6bfd62b73-operator-scripts\") pod \"aodh-3ed0-account-create-update-wtjjl\" (UID: \"46705f3c-1ca3-46fd-9cd2-dcf6bfd62b73\") " pod="openstack/aodh-3ed0-account-create-update-wtjjl"
Nov 28 15:16:22 crc kubenswrapper[4857]: I1128 15:16:22.264309 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-869jn\" (UniqueName: \"kubernetes.io/projected/f89f2fe7-8c04-4877-9ccd-7d94509fa1b5-kube-api-access-869jn\") pod \"aodh-db-create-qpksf\" (UID: \"f89f2fe7-8c04-4877-9ccd-7d94509fa1b5\") " pod="openstack/aodh-db-create-qpksf"
Nov 28 15:16:22 crc kubenswrapper[4857]: I1128 15:16:22.264456 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kh9fw\" (UniqueName: \"kubernetes.io/projected/46705f3c-1ca3-46fd-9cd2-dcf6bfd62b73-kube-api-access-kh9fw\") pod \"aodh-3ed0-account-create-update-wtjjl\" (UID: \"46705f3c-1ca3-46fd-9cd2-dcf6bfd62b73\") " pod="openstack/aodh-3ed0-account-create-update-wtjjl"
Nov 28 15:16:22 crc kubenswrapper[4857]: I1128 15:16:22.304369 4857 scope.go:117] "RemoveContainer" containerID="226311157ec87d35666f9de9c325f469914d8459c2e76a3f9d3785851dfdec6f"
Nov 28 15:16:22 crc kubenswrapper[4857]: I1128 15:16:22.342902 4857 scope.go:117] "RemoveContainer" containerID="9921428dcb2d5049ad9a65b16ce3bfd4f546d4f6fe729b54737b98ea91aa4f82"
Nov 28 15:16:22 crc kubenswrapper[4857]: I1128 15:16:22.367437 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f89f2fe7-8c04-4877-9ccd-7d94509fa1b5-operator-scripts\") pod \"aodh-db-create-qpksf\" (UID: \"f89f2fe7-8c04-4877-9ccd-7d94509fa1b5\") " pod="openstack/aodh-db-create-qpksf"
Nov 28 15:16:22 crc kubenswrapper[4857]: I1128 15:16:22.367535 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/46705f3c-1ca3-46fd-9cd2-dcf6bfd62b73-operator-scripts\") pod \"aodh-3ed0-account-create-update-wtjjl\" (UID: \"46705f3c-1ca3-46fd-9cd2-dcf6bfd62b73\") " pod="openstack/aodh-3ed0-account-create-update-wtjjl"
Nov 28 15:16:22 crc kubenswrapper[4857]: I1128 15:16:22.367667 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-869jn\" (UniqueName: \"kubernetes.io/projected/f89f2fe7-8c04-4877-9ccd-7d94509fa1b5-kube-api-access-869jn\") pod \"aodh-db-create-qpksf\" (UID: \"f89f2fe7-8c04-4877-9ccd-7d94509fa1b5\") " pod="openstack/aodh-db-create-qpksf"
Nov 28 15:16:22 crc kubenswrapper[4857]: I1128 15:16:22.367790 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kh9fw\" (UniqueName: \"kubernetes.io/projected/46705f3c-1ca3-46fd-9cd2-dcf6bfd62b73-kube-api-access-kh9fw\") pod \"aodh-3ed0-account-create-update-wtjjl\" (UID: \"46705f3c-1ca3-46fd-9cd2-dcf6bfd62b73\") " pod="openstack/aodh-3ed0-account-create-update-wtjjl"
Nov 28 15:16:22 crc kubenswrapper[4857]: I1128 15:16:22.368532 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/46705f3c-1ca3-46fd-9cd2-dcf6bfd62b73-operator-scripts\") pod \"aodh-3ed0-account-create-update-wtjjl\" (UID: \"46705f3c-1ca3-46fd-9cd2-dcf6bfd62b73\") " pod="openstack/aodh-3ed0-account-create-update-wtjjl"
Nov 28 15:16:22 crc kubenswrapper[4857]: I1128 15:16:22.370983 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f89f2fe7-8c04-4877-9ccd-7d94509fa1b5-operator-scripts\") pod \"aodh-db-create-qpksf\" (UID: \"f89f2fe7-8c04-4877-9ccd-7d94509fa1b5\") " pod="openstack/aodh-db-create-qpksf"
Nov 28 15:16:22 crc kubenswrapper[4857]: I1128 15:16:22.393880 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kh9fw\" (UniqueName: \"kubernetes.io/projected/46705f3c-1ca3-46fd-9cd2-dcf6bfd62b73-kube-api-access-kh9fw\") pod \"aodh-3ed0-account-create-update-wtjjl\" (UID: \"46705f3c-1ca3-46fd-9cd2-dcf6bfd62b73\") " pod="openstack/aodh-3ed0-account-create-update-wtjjl"
Nov 28 15:16:22 crc kubenswrapper[4857]: I1128 15:16:22.394232 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-869jn\" (UniqueName: \"kubernetes.io/projected/f89f2fe7-8c04-4877-9ccd-7d94509fa1b5-kube-api-access-869jn\") pod \"aodh-db-create-qpksf\" (UID: \"f89f2fe7-8c04-4877-9ccd-7d94509fa1b5\") " pod="openstack/aodh-db-create-qpksf"
Nov 28 15:16:22 crc kubenswrapper[4857]: I1128 15:16:22.422089 4857 scope.go:117] "RemoveContainer" containerID="9261a792a7599a6ceb7722c629f10ec25b113c5f241ccbfc0384341deb7afe53"
Nov 28 15:16:22 crc kubenswrapper[4857]: I1128 15:16:22.508099 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-qpksf"
Nov 28 15:16:22 crc kubenswrapper[4857]: I1128 15:16:22.539913 4857 scope.go:117] "RemoveContainer" containerID="40c4582a3db9563d73dd7bb2b12b84b0c9effbee30b40723e9e832dd7a3f9fee"
Nov 28 15:16:22 crc kubenswrapper[4857]: I1128 15:16:22.586428 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-3ed0-account-create-update-wtjjl"
Nov 28 15:16:22 crc kubenswrapper[4857]: I1128 15:16:22.961608 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-qpksf"]
Nov 28 15:16:23 crc kubenswrapper[4857]: I1128 15:16:23.339971 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-3ed0-account-create-update-wtjjl"]
Nov 28 15:16:23 crc kubenswrapper[4857]: I1128 15:16:23.877599 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-3ed0-account-create-update-wtjjl" event={"ID":"46705f3c-1ca3-46fd-9cd2-dcf6bfd62b73","Type":"ContainerStarted","Data":"be40611c85bcbd598b534d0f083a7507e79dc2e9595eddfc6723570eae04fc39"}
Nov 28 15:16:23 crc kubenswrapper[4857]: I1128 15:16:23.879698 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-qpksf" event={"ID":"f89f2fe7-8c04-4877-9ccd-7d94509fa1b5","Type":"ContainerStarted","Data":"f31e52c8364dd90e4a515829e877d54527f4d56ca88e624157d0713908b7bfec"}
Nov 28 15:16:25 crc kubenswrapper[4857]: I1128 15:16:25.923170 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-3ed0-account-create-update-wtjjl" event={"ID":"46705f3c-1ca3-46fd-9cd2-dcf6bfd62b73","Type":"ContainerStarted","Data":"90d9b9163807207abd4fc4d3c4e517b7d88a9e24ca7a008f5300e344f56a7515"}
Nov 28 15:16:25 crc kubenswrapper[4857]: I1128 15:16:25.927158 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-qpksf" event={"ID":"f89f2fe7-8c04-4877-9ccd-7d94509fa1b5","Type":"ContainerStarted","Data":"c9723de64b7e6004df7af9003bb6f4faa2cccbd281b1b15903f1c13a73f1fa68"}
Nov 28 15:16:25 crc kubenswrapper[4857]: I1128 15:16:25.962751 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-3ed0-account-create-update-wtjjl" podStartSLOduration=3.962719921 podStartE2EDuration="3.962719921s" podCreationTimestamp="2025-11-28 15:16:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:16:25.948484701 +0000 UTC m=+6436.072426138" watchObservedRunningTime="2025-11-28 15:16:25.962719921 +0000 UTC m=+6436.086661388"
Nov 28 15:16:25 crc kubenswrapper[4857]: I1128 15:16:25.983067 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-create-qpksf" podStartSLOduration=3.983034593 podStartE2EDuration="3.983034593s" podCreationTimestamp="2025-11-28 15:16:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:16:25.974226968 +0000 UTC m=+6436.098168405" watchObservedRunningTime="2025-11-28 15:16:25.983034593 +0000 UTC m=+6436.106976040"
Nov 28 15:16:26 crc kubenswrapper[4857]: I1128 15:16:26.943414 4857 generic.go:334] "Generic (PLEG): container finished" podID="46705f3c-1ca3-46fd-9cd2-dcf6bfd62b73" containerID="90d9b9163807207abd4fc4d3c4e517b7d88a9e24ca7a008f5300e344f56a7515" exitCode=0
Nov 28 15:16:26 crc kubenswrapper[4857]: I1128 15:16:26.944002 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-3ed0-account-create-update-wtjjl" event={"ID":"46705f3c-1ca3-46fd-9cd2-dcf6bfd62b73","Type":"ContainerDied","Data":"90d9b9163807207abd4fc4d3c4e517b7d88a9e24ca7a008f5300e344f56a7515"}
Nov 28 15:16:26 crc kubenswrapper[4857]: I1128 15:16:26.947582 4857 generic.go:334] "Generic (PLEG): container finished" podID="f89f2fe7-8c04-4877-9ccd-7d94509fa1b5" containerID="c9723de64b7e6004df7af9003bb6f4faa2cccbd281b1b15903f1c13a73f1fa68" exitCode=0
Nov 28 15:16:26 crc kubenswrapper[4857]: I1128 15:16:26.947624 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-qpksf" event={"ID":"f89f2fe7-8c04-4877-9ccd-7d94509fa1b5","Type":"ContainerDied","Data":"c9723de64b7e6004df7af9003bb6f4faa2cccbd281b1b15903f1c13a73f1fa68"}
Nov 28 15:16:28 crc kubenswrapper[4857]: I1128 15:16:28.473520 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-qpksf"
Nov 28 15:16:28 crc kubenswrapper[4857]: I1128 15:16:28.577504 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-3ed0-account-create-update-wtjjl"
Nov 28 15:16:28 crc kubenswrapper[4857]: I1128 15:16:28.657657 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-869jn\" (UniqueName: \"kubernetes.io/projected/f89f2fe7-8c04-4877-9ccd-7d94509fa1b5-kube-api-access-869jn\") pod \"f89f2fe7-8c04-4877-9ccd-7d94509fa1b5\" (UID: \"f89f2fe7-8c04-4877-9ccd-7d94509fa1b5\") "
Nov 28 15:16:28 crc kubenswrapper[4857]: I1128 15:16:28.658015 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f89f2fe7-8c04-4877-9ccd-7d94509fa1b5-operator-scripts\") pod \"f89f2fe7-8c04-4877-9ccd-7d94509fa1b5\" (UID: \"f89f2fe7-8c04-4877-9ccd-7d94509fa1b5\") "
Nov 28 15:16:28 crc kubenswrapper[4857]: I1128 15:16:28.658560 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f89f2fe7-8c04-4877-9ccd-7d94509fa1b5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f89f2fe7-8c04-4877-9ccd-7d94509fa1b5" (UID: "f89f2fe7-8c04-4877-9ccd-7d94509fa1b5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:16:28 crc kubenswrapper[4857]: I1128 15:16:28.659129 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f89f2fe7-8c04-4877-9ccd-7d94509fa1b5-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 15:16:28 crc kubenswrapper[4857]: I1128 15:16:28.664342 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f89f2fe7-8c04-4877-9ccd-7d94509fa1b5-kube-api-access-869jn" (OuterVolumeSpecName: "kube-api-access-869jn") pod "f89f2fe7-8c04-4877-9ccd-7d94509fa1b5" (UID: "f89f2fe7-8c04-4877-9ccd-7d94509fa1b5"). InnerVolumeSpecName "kube-api-access-869jn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:16:28 crc kubenswrapper[4857]: I1128 15:16:28.760717 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/46705f3c-1ca3-46fd-9cd2-dcf6bfd62b73-operator-scripts\") pod \"46705f3c-1ca3-46fd-9cd2-dcf6bfd62b73\" (UID: \"46705f3c-1ca3-46fd-9cd2-dcf6bfd62b73\") "
Nov 28 15:16:28 crc kubenswrapper[4857]: I1128 15:16:28.761156 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kh9fw\" (UniqueName: \"kubernetes.io/projected/46705f3c-1ca3-46fd-9cd2-dcf6bfd62b73-kube-api-access-kh9fw\") pod \"46705f3c-1ca3-46fd-9cd2-dcf6bfd62b73\" (UID: \"46705f3c-1ca3-46fd-9cd2-dcf6bfd62b73\") "
Nov 28 15:16:28 crc kubenswrapper[4857]: I1128 15:16:28.761256 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/46705f3c-1ca3-46fd-9cd2-dcf6bfd62b73-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "46705f3c-1ca3-46fd-9cd2-dcf6bfd62b73" (UID: "46705f3c-1ca3-46fd-9cd2-dcf6bfd62b73"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 28 15:16:28 crc kubenswrapper[4857]: I1128 15:16:28.762158 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/46705f3c-1ca3-46fd-9cd2-dcf6bfd62b73-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 28 15:16:28 crc kubenswrapper[4857]: I1128 15:16:28.762190 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-869jn\" (UniqueName: \"kubernetes.io/projected/f89f2fe7-8c04-4877-9ccd-7d94509fa1b5-kube-api-access-869jn\") on node \"crc\" DevicePath \"\""
Nov 28 15:16:28 crc kubenswrapper[4857]: I1128 15:16:28.765596 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/46705f3c-1ca3-46fd-9cd2-dcf6bfd62b73-kube-api-access-kh9fw" (OuterVolumeSpecName: "kube-api-access-kh9fw") pod "46705f3c-1ca3-46fd-9cd2-dcf6bfd62b73" (UID: "46705f3c-1ca3-46fd-9cd2-dcf6bfd62b73"). InnerVolumeSpecName "kube-api-access-kh9fw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:16:28 crc kubenswrapper[4857]: I1128 15:16:28.865463 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kh9fw\" (UniqueName: \"kubernetes.io/projected/46705f3c-1ca3-46fd-9cd2-dcf6bfd62b73-kube-api-access-kh9fw\") on node \"crc\" DevicePath \"\""
Nov 28 15:16:28 crc kubenswrapper[4857]: I1128 15:16:28.984126 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-3ed0-account-create-update-wtjjl" event={"ID":"46705f3c-1ca3-46fd-9cd2-dcf6bfd62b73","Type":"ContainerDied","Data":"be40611c85bcbd598b534d0f083a7507e79dc2e9595eddfc6723570eae04fc39"}
Nov 28 15:16:28 crc kubenswrapper[4857]: I1128 15:16:28.985765 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="be40611c85bcbd598b534d0f083a7507e79dc2e9595eddfc6723570eae04fc39"
Nov 28 15:16:28 crc kubenswrapper[4857]: I1128 15:16:28.984468 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-3ed0-account-create-update-wtjjl"
Nov 28 15:16:28 crc kubenswrapper[4857]: I1128 15:16:28.988011 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-qpksf" event={"ID":"f89f2fe7-8c04-4877-9ccd-7d94509fa1b5","Type":"ContainerDied","Data":"f31e52c8364dd90e4a515829e877d54527f4d56ca88e624157d0713908b7bfec"}
Nov 28 15:16:28 crc kubenswrapper[4857]: I1128 15:16:28.988075 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f31e52c8364dd90e4a515829e877d54527f4d56ca88e624157d0713908b7bfec"
Nov 28 15:16:28 crc kubenswrapper[4857]: I1128 15:16:28.988067 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-qpksf"
Nov 28 15:16:31 crc kubenswrapper[4857]: I1128 15:16:31.073302 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-7tw52"]
Nov 28 15:16:31 crc kubenswrapper[4857]: I1128 15:16:31.091280 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-c454-account-create-update-9schb"]
Nov 28 15:16:31 crc kubenswrapper[4857]: I1128 15:16:31.107138 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-7tw52"]
Nov 28 15:16:31 crc kubenswrapper[4857]: I1128 15:16:31.117111 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-c454-account-create-update-9schb"]
Nov 28 15:16:32 crc kubenswrapper[4857]: I1128 15:16:32.244502 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a49de8a5-e449-4cd2-976a-015e5c8e4362" path="/var/lib/kubelet/pods/a49de8a5-e449-4cd2-976a-015e5c8e4362/volumes"
Nov 28 15:16:32 crc kubenswrapper[4857]: I1128 15:16:32.246728 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd3a275e-912f-42e7-85b2-7e0ed8f6c2a1" path="/var/lib/kubelet/pods/bd3a275e-912f-42e7-85b2-7e0ed8f6c2a1/volumes"
Nov 28 15:16:32 crc kubenswrapper[4857]: I1128 15:16:32.765495 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-sync-wclgf"]
Nov 28 15:16:32 crc kubenswrapper[4857]: E1128 15:16:32.766168 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46705f3c-1ca3-46fd-9cd2-dcf6bfd62b73" containerName="mariadb-account-create-update"
Nov 28 15:16:32 crc kubenswrapper[4857]: I1128 15:16:32.766194 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="46705f3c-1ca3-46fd-9cd2-dcf6bfd62b73" containerName="mariadb-account-create-update"
Nov 28 15:16:32 crc kubenswrapper[4857]: E1128 15:16:32.766236 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f89f2fe7-8c04-4877-9ccd-7d94509fa1b5" containerName="mariadb-database-create"
Nov 28 15:16:32 crc kubenswrapper[4857]: I1128 15:16:32.766246 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f89f2fe7-8c04-4877-9ccd-7d94509fa1b5" containerName="mariadb-database-create"
Nov 28 15:16:32 crc kubenswrapper[4857]: I1128 15:16:32.766553 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="46705f3c-1ca3-46fd-9cd2-dcf6bfd62b73" containerName="mariadb-account-create-update"
Nov 28 15:16:32 crc kubenswrapper[4857]: I1128 15:16:32.766596 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f89f2fe7-8c04-4877-9ccd-7d94509fa1b5" containerName="mariadb-database-create"
Nov 28 15:16:32 crc kubenswrapper[4857]: I1128 15:16:32.767713 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-wclgf"
Nov 28 15:16:32 crc kubenswrapper[4857]: I1128 15:16:32.770592 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-2rv55"
Nov 28 15:16:32 crc kubenswrapper[4857]: I1128 15:16:32.770869 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts"
Nov 28 15:16:32 crc kubenswrapper[4857]: I1128 15:16:32.772714 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data"
Nov 28 15:16:32 crc kubenswrapper[4857]: I1128 15:16:32.772989 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret"
Nov 28 15:16:32 crc kubenswrapper[4857]: I1128 15:16:32.793808 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-wclgf"]
Nov 28 15:16:32 crc kubenswrapper[4857]: I1128 15:16:32.869608 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b0961f51-514c-40dd-80aa-fcb46e84f2eb-scripts\") pod \"aodh-db-sync-wclgf\" (UID: \"b0961f51-514c-40dd-80aa-fcb46e84f2eb\") " pod="openstack/aodh-db-sync-wclgf"
Nov 28 15:16:32 crc kubenswrapper[4857]: I1128 15:16:32.870099 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0961f51-514c-40dd-80aa-fcb46e84f2eb-config-data\") pod \"aodh-db-sync-wclgf\" (UID: \"b0961f51-514c-40dd-80aa-fcb46e84f2eb\") " pod="openstack/aodh-db-sync-wclgf"
Nov 28 15:16:32 crc kubenswrapper[4857]: I1128 15:16:32.870152 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0961f51-514c-40dd-80aa-fcb46e84f2eb-combined-ca-bundle\") pod \"aodh-db-sync-wclgf\" (UID: \"b0961f51-514c-40dd-80aa-fcb46e84f2eb\") " pod="openstack/aodh-db-sync-wclgf"
Nov 28 15:16:32 crc kubenswrapper[4857]: I1128 15:16:32.870184 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k5fqm\" (UniqueName: \"kubernetes.io/projected/b0961f51-514c-40dd-80aa-fcb46e84f2eb-kube-api-access-k5fqm\") pod \"aodh-db-sync-wclgf\" (UID: \"b0961f51-514c-40dd-80aa-fcb46e84f2eb\") " pod="openstack/aodh-db-sync-wclgf"
Nov 28 15:16:32 crc kubenswrapper[4857]: I1128 15:16:32.972030 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0961f51-514c-40dd-80aa-fcb46e84f2eb-config-data\") pod \"aodh-db-sync-wclgf\" (UID: \"b0961f51-514c-40dd-80aa-fcb46e84f2eb\") " pod="openstack/aodh-db-sync-wclgf"
Nov 28 15:16:32 crc kubenswrapper[4857]: I1128 15:16:32.972128 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0961f51-514c-40dd-80aa-fcb46e84f2eb-combined-ca-bundle\") pod \"aodh-db-sync-wclgf\" (UID: \"b0961f51-514c-40dd-80aa-fcb46e84f2eb\") " pod="openstack/aodh-db-sync-wclgf"
Nov 28 15:16:32 crc kubenswrapper[4857]: I1128 15:16:32.972176 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k5fqm\" (UniqueName: \"kubernetes.io/projected/b0961f51-514c-40dd-80aa-fcb46e84f2eb-kube-api-access-k5fqm\") pod \"aodh-db-sync-wclgf\" (UID: \"b0961f51-514c-40dd-80aa-fcb46e84f2eb\") " pod="openstack/aodh-db-sync-wclgf"
Nov 28 15:16:32 crc
kubenswrapper[4857]: I1128 15:16:32.972290 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b0961f51-514c-40dd-80aa-fcb46e84f2eb-scripts\") pod \"aodh-db-sync-wclgf\" (UID: \"b0961f51-514c-40dd-80aa-fcb46e84f2eb\") " pod="openstack/aodh-db-sync-wclgf"
Nov 28 15:16:32 crc kubenswrapper[4857]: I1128 15:16:32.979368 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0961f51-514c-40dd-80aa-fcb46e84f2eb-combined-ca-bundle\") pod \"aodh-db-sync-wclgf\" (UID: \"b0961f51-514c-40dd-80aa-fcb46e84f2eb\") " pod="openstack/aodh-db-sync-wclgf"
Nov 28 15:16:32 crc kubenswrapper[4857]: I1128 15:16:32.991678 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b0961f51-514c-40dd-80aa-fcb46e84f2eb-scripts\") pod \"aodh-db-sync-wclgf\" (UID: \"b0961f51-514c-40dd-80aa-fcb46e84f2eb\") " pod="openstack/aodh-db-sync-wclgf"
Nov 28 15:16:32 crc kubenswrapper[4857]: I1128 15:16:32.992448 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0961f51-514c-40dd-80aa-fcb46e84f2eb-config-data\") pod \"aodh-db-sync-wclgf\" (UID: \"b0961f51-514c-40dd-80aa-fcb46e84f2eb\") " pod="openstack/aodh-db-sync-wclgf"
Nov 28 15:16:33 crc kubenswrapper[4857]: I1128 15:16:33.002572 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k5fqm\" (UniqueName: \"kubernetes.io/projected/b0961f51-514c-40dd-80aa-fcb46e84f2eb-kube-api-access-k5fqm\") pod \"aodh-db-sync-wclgf\" (UID: \"b0961f51-514c-40dd-80aa-fcb46e84f2eb\") " pod="openstack/aodh-db-sync-wclgf"
Nov 28 15:16:33 crc kubenswrapper[4857]: I1128 15:16:33.141002 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-wclgf"
Nov 28 15:16:35 crc kubenswrapper[4857]: I1128 15:16:35.650332 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-wclgf"]
Nov 28 15:16:36 crc kubenswrapper[4857]: I1128 15:16:36.068633 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-wclgf" event={"ID":"b0961f51-514c-40dd-80aa-fcb46e84f2eb","Type":"ContainerStarted","Data":"5f63c3bad9429321617321a37fa49bfc4868558ea24ee7d8658fcf4793305da6"}
Nov 28 15:16:38 crc kubenswrapper[4857]: I1128 15:16:38.527785 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-7pklh"]
Nov 28 15:16:38 crc kubenswrapper[4857]: I1128 15:16:38.531460 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7pklh"
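
For aodh-db-sync-wclgf the volume entries above follow the kubelet reconciler's fixed per-volume progression: VerifyControllerAttachedVolume started (reconciler_common.go:245), then operationExecutor.MountVolume started (reconciler_common.go:218), then MountVolume.SetUp succeeded (operation_generator.go:637), after which the sandbox can be created. A sketch that groups those three phases per volume name from lines like the ones here; the phase labels and regexes are illustrative, and the \\" escaping matches how journald renders these messages in this log.

# Sketch: reconstruct the per-volume attach/mount progression from
# reconciler and operation_generator journal entries.
import re
from collections import defaultdict

PHASES = (
    ("verify-attached", re.compile(r'VerifyControllerAttachedVolume started for volume \\"([^"\\]+)\\"')),
    ("mount-started",   re.compile(r'operationExecutor\.MountVolume started for volume \\"([^"\\]+)\\"')),
    ("setup-succeeded", re.compile(r'MountVolume\.SetUp succeeded for volume \\"([^"\\]+)\\"')),
)

def mount_progress(lines):
    """Map volume name -> list of phases seen, in log order."""
    seen = defaultdict(list)
    for line in lines:
        for phase, rx in PHASES:
            m = rx.search(line)
            if m:
                seen[m.group(1)].append(phase)
    return dict(seen)

sample = r'I1128 15:16:32.991678 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (...)'
print(mount_progress([sample]))  # {'scripts': ['setup-succeeded']}
# A fully mounted volume shows all three phases in order; anything stuck
# before "setup-succeeded" is what holds up sandbox creation.
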
Need to start a new one" pod="openshift-marketplace/certified-operators-7pklh" Nov 28 15:16:38 crc kubenswrapper[4857]: I1128 15:16:38.541173 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7pklh"] Nov 28 15:16:38 crc kubenswrapper[4857]: I1128 15:16:38.637305 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0f3985e8-1760-454f-bb07-709aa0645788-catalog-content\") pod \"certified-operators-7pklh\" (UID: \"0f3985e8-1760-454f-bb07-709aa0645788\") " pod="openshift-marketplace/certified-operators-7pklh" Nov 28 15:16:38 crc kubenswrapper[4857]: I1128 15:16:38.638159 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0f3985e8-1760-454f-bb07-709aa0645788-utilities\") pod \"certified-operators-7pklh\" (UID: \"0f3985e8-1760-454f-bb07-709aa0645788\") " pod="openshift-marketplace/certified-operators-7pklh" Nov 28 15:16:38 crc kubenswrapper[4857]: I1128 15:16:38.638479 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t224n\" (UniqueName: \"kubernetes.io/projected/0f3985e8-1760-454f-bb07-709aa0645788-kube-api-access-t224n\") pod \"certified-operators-7pklh\" (UID: \"0f3985e8-1760-454f-bb07-709aa0645788\") " pod="openshift-marketplace/certified-operators-7pklh" Nov 28 15:16:38 crc kubenswrapper[4857]: I1128 15:16:38.741035 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t224n\" (UniqueName: \"kubernetes.io/projected/0f3985e8-1760-454f-bb07-709aa0645788-kube-api-access-t224n\") pod \"certified-operators-7pklh\" (UID: \"0f3985e8-1760-454f-bb07-709aa0645788\") " pod="openshift-marketplace/certified-operators-7pklh" Nov 28 15:16:38 crc kubenswrapper[4857]: I1128 15:16:38.741127 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0f3985e8-1760-454f-bb07-709aa0645788-catalog-content\") pod \"certified-operators-7pklh\" (UID: \"0f3985e8-1760-454f-bb07-709aa0645788\") " pod="openshift-marketplace/certified-operators-7pklh" Nov 28 15:16:38 crc kubenswrapper[4857]: I1128 15:16:38.741244 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0f3985e8-1760-454f-bb07-709aa0645788-utilities\") pod \"certified-operators-7pklh\" (UID: \"0f3985e8-1760-454f-bb07-709aa0645788\") " pod="openshift-marketplace/certified-operators-7pklh" Nov 28 15:16:38 crc kubenswrapper[4857]: I1128 15:16:38.741839 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0f3985e8-1760-454f-bb07-709aa0645788-utilities\") pod \"certified-operators-7pklh\" (UID: \"0f3985e8-1760-454f-bb07-709aa0645788\") " pod="openshift-marketplace/certified-operators-7pklh" Nov 28 15:16:38 crc kubenswrapper[4857]: I1128 15:16:38.742041 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0f3985e8-1760-454f-bb07-709aa0645788-catalog-content\") pod \"certified-operators-7pklh\" (UID: \"0f3985e8-1760-454f-bb07-709aa0645788\") " pod="openshift-marketplace/certified-operators-7pklh" Nov 28 15:16:38 crc kubenswrapper[4857]: I1128 15:16:38.760223 4857 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-t224n\" (UniqueName: \"kubernetes.io/projected/0f3985e8-1760-454f-bb07-709aa0645788-kube-api-access-t224n\") pod \"certified-operators-7pklh\" (UID: \"0f3985e8-1760-454f-bb07-709aa0645788\") " pod="openshift-marketplace/certified-operators-7pklh" Nov 28 15:16:38 crc kubenswrapper[4857]: I1128 15:16:38.919159 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7pklh" Nov 28 15:16:39 crc kubenswrapper[4857]: I1128 15:16:39.036722 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-8xp9l"] Nov 28 15:16:39 crc kubenswrapper[4857]: I1128 15:16:39.047983 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-8xp9l"] Nov 28 15:16:39 crc kubenswrapper[4857]: I1128 15:16:39.730331 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 28 15:16:40 crc kubenswrapper[4857]: I1128 15:16:40.246246 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="883a5e33-4994-4674-84fe-14c2d13671e9" path="/var/lib/kubelet/pods/883a5e33-4994-4674-84fe-14c2d13671e9/volumes" Nov 28 15:16:41 crc kubenswrapper[4857]: I1128 15:16:41.308579 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:16:41 crc kubenswrapper[4857]: I1128 15:16:41.309973 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:16:44 crc kubenswrapper[4857]: I1128 15:16:44.511801 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7pklh"] Nov 28 15:16:44 crc kubenswrapper[4857]: W1128 15:16:44.523531 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0f3985e8_1760_454f_bb07_709aa0645788.slice/crio-b4c5b1dd6c605b7d87a9fb18a9dd73f59f81567f926670a48c264af7556c16a8 WatchSource:0}: Error finding container b4c5b1dd6c605b7d87a9fb18a9dd73f59f81567f926670a48c264af7556c16a8: Status 404 returned error can't find the container with id b4c5b1dd6c605b7d87a9fb18a9dd73f59f81567f926670a48c264af7556c16a8 Nov 28 15:16:45 crc kubenswrapper[4857]: I1128 15:16:45.201312 4857 generic.go:334] "Generic (PLEG): container finished" podID="0f3985e8-1760-454f-bb07-709aa0645788" containerID="30b4358e11fa3b379adb371bb06f4f48e8e79a74f02f62ef616219b387085d2c" exitCode=0 Nov 28 15:16:45 crc kubenswrapper[4857]: I1128 15:16:45.210928 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7pklh" event={"ID":"0f3985e8-1760-454f-bb07-709aa0645788","Type":"ContainerDied","Data":"30b4358e11fa3b379adb371bb06f4f48e8e79a74f02f62ef616219b387085d2c"} Nov 28 15:16:45 crc kubenswrapper[4857]: I1128 15:16:45.211851 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7pklh" 
event={"ID":"0f3985e8-1760-454f-bb07-709aa0645788","Type":"ContainerStarted","Data":"b4c5b1dd6c605b7d87a9fb18a9dd73f59f81567f926670a48c264af7556c16a8"} Nov 28 15:16:45 crc kubenswrapper[4857]: I1128 15:16:45.216246 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-wclgf" event={"ID":"b0961f51-514c-40dd-80aa-fcb46e84f2eb","Type":"ContainerStarted","Data":"66346b8f98e982e563dec02c14677968c523817998277684428e91e7942d88a8"} Nov 28 15:16:45 crc kubenswrapper[4857]: I1128 15:16:45.297622 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-sync-wclgf" podStartSLOduration=4.978980292 podStartE2EDuration="13.297596808s" podCreationTimestamp="2025-11-28 15:16:32 +0000 UTC" firstStartedPulling="2025-11-28 15:16:35.660441473 +0000 UTC m=+6445.784382930" lastFinishedPulling="2025-11-28 15:16:43.979057999 +0000 UTC m=+6454.102999446" observedRunningTime="2025-11-28 15:16:45.282248958 +0000 UTC m=+6455.406190435" watchObservedRunningTime="2025-11-28 15:16:45.297596808 +0000 UTC m=+6455.421538245" Nov 28 15:16:45 crc kubenswrapper[4857]: I1128 15:16:45.745039 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-44vs5"] Nov 28 15:16:45 crc kubenswrapper[4857]: I1128 15:16:45.786937 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-44vs5" Nov 28 15:16:45 crc kubenswrapper[4857]: I1128 15:16:45.812687 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-44vs5"] Nov 28 15:16:45 crc kubenswrapper[4857]: I1128 15:16:45.967267 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a50a352-ecd4-4e36-898f-f5bf63eb4543-catalog-content\") pod \"redhat-operators-44vs5\" (UID: \"3a50a352-ecd4-4e36-898f-f5bf63eb4543\") " pod="openshift-marketplace/redhat-operators-44vs5" Nov 28 15:16:45 crc kubenswrapper[4857]: I1128 15:16:45.967415 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mfkfr\" (UniqueName: \"kubernetes.io/projected/3a50a352-ecd4-4e36-898f-f5bf63eb4543-kube-api-access-mfkfr\") pod \"redhat-operators-44vs5\" (UID: \"3a50a352-ecd4-4e36-898f-f5bf63eb4543\") " pod="openshift-marketplace/redhat-operators-44vs5" Nov 28 15:16:45 crc kubenswrapper[4857]: I1128 15:16:45.968292 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a50a352-ecd4-4e36-898f-f5bf63eb4543-utilities\") pod \"redhat-operators-44vs5\" (UID: \"3a50a352-ecd4-4e36-898f-f5bf63eb4543\") " pod="openshift-marketplace/redhat-operators-44vs5" Nov 28 15:16:46 crc kubenswrapper[4857]: I1128 15:16:46.070864 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a50a352-ecd4-4e36-898f-f5bf63eb4543-catalog-content\") pod \"redhat-operators-44vs5\" (UID: \"3a50a352-ecd4-4e36-898f-f5bf63eb4543\") " pod="openshift-marketplace/redhat-operators-44vs5" Nov 28 15:16:46 crc kubenswrapper[4857]: I1128 15:16:46.071390 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mfkfr\" (UniqueName: \"kubernetes.io/projected/3a50a352-ecd4-4e36-898f-f5bf63eb4543-kube-api-access-mfkfr\") pod \"redhat-operators-44vs5\" (UID: 
\"3a50a352-ecd4-4e36-898f-f5bf63eb4543\") " pod="openshift-marketplace/redhat-operators-44vs5" Nov 28 15:16:46 crc kubenswrapper[4857]: I1128 15:16:46.071532 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a50a352-ecd4-4e36-898f-f5bf63eb4543-utilities\") pod \"redhat-operators-44vs5\" (UID: \"3a50a352-ecd4-4e36-898f-f5bf63eb4543\") " pod="openshift-marketplace/redhat-operators-44vs5" Nov 28 15:16:46 crc kubenswrapper[4857]: I1128 15:16:46.071860 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a50a352-ecd4-4e36-898f-f5bf63eb4543-catalog-content\") pod \"redhat-operators-44vs5\" (UID: \"3a50a352-ecd4-4e36-898f-f5bf63eb4543\") " pod="openshift-marketplace/redhat-operators-44vs5" Nov 28 15:16:46 crc kubenswrapper[4857]: I1128 15:16:46.072066 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a50a352-ecd4-4e36-898f-f5bf63eb4543-utilities\") pod \"redhat-operators-44vs5\" (UID: \"3a50a352-ecd4-4e36-898f-f5bf63eb4543\") " pod="openshift-marketplace/redhat-operators-44vs5" Nov 28 15:16:46 crc kubenswrapper[4857]: I1128 15:16:46.095095 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mfkfr\" (UniqueName: \"kubernetes.io/projected/3a50a352-ecd4-4e36-898f-f5bf63eb4543-kube-api-access-mfkfr\") pod \"redhat-operators-44vs5\" (UID: \"3a50a352-ecd4-4e36-898f-f5bf63eb4543\") " pod="openshift-marketplace/redhat-operators-44vs5" Nov 28 15:16:46 crc kubenswrapper[4857]: I1128 15:16:46.120683 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-44vs5" Nov 28 15:16:46 crc kubenswrapper[4857]: I1128 15:16:46.261241 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7pklh" event={"ID":"0f3985e8-1760-454f-bb07-709aa0645788","Type":"ContainerStarted","Data":"579870de69494000a97069af51b6f0e3704f460963ba0e9ee5fae9aa505611ac"} Nov 28 15:16:46 crc kubenswrapper[4857]: I1128 15:16:46.677178 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-44vs5"] Nov 28 15:16:47 crc kubenswrapper[4857]: I1128 15:16:47.259893 4857 generic.go:334] "Generic (PLEG): container finished" podID="3a50a352-ecd4-4e36-898f-f5bf63eb4543" containerID="d7e416a48a49f4b6d30d19b271875b35bf84e51159da321857e9646cf5034e6d" exitCode=0 Nov 28 15:16:47 crc kubenswrapper[4857]: I1128 15:16:47.260114 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-44vs5" event={"ID":"3a50a352-ecd4-4e36-898f-f5bf63eb4543","Type":"ContainerDied","Data":"d7e416a48a49f4b6d30d19b271875b35bf84e51159da321857e9646cf5034e6d"} Nov 28 15:16:47 crc kubenswrapper[4857]: I1128 15:16:47.261011 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-44vs5" event={"ID":"3a50a352-ecd4-4e36-898f-f5bf63eb4543","Type":"ContainerStarted","Data":"d3ae82c70c3a8005005278daff2977d349a670a46cb7f451f110ed17978bc816"} Nov 28 15:16:48 crc kubenswrapper[4857]: I1128 15:16:48.278600 4857 generic.go:334] "Generic (PLEG): container finished" podID="0f3985e8-1760-454f-bb07-709aa0645788" containerID="579870de69494000a97069af51b6f0e3704f460963ba0e9ee5fae9aa505611ac" exitCode=0 Nov 28 15:16:48 crc kubenswrapper[4857]: I1128 15:16:48.278730 4857 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-marketplace/certified-operators-7pklh" event={"ID":"0f3985e8-1760-454f-bb07-709aa0645788","Type":"ContainerDied","Data":"579870de69494000a97069af51b6f0e3704f460963ba0e9ee5fae9aa505611ac"} Nov 28 15:16:48 crc kubenswrapper[4857]: I1128 15:16:48.287022 4857 generic.go:334] "Generic (PLEG): container finished" podID="b0961f51-514c-40dd-80aa-fcb46e84f2eb" containerID="66346b8f98e982e563dec02c14677968c523817998277684428e91e7942d88a8" exitCode=0 Nov 28 15:16:48 crc kubenswrapper[4857]: I1128 15:16:48.287090 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-wclgf" event={"ID":"b0961f51-514c-40dd-80aa-fcb46e84f2eb","Type":"ContainerDied","Data":"66346b8f98e982e563dec02c14677968c523817998277684428e91e7942d88a8"} Nov 28 15:16:49 crc kubenswrapper[4857]: I1128 15:16:49.304540 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7pklh" event={"ID":"0f3985e8-1760-454f-bb07-709aa0645788","Type":"ContainerStarted","Data":"bded5eacbf700fa971d848a444cba13cf6a1df992df7398412a65df8a66e6463"} Nov 28 15:16:49 crc kubenswrapper[4857]: I1128 15:16:49.307960 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-44vs5" event={"ID":"3a50a352-ecd4-4e36-898f-f5bf63eb4543","Type":"ContainerStarted","Data":"7a1ba20e7c1e108386d0c5ab2ee6fda4ad46ebb0304a269f19ca78e5215b5aa3"} Nov 28 15:16:49 crc kubenswrapper[4857]: I1128 15:16:49.333832 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-7pklh" podStartSLOduration=7.610221269 podStartE2EDuration="11.333205532s" podCreationTimestamp="2025-11-28 15:16:38 +0000 UTC" firstStartedPulling="2025-11-28 15:16:45.212917156 +0000 UTC m=+6455.336858623" lastFinishedPulling="2025-11-28 15:16:48.935901449 +0000 UTC m=+6459.059842886" observedRunningTime="2025-11-28 15:16:49.325470585 +0000 UTC m=+6459.449412032" watchObservedRunningTime="2025-11-28 15:16:49.333205532 +0000 UTC m=+6459.457147009" Nov 28 15:16:50 crc kubenswrapper[4857]: I1128 15:16:50.098260 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-wclgf" Nov 28 15:16:50 crc kubenswrapper[4857]: I1128 15:16:50.127051 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0961f51-514c-40dd-80aa-fcb46e84f2eb-config-data\") pod \"b0961f51-514c-40dd-80aa-fcb46e84f2eb\" (UID: \"b0961f51-514c-40dd-80aa-fcb46e84f2eb\") " Nov 28 15:16:50 crc kubenswrapper[4857]: I1128 15:16:50.127270 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b0961f51-514c-40dd-80aa-fcb46e84f2eb-scripts\") pod \"b0961f51-514c-40dd-80aa-fcb46e84f2eb\" (UID: \"b0961f51-514c-40dd-80aa-fcb46e84f2eb\") " Nov 28 15:16:50 crc kubenswrapper[4857]: I1128 15:16:50.127308 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0961f51-514c-40dd-80aa-fcb46e84f2eb-combined-ca-bundle\") pod \"b0961f51-514c-40dd-80aa-fcb46e84f2eb\" (UID: \"b0961f51-514c-40dd-80aa-fcb46e84f2eb\") " Nov 28 15:16:50 crc kubenswrapper[4857]: I1128 15:16:50.127463 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k5fqm\" (UniqueName: \"kubernetes.io/projected/b0961f51-514c-40dd-80aa-fcb46e84f2eb-kube-api-access-k5fqm\") pod \"b0961f51-514c-40dd-80aa-fcb46e84f2eb\" (UID: \"b0961f51-514c-40dd-80aa-fcb46e84f2eb\") " Nov 28 15:16:50 crc kubenswrapper[4857]: I1128 15:16:50.319054 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-wclgf" event={"ID":"b0961f51-514c-40dd-80aa-fcb46e84f2eb","Type":"ContainerDied","Data":"5f63c3bad9429321617321a37fa49bfc4868558ea24ee7d8658fcf4793305da6"} Nov 28 15:16:50 crc kubenswrapper[4857]: I1128 15:16:50.320127 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5f63c3bad9429321617321a37fa49bfc4868558ea24ee7d8658fcf4793305da6" Nov 28 15:16:50 crc kubenswrapper[4857]: I1128 15:16:50.319067 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-wclgf" Nov 28 15:16:50 crc kubenswrapper[4857]: I1128 15:16:50.701416 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b0961f51-514c-40dd-80aa-fcb46e84f2eb-scripts" (OuterVolumeSpecName: "scripts") pod "b0961f51-514c-40dd-80aa-fcb46e84f2eb" (UID: "b0961f51-514c-40dd-80aa-fcb46e84f2eb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:16:50 crc kubenswrapper[4857]: I1128 15:16:50.709183 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b0961f51-514c-40dd-80aa-fcb46e84f2eb-kube-api-access-k5fqm" (OuterVolumeSpecName: "kube-api-access-k5fqm") pod "b0961f51-514c-40dd-80aa-fcb46e84f2eb" (UID: "b0961f51-514c-40dd-80aa-fcb46e84f2eb"). InnerVolumeSpecName "kube-api-access-k5fqm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:16:50 crc kubenswrapper[4857]: I1128 15:16:50.747248 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b0961f51-514c-40dd-80aa-fcb46e84f2eb-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:16:50 crc kubenswrapper[4857]: I1128 15:16:50.747301 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k5fqm\" (UniqueName: \"kubernetes.io/projected/b0961f51-514c-40dd-80aa-fcb46e84f2eb-kube-api-access-k5fqm\") on node \"crc\" DevicePath \"\"" Nov 28 15:16:50 crc kubenswrapper[4857]: I1128 15:16:50.751293 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b0961f51-514c-40dd-80aa-fcb46e84f2eb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b0961f51-514c-40dd-80aa-fcb46e84f2eb" (UID: "b0961f51-514c-40dd-80aa-fcb46e84f2eb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:16:50 crc kubenswrapper[4857]: I1128 15:16:50.754017 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b0961f51-514c-40dd-80aa-fcb46e84f2eb-config-data" (OuterVolumeSpecName: "config-data") pod "b0961f51-514c-40dd-80aa-fcb46e84f2eb" (UID: "b0961f51-514c-40dd-80aa-fcb46e84f2eb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:16:50 crc kubenswrapper[4857]: I1128 15:16:50.849434 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0961f51-514c-40dd-80aa-fcb46e84f2eb-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:16:50 crc kubenswrapper[4857]: I1128 15:16:50.849498 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0961f51-514c-40dd-80aa-fcb46e84f2eb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:16:52 crc kubenswrapper[4857]: I1128 15:16:52.739264 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Nov 28 15:16:52 crc kubenswrapper[4857]: E1128 15:16:52.740096 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0961f51-514c-40dd-80aa-fcb46e84f2eb" containerName="aodh-db-sync" Nov 28 15:16:52 crc kubenswrapper[4857]: I1128 15:16:52.740114 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0961f51-514c-40dd-80aa-fcb46e84f2eb" containerName="aodh-db-sync" Nov 28 15:16:52 crc kubenswrapper[4857]: I1128 15:16:52.740368 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0961f51-514c-40dd-80aa-fcb46e84f2eb" containerName="aodh-db-sync" Nov 28 15:16:52 crc kubenswrapper[4857]: I1128 15:16:52.742745 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Nov 28 15:16:52 crc kubenswrapper[4857]: I1128 15:16:52.745812 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Nov 28 15:16:52 crc kubenswrapper[4857]: I1128 15:16:52.746105 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Nov 28 15:16:52 crc kubenswrapper[4857]: I1128 15:16:52.746524 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-2rv55" Nov 28 15:16:52 crc kubenswrapper[4857]: I1128 15:16:52.753670 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 28 15:16:52 crc kubenswrapper[4857]: I1128 15:16:52.799575 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zbmkr\" (UniqueName: \"kubernetes.io/projected/f113ffd3-ebde-4e6a-b805-25abf9dc82dc-kube-api-access-zbmkr\") pod \"aodh-0\" (UID: \"f113ffd3-ebde-4e6a-b805-25abf9dc82dc\") " pod="openstack/aodh-0" Nov 28 15:16:52 crc kubenswrapper[4857]: I1128 15:16:52.799849 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f113ffd3-ebde-4e6a-b805-25abf9dc82dc-config-data\") pod \"aodh-0\" (UID: \"f113ffd3-ebde-4e6a-b805-25abf9dc82dc\") " pod="openstack/aodh-0" Nov 28 15:16:52 crc kubenswrapper[4857]: I1128 15:16:52.799955 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f113ffd3-ebde-4e6a-b805-25abf9dc82dc-combined-ca-bundle\") pod \"aodh-0\" (UID: \"f113ffd3-ebde-4e6a-b805-25abf9dc82dc\") " pod="openstack/aodh-0" Nov 28 15:16:52 crc kubenswrapper[4857]: I1128 15:16:52.800062 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f113ffd3-ebde-4e6a-b805-25abf9dc82dc-scripts\") pod \"aodh-0\" (UID: \"f113ffd3-ebde-4e6a-b805-25abf9dc82dc\") " pod="openstack/aodh-0" Nov 28 15:16:52 crc kubenswrapper[4857]: I1128 15:16:52.902375 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f113ffd3-ebde-4e6a-b805-25abf9dc82dc-scripts\") pod \"aodh-0\" (UID: \"f113ffd3-ebde-4e6a-b805-25abf9dc82dc\") " pod="openstack/aodh-0" Nov 28 15:16:52 crc kubenswrapper[4857]: I1128 15:16:52.902579 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zbmkr\" (UniqueName: \"kubernetes.io/projected/f113ffd3-ebde-4e6a-b805-25abf9dc82dc-kube-api-access-zbmkr\") pod \"aodh-0\" (UID: \"f113ffd3-ebde-4e6a-b805-25abf9dc82dc\") " pod="openstack/aodh-0" Nov 28 15:16:52 crc kubenswrapper[4857]: I1128 15:16:52.902685 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f113ffd3-ebde-4e6a-b805-25abf9dc82dc-config-data\") pod \"aodh-0\" (UID: \"f113ffd3-ebde-4e6a-b805-25abf9dc82dc\") " pod="openstack/aodh-0" Nov 28 15:16:52 crc kubenswrapper[4857]: I1128 15:16:52.902721 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f113ffd3-ebde-4e6a-b805-25abf9dc82dc-combined-ca-bundle\") pod \"aodh-0\" (UID: \"f113ffd3-ebde-4e6a-b805-25abf9dc82dc\") " pod="openstack/aodh-0" Nov 28 15:16:53 crc kubenswrapper[4857]: 
I1128 15:16:53.044080 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zbmkr\" (UniqueName: \"kubernetes.io/projected/f113ffd3-ebde-4e6a-b805-25abf9dc82dc-kube-api-access-zbmkr\") pod \"aodh-0\" (UID: \"f113ffd3-ebde-4e6a-b805-25abf9dc82dc\") " pod="openstack/aodh-0" Nov 28 15:16:53 crc kubenswrapper[4857]: I1128 15:16:53.051382 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f113ffd3-ebde-4e6a-b805-25abf9dc82dc-scripts\") pod \"aodh-0\" (UID: \"f113ffd3-ebde-4e6a-b805-25abf9dc82dc\") " pod="openstack/aodh-0" Nov 28 15:16:53 crc kubenswrapper[4857]: I1128 15:16:53.051824 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f113ffd3-ebde-4e6a-b805-25abf9dc82dc-combined-ca-bundle\") pod \"aodh-0\" (UID: \"f113ffd3-ebde-4e6a-b805-25abf9dc82dc\") " pod="openstack/aodh-0" Nov 28 15:16:53 crc kubenswrapper[4857]: I1128 15:16:53.052876 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f113ffd3-ebde-4e6a-b805-25abf9dc82dc-config-data\") pod \"aodh-0\" (UID: \"f113ffd3-ebde-4e6a-b805-25abf9dc82dc\") " pod="openstack/aodh-0" Nov 28 15:16:53 crc kubenswrapper[4857]: I1128 15:16:53.087664 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Nov 28 15:16:53 crc kubenswrapper[4857]: I1128 15:16:53.788208 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 28 15:16:53 crc kubenswrapper[4857]: W1128 15:16:53.791613 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf113ffd3_ebde_4e6a_b805_25abf9dc82dc.slice/crio-4e456c5e9b8d33476d63364d23a430e450f54df79b907a861ba7574a69013d18 WatchSource:0}: Error finding container 4e456c5e9b8d33476d63364d23a430e450f54df79b907a861ba7574a69013d18: Status 404 returned error can't find the container with id 4e456c5e9b8d33476d63364d23a430e450f54df79b907a861ba7574a69013d18 Nov 28 15:16:54 crc kubenswrapper[4857]: I1128 15:16:54.385133 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"f113ffd3-ebde-4e6a-b805-25abf9dc82dc","Type":"ContainerStarted","Data":"4e456c5e9b8d33476d63364d23a430e450f54df79b907a861ba7574a69013d18"} Nov 28 15:16:55 crc kubenswrapper[4857]: I1128 15:16:55.363465 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:16:55 crc kubenswrapper[4857]: I1128 15:16:55.364072 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a845b9db-9faa-4cce-81ac-bbc703d4a9dc" containerName="ceilometer-central-agent" containerID="cri-o://3cf37b68ae118404e303a8ea13765f0330017b39ede37ec3471199e4292e0086" gracePeriod=30 Nov 28 15:16:55 crc kubenswrapper[4857]: I1128 15:16:55.364678 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a845b9db-9faa-4cce-81ac-bbc703d4a9dc" containerName="proxy-httpd" containerID="cri-o://03bf10235d37f4ec05bdfe7b9acc70d19041c144f495972846b127abd33bb3f3" gracePeriod=30 Nov 28 15:16:55 crc kubenswrapper[4857]: I1128 15:16:55.364743 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a845b9db-9faa-4cce-81ac-bbc703d4a9dc" containerName="sg-core" 
containerID="cri-o://9a9c66feaf593d3f58f0754ed79d458ffe23c7b06ae238cea81a11ef897bb57c" gracePeriod=30 Nov 28 15:16:55 crc kubenswrapper[4857]: I1128 15:16:55.364794 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a845b9db-9faa-4cce-81ac-bbc703d4a9dc" containerName="ceilometer-notification-agent" containerID="cri-o://900c563193bbd99eb8bedd21e00ed5e6bc5550a67cf048624dfd4f08a8a5e06b" gracePeriod=30 Nov 28 15:16:55 crc kubenswrapper[4857]: I1128 15:16:55.408148 4857 generic.go:334] "Generic (PLEG): container finished" podID="3a50a352-ecd4-4e36-898f-f5bf63eb4543" containerID="7a1ba20e7c1e108386d0c5ab2ee6fda4ad46ebb0304a269f19ca78e5215b5aa3" exitCode=0 Nov 28 15:16:55 crc kubenswrapper[4857]: I1128 15:16:55.408238 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-44vs5" event={"ID":"3a50a352-ecd4-4e36-898f-f5bf63eb4543","Type":"ContainerDied","Data":"7a1ba20e7c1e108386d0c5ab2ee6fda4ad46ebb0304a269f19ca78e5215b5aa3"} Nov 28 15:16:56 crc kubenswrapper[4857]: I1128 15:16:56.421083 4857 generic.go:334] "Generic (PLEG): container finished" podID="a845b9db-9faa-4cce-81ac-bbc703d4a9dc" containerID="03bf10235d37f4ec05bdfe7b9acc70d19041c144f495972846b127abd33bb3f3" exitCode=0 Nov 28 15:16:56 crc kubenswrapper[4857]: I1128 15:16:56.421654 4857 generic.go:334] "Generic (PLEG): container finished" podID="a845b9db-9faa-4cce-81ac-bbc703d4a9dc" containerID="9a9c66feaf593d3f58f0754ed79d458ffe23c7b06ae238cea81a11ef897bb57c" exitCode=2 Nov 28 15:16:56 crc kubenswrapper[4857]: I1128 15:16:56.421662 4857 generic.go:334] "Generic (PLEG): container finished" podID="a845b9db-9faa-4cce-81ac-bbc703d4a9dc" containerID="3cf37b68ae118404e303a8ea13765f0330017b39ede37ec3471199e4292e0086" exitCode=0 Nov 28 15:16:56 crc kubenswrapper[4857]: I1128 15:16:56.421701 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a845b9db-9faa-4cce-81ac-bbc703d4a9dc","Type":"ContainerDied","Data":"03bf10235d37f4ec05bdfe7b9acc70d19041c144f495972846b127abd33bb3f3"} Nov 28 15:16:56 crc kubenswrapper[4857]: I1128 15:16:56.421730 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a845b9db-9faa-4cce-81ac-bbc703d4a9dc","Type":"ContainerDied","Data":"9a9c66feaf593d3f58f0754ed79d458ffe23c7b06ae238cea81a11ef897bb57c"} Nov 28 15:16:56 crc kubenswrapper[4857]: I1128 15:16:56.421741 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a845b9db-9faa-4cce-81ac-bbc703d4a9dc","Type":"ContainerDied","Data":"3cf37b68ae118404e303a8ea13765f0330017b39ede37ec3471199e4292e0086"} Nov 28 15:16:56 crc kubenswrapper[4857]: I1128 15:16:56.423931 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"f113ffd3-ebde-4e6a-b805-25abf9dc82dc","Type":"ContainerStarted","Data":"a2491830d649828cd4f44c280922d98511dcd6d19d2d2fd1215cf355742eda9d"} Nov 28 15:16:57 crc kubenswrapper[4857]: I1128 15:16:57.438779 4857 generic.go:334] "Generic (PLEG): container finished" podID="a845b9db-9faa-4cce-81ac-bbc703d4a9dc" containerID="900c563193bbd99eb8bedd21e00ed5e6bc5550a67cf048624dfd4f08a8a5e06b" exitCode=0 Nov 28 15:16:57 crc kubenswrapper[4857]: I1128 15:16:57.438851 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a845b9db-9faa-4cce-81ac-bbc703d4a9dc","Type":"ContainerDied","Data":"900c563193bbd99eb8bedd21e00ed5e6bc5550a67cf048624dfd4f08a8a5e06b"} Nov 28 15:16:57 
Nov 28 15:16:57 crc kubenswrapper[4857]: I1128 15:16:57.465679 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-44vs5" podStartSLOduration=3.09877796 podStartE2EDuration="12.465660365s" podCreationTimestamp="2025-11-28 15:16:45 +0000 UTC" firstStartedPulling="2025-11-28 15:16:47.26167068 +0000 UTC m=+6457.385612117" lastFinishedPulling="2025-11-28 15:16:56.628553085 +0000 UTC m=+6466.752494522" observedRunningTime="2025-11-28 15:16:57.459639454 +0000 UTC m=+6467.583580891" watchObservedRunningTime="2025-11-28 15:16:57.465660365 +0000 UTC m=+6467.589601802"
Nov 28 15:16:57 crc kubenswrapper[4857]: I1128 15:16:57.695377 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 28 15:16:57 crc kubenswrapper[4857]: I1128 15:16:57.817073 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-config-data\") pod \"a845b9db-9faa-4cce-81ac-bbc703d4a9dc\" (UID: \"a845b9db-9faa-4cce-81ac-bbc703d4a9dc\") "
Nov 28 15:16:57 crc kubenswrapper[4857]: I1128 15:16:57.817312 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-combined-ca-bundle\") pod \"a845b9db-9faa-4cce-81ac-bbc703d4a9dc\" (UID: \"a845b9db-9faa-4cce-81ac-bbc703d4a9dc\") "
Nov 28 15:16:57 crc kubenswrapper[4857]: I1128 15:16:57.817404 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-run-httpd\") pod \"a845b9db-9faa-4cce-81ac-bbc703d4a9dc\" (UID: \"a845b9db-9faa-4cce-81ac-bbc703d4a9dc\") "
Nov 28 15:16:57 crc kubenswrapper[4857]: I1128 15:16:57.817475 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-scripts\") pod \"a845b9db-9faa-4cce-81ac-bbc703d4a9dc\" (UID: \"a845b9db-9faa-4cce-81ac-bbc703d4a9dc\") "
Nov 28 15:16:57 crc kubenswrapper[4857]: I1128 15:16:57.817530 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-sg-core-conf-yaml\") pod \"a845b9db-9faa-4cce-81ac-bbc703d4a9dc\" (UID: \"a845b9db-9faa-4cce-81ac-bbc703d4a9dc\") "
Nov 28 15:16:57 crc kubenswrapper[4857]: I1128 15:16:57.817584 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jfsc5\" (UniqueName: \"kubernetes.io/projected/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-kube-api-access-jfsc5\") pod \"a845b9db-9faa-4cce-81ac-bbc703d4a9dc\" (UID: \"a845b9db-9faa-4cce-81ac-bbc703d4a9dc\") "
Nov 28 15:16:57 crc kubenswrapper[4857]: I1128 15:16:57.818365 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "a845b9db-9faa-4cce-81ac-bbc703d4a9dc" (UID:
"a845b9db-9faa-4cce-81ac-bbc703d4a9dc"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:16:57 crc kubenswrapper[4857]: I1128 15:16:57.818972 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-log-httpd\") pod \"a845b9db-9faa-4cce-81ac-bbc703d4a9dc\" (UID: \"a845b9db-9faa-4cce-81ac-bbc703d4a9dc\") " Nov 28 15:16:57 crc kubenswrapper[4857]: I1128 15:16:57.819712 4857 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 15:16:57 crc kubenswrapper[4857]: I1128 15:16:57.820197 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "a845b9db-9faa-4cce-81ac-bbc703d4a9dc" (UID: "a845b9db-9faa-4cce-81ac-bbc703d4a9dc"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:16:57 crc kubenswrapper[4857]: I1128 15:16:57.822406 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-scripts" (OuterVolumeSpecName: "scripts") pod "a845b9db-9faa-4cce-81ac-bbc703d4a9dc" (UID: "a845b9db-9faa-4cce-81ac-bbc703d4a9dc"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:16:57 crc kubenswrapper[4857]: I1128 15:16:57.831006 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-kube-api-access-jfsc5" (OuterVolumeSpecName: "kube-api-access-jfsc5") pod "a845b9db-9faa-4cce-81ac-bbc703d4a9dc" (UID: "a845b9db-9faa-4cce-81ac-bbc703d4a9dc"). InnerVolumeSpecName "kube-api-access-jfsc5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:16:57 crc kubenswrapper[4857]: I1128 15:16:57.878240 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "a845b9db-9faa-4cce-81ac-bbc703d4a9dc" (UID: "a845b9db-9faa-4cce-81ac-bbc703d4a9dc"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:16:57 crc kubenswrapper[4857]: I1128 15:16:57.923462 4857 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 15:16:57 crc kubenswrapper[4857]: I1128 15:16:57.923500 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:16:57 crc kubenswrapper[4857]: I1128 15:16:57.923513 4857 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 15:16:57 crc kubenswrapper[4857]: I1128 15:16:57.923523 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jfsc5\" (UniqueName: \"kubernetes.io/projected/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-kube-api-access-jfsc5\") on node \"crc\" DevicePath \"\"" Nov 28 15:16:57 crc kubenswrapper[4857]: I1128 15:16:57.941195 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a845b9db-9faa-4cce-81ac-bbc703d4a9dc" (UID: "a845b9db-9faa-4cce-81ac-bbc703d4a9dc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:16:57 crc kubenswrapper[4857]: I1128 15:16:57.985829 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-config-data" (OuterVolumeSpecName: "config-data") pod "a845b9db-9faa-4cce-81ac-bbc703d4a9dc" (UID: "a845b9db-9faa-4cce-81ac-bbc703d4a9dc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.025303 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.025343 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a845b9db-9faa-4cce-81ac-bbc703d4a9dc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.459369 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.459472 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a845b9db-9faa-4cce-81ac-bbc703d4a9dc","Type":"ContainerDied","Data":"e89e5ea5b506a94dd9582d548e739b96d5e5f88f05c34b02671f29e8d6995232"} Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.459560 4857 scope.go:117] "RemoveContainer" containerID="03bf10235d37f4ec05bdfe7b9acc70d19041c144f495972846b127abd33bb3f3" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.466341 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"f113ffd3-ebde-4e6a-b805-25abf9dc82dc","Type":"ContainerStarted","Data":"c6f33f132c232c40e3ed245baa6e92024c8fa60449a4be802a916d10c215483f"} Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.498487 4857 scope.go:117] "RemoveContainer" containerID="9a9c66feaf593d3f58f0754ed79d458ffe23c7b06ae238cea81a11ef897bb57c" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.517854 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.529498 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.537632 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:16:58 crc kubenswrapper[4857]: E1128 15:16:58.538184 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a845b9db-9faa-4cce-81ac-bbc703d4a9dc" containerName="ceilometer-central-agent" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.538205 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a845b9db-9faa-4cce-81ac-bbc703d4a9dc" containerName="ceilometer-central-agent" Nov 28 15:16:58 crc kubenswrapper[4857]: E1128 15:16:58.538241 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a845b9db-9faa-4cce-81ac-bbc703d4a9dc" containerName="proxy-httpd" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.538249 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a845b9db-9faa-4cce-81ac-bbc703d4a9dc" containerName="proxy-httpd" Nov 28 15:16:58 crc kubenswrapper[4857]: E1128 15:16:58.538271 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a845b9db-9faa-4cce-81ac-bbc703d4a9dc" containerName="ceilometer-notification-agent" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.538280 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a845b9db-9faa-4cce-81ac-bbc703d4a9dc" containerName="ceilometer-notification-agent" Nov 28 15:16:58 crc kubenswrapper[4857]: E1128 15:16:58.538319 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a845b9db-9faa-4cce-81ac-bbc703d4a9dc" containerName="sg-core" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.538326 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="a845b9db-9faa-4cce-81ac-bbc703d4a9dc" containerName="sg-core" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.538536 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="a845b9db-9faa-4cce-81ac-bbc703d4a9dc" containerName="sg-core" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.538549 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="a845b9db-9faa-4cce-81ac-bbc703d4a9dc" containerName="ceilometer-notification-agent" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.538570 4857 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="a845b9db-9faa-4cce-81ac-bbc703d4a9dc" containerName="proxy-httpd" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.538583 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="a845b9db-9faa-4cce-81ac-bbc703d4a9dc" containerName="ceilometer-central-agent" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.540963 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.545473 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.545831 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.554080 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.561694 4857 scope.go:117] "RemoveContainer" containerID="900c563193bbd99eb8bedd21e00ed5e6bc5550a67cf048624dfd4f08a8a5e06b" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.641087 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/aa4d8101-f823-4bc9-8042-57dd8c70af27-run-httpd\") pod \"ceilometer-0\" (UID: \"aa4d8101-f823-4bc9-8042-57dd8c70af27\") " pod="openstack/ceilometer-0" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.641201 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa4d8101-f823-4bc9-8042-57dd8c70af27-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"aa4d8101-f823-4bc9-8042-57dd8c70af27\") " pod="openstack/ceilometer-0" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.641238 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa4d8101-f823-4bc9-8042-57dd8c70af27-config-data\") pod \"ceilometer-0\" (UID: \"aa4d8101-f823-4bc9-8042-57dd8c70af27\") " pod="openstack/ceilometer-0" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.641283 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/aa4d8101-f823-4bc9-8042-57dd8c70af27-log-httpd\") pod \"ceilometer-0\" (UID: \"aa4d8101-f823-4bc9-8042-57dd8c70af27\") " pod="openstack/ceilometer-0" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.641348 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aa4d8101-f823-4bc9-8042-57dd8c70af27-scripts\") pod \"ceilometer-0\" (UID: \"aa4d8101-f823-4bc9-8042-57dd8c70af27\") " pod="openstack/ceilometer-0" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.641378 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/aa4d8101-f823-4bc9-8042-57dd8c70af27-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"aa4d8101-f823-4bc9-8042-57dd8c70af27\") " pod="openstack/ceilometer-0" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.641399 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ncr4g\" 
(UniqueName: \"kubernetes.io/projected/aa4d8101-f823-4bc9-8042-57dd8c70af27-kube-api-access-ncr4g\") pod \"ceilometer-0\" (UID: \"aa4d8101-f823-4bc9-8042-57dd8c70af27\") " pod="openstack/ceilometer-0" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.669351 4857 scope.go:117] "RemoveContainer" containerID="3cf37b68ae118404e303a8ea13765f0330017b39ede37ec3471199e4292e0086" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.743085 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/aa4d8101-f823-4bc9-8042-57dd8c70af27-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"aa4d8101-f823-4bc9-8042-57dd8c70af27\") " pod="openstack/ceilometer-0" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.743143 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ncr4g\" (UniqueName: \"kubernetes.io/projected/aa4d8101-f823-4bc9-8042-57dd8c70af27-kube-api-access-ncr4g\") pod \"ceilometer-0\" (UID: \"aa4d8101-f823-4bc9-8042-57dd8c70af27\") " pod="openstack/ceilometer-0" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.743285 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/aa4d8101-f823-4bc9-8042-57dd8c70af27-run-httpd\") pod \"ceilometer-0\" (UID: \"aa4d8101-f823-4bc9-8042-57dd8c70af27\") " pod="openstack/ceilometer-0" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.743351 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa4d8101-f823-4bc9-8042-57dd8c70af27-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"aa4d8101-f823-4bc9-8042-57dd8c70af27\") " pod="openstack/ceilometer-0" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.743385 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa4d8101-f823-4bc9-8042-57dd8c70af27-config-data\") pod \"ceilometer-0\" (UID: \"aa4d8101-f823-4bc9-8042-57dd8c70af27\") " pod="openstack/ceilometer-0" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.743429 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/aa4d8101-f823-4bc9-8042-57dd8c70af27-log-httpd\") pod \"ceilometer-0\" (UID: \"aa4d8101-f823-4bc9-8042-57dd8c70af27\") " pod="openstack/ceilometer-0" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.743486 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aa4d8101-f823-4bc9-8042-57dd8c70af27-scripts\") pod \"ceilometer-0\" (UID: \"aa4d8101-f823-4bc9-8042-57dd8c70af27\") " pod="openstack/ceilometer-0" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.744663 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/aa4d8101-f823-4bc9-8042-57dd8c70af27-run-httpd\") pod \"ceilometer-0\" (UID: \"aa4d8101-f823-4bc9-8042-57dd8c70af27\") " pod="openstack/ceilometer-0" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.745849 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/aa4d8101-f823-4bc9-8042-57dd8c70af27-log-httpd\") pod \"ceilometer-0\" (UID: \"aa4d8101-f823-4bc9-8042-57dd8c70af27\") " pod="openstack/ceilometer-0" Nov 28 15:16:58 crc 
kubenswrapper[4857]: I1128 15:16:58.750434 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/aa4d8101-f823-4bc9-8042-57dd8c70af27-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"aa4d8101-f823-4bc9-8042-57dd8c70af27\") " pod="openstack/ceilometer-0" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.751284 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aa4d8101-f823-4bc9-8042-57dd8c70af27-scripts\") pod \"ceilometer-0\" (UID: \"aa4d8101-f823-4bc9-8042-57dd8c70af27\") " pod="openstack/ceilometer-0" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.755883 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa4d8101-f823-4bc9-8042-57dd8c70af27-config-data\") pod \"ceilometer-0\" (UID: \"aa4d8101-f823-4bc9-8042-57dd8c70af27\") " pod="openstack/ceilometer-0" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.757651 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa4d8101-f823-4bc9-8042-57dd8c70af27-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"aa4d8101-f823-4bc9-8042-57dd8c70af27\") " pod="openstack/ceilometer-0" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.765166 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ncr4g\" (UniqueName: \"kubernetes.io/projected/aa4d8101-f823-4bc9-8042-57dd8c70af27-kube-api-access-ncr4g\") pod \"ceilometer-0\" (UID: \"aa4d8101-f823-4bc9-8042-57dd8c70af27\") " pod="openstack/ceilometer-0" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.860462 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.919352 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-7pklh" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.919421 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-7pklh" Nov 28 15:16:58 crc kubenswrapper[4857]: I1128 15:16:58.985030 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-7pklh" Nov 28 15:16:59 crc kubenswrapper[4857]: I1128 15:16:59.201310 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-b599l"] Nov 28 15:16:59 crc kubenswrapper[4857]: I1128 15:16:59.204474 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-b599l" Nov 28 15:16:59 crc kubenswrapper[4857]: I1128 15:16:59.219924 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-b599l"] Nov 28 15:16:59 crc kubenswrapper[4857]: I1128 15:16:59.359399 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6twv7\" (UniqueName: \"kubernetes.io/projected/cb7a99d7-5bcd-475b-b433-943e8e1cca19-kube-api-access-6twv7\") pod \"community-operators-b599l\" (UID: \"cb7a99d7-5bcd-475b-b433-943e8e1cca19\") " pod="openshift-marketplace/community-operators-b599l" Nov 28 15:16:59 crc kubenswrapper[4857]: I1128 15:16:59.359526 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb7a99d7-5bcd-475b-b433-943e8e1cca19-utilities\") pod \"community-operators-b599l\" (UID: \"cb7a99d7-5bcd-475b-b433-943e8e1cca19\") " pod="openshift-marketplace/community-operators-b599l" Nov 28 15:16:59 crc kubenswrapper[4857]: I1128 15:16:59.359676 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb7a99d7-5bcd-475b-b433-943e8e1cca19-catalog-content\") pod \"community-operators-b599l\" (UID: \"cb7a99d7-5bcd-475b-b433-943e8e1cca19\") " pod="openshift-marketplace/community-operators-b599l" Nov 28 15:16:59 crc kubenswrapper[4857]: I1128 15:16:59.394273 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:16:59 crc kubenswrapper[4857]: W1128 15:16:59.399134 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaa4d8101_f823_4bc9_8042_57dd8c70af27.slice/crio-31976e9e81f79451c70b9a5e71efd22ed765ded0557cc09c7d18d92d391696da WatchSource:0}: Error finding container 31976e9e81f79451c70b9a5e71efd22ed765ded0557cc09c7d18d92d391696da: Status 404 returned error can't find the container with id 31976e9e81f79451c70b9a5e71efd22ed765ded0557cc09c7d18d92d391696da Nov 28 15:16:59 crc kubenswrapper[4857]: I1128 15:16:59.463740 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6twv7\" (UniqueName: \"kubernetes.io/projected/cb7a99d7-5bcd-475b-b433-943e8e1cca19-kube-api-access-6twv7\") pod \"community-operators-b599l\" (UID: \"cb7a99d7-5bcd-475b-b433-943e8e1cca19\") " pod="openshift-marketplace/community-operators-b599l" Nov 28 15:16:59 crc kubenswrapper[4857]: I1128 15:16:59.464275 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb7a99d7-5bcd-475b-b433-943e8e1cca19-utilities\") pod \"community-operators-b599l\" (UID: \"cb7a99d7-5bcd-475b-b433-943e8e1cca19\") " pod="openshift-marketplace/community-operators-b599l" Nov 28 15:16:59 crc kubenswrapper[4857]: I1128 15:16:59.464374 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb7a99d7-5bcd-475b-b433-943e8e1cca19-catalog-content\") pod \"community-operators-b599l\" (UID: \"cb7a99d7-5bcd-475b-b433-943e8e1cca19\") " pod="openshift-marketplace/community-operators-b599l" Nov 28 15:16:59 crc kubenswrapper[4857]: I1128 15:16:59.465055 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/cb7a99d7-5bcd-475b-b433-943e8e1cca19-catalog-content\") pod \"community-operators-b599l\" (UID: \"cb7a99d7-5bcd-475b-b433-943e8e1cca19\") " pod="openshift-marketplace/community-operators-b599l" Nov 28 15:16:59 crc kubenswrapper[4857]: I1128 15:16:59.465471 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb7a99d7-5bcd-475b-b433-943e8e1cca19-utilities\") pod \"community-operators-b599l\" (UID: \"cb7a99d7-5bcd-475b-b433-943e8e1cca19\") " pod="openshift-marketplace/community-operators-b599l" Nov 28 15:16:59 crc kubenswrapper[4857]: I1128 15:16:59.484866 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"aa4d8101-f823-4bc9-8042-57dd8c70af27","Type":"ContainerStarted","Data":"31976e9e81f79451c70b9a5e71efd22ed765ded0557cc09c7d18d92d391696da"} Nov 28 15:16:59 crc kubenswrapper[4857]: I1128 15:16:59.491001 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6twv7\" (UniqueName: \"kubernetes.io/projected/cb7a99d7-5bcd-475b-b433-943e8e1cca19-kube-api-access-6twv7\") pod \"community-operators-b599l\" (UID: \"cb7a99d7-5bcd-475b-b433-943e8e1cca19\") " pod="openshift-marketplace/community-operators-b599l" Nov 28 15:16:59 crc kubenswrapper[4857]: I1128 15:16:59.533132 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b599l" Nov 28 15:16:59 crc kubenswrapper[4857]: I1128 15:16:59.565532 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-7pklh" Nov 28 15:17:00 crc kubenswrapper[4857]: I1128 15:17:00.261524 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a845b9db-9faa-4cce-81ac-bbc703d4a9dc" path="/var/lib/kubelet/pods/a845b9db-9faa-4cce-81ac-bbc703d4a9dc/volumes" Nov 28 15:17:00 crc kubenswrapper[4857]: I1128 15:17:00.399378 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-b599l"] Nov 28 15:17:00 crc kubenswrapper[4857]: W1128 15:17:00.411233 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcb7a99d7_5bcd_475b_b433_943e8e1cca19.slice/crio-e5bcb4323c6f13d011aa52a6481520fa211b40b093157f18c9bfc930e794eb79 WatchSource:0}: Error finding container e5bcb4323c6f13d011aa52a6481520fa211b40b093157f18c9bfc930e794eb79: Status 404 returned error can't find the container with id e5bcb4323c6f13d011aa52a6481520fa211b40b093157f18c9bfc930e794eb79 Nov 28 15:17:00 crc kubenswrapper[4857]: I1128 15:17:00.505983 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"aa4d8101-f823-4bc9-8042-57dd8c70af27","Type":"ContainerStarted","Data":"aaf55408d02a69651e640ca6158cbcb2120b129ecabe87cf828d559c4b6657c1"} Nov 28 15:17:00 crc kubenswrapper[4857]: I1128 15:17:00.508539 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"f113ffd3-ebde-4e6a-b805-25abf9dc82dc","Type":"ContainerStarted","Data":"a0d178a348bb771f1e6768f934867ea0a1b613c370340fc6bdc1e1426f3315e6"} Nov 28 15:17:00 crc kubenswrapper[4857]: I1128 15:17:00.510568 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b599l" 
event={"ID":"cb7a99d7-5bcd-475b-b433-943e8e1cca19","Type":"ContainerStarted","Data":"e5bcb4323c6f13d011aa52a6481520fa211b40b093157f18c9bfc930e794eb79"} Nov 28 15:17:01 crc kubenswrapper[4857]: I1128 15:17:01.522590 4857 generic.go:334] "Generic (PLEG): container finished" podID="cb7a99d7-5bcd-475b-b433-943e8e1cca19" containerID="0ea2e4ec2e6bb3000cbf1e7fe5a01c7c0451286b544c7d42ce6e94b8709c0626" exitCode=0 Nov 28 15:17:01 crc kubenswrapper[4857]: I1128 15:17:01.522714 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b599l" event={"ID":"cb7a99d7-5bcd-475b-b433-943e8e1cca19","Type":"ContainerDied","Data":"0ea2e4ec2e6bb3000cbf1e7fe5a01c7c0451286b544c7d42ce6e94b8709c0626"} Nov 28 15:17:01 crc kubenswrapper[4857]: I1128 15:17:01.534769 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"aa4d8101-f823-4bc9-8042-57dd8c70af27","Type":"ContainerStarted","Data":"06898a54470bd38c2b2f99a5454dc46b10250484593d05c664daeb8989cbdfdd"} Nov 28 15:17:02 crc kubenswrapper[4857]: I1128 15:17:02.188454 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7pklh"] Nov 28 15:17:02 crc kubenswrapper[4857]: I1128 15:17:02.188712 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-7pklh" podUID="0f3985e8-1760-454f-bb07-709aa0645788" containerName="registry-server" containerID="cri-o://bded5eacbf700fa971d848a444cba13cf6a1df992df7398412a65df8a66e6463" gracePeriod=2 Nov 28 15:17:02 crc kubenswrapper[4857]: I1128 15:17:02.554623 4857 generic.go:334] "Generic (PLEG): container finished" podID="0f3985e8-1760-454f-bb07-709aa0645788" containerID="bded5eacbf700fa971d848a444cba13cf6a1df992df7398412a65df8a66e6463" exitCode=0 Nov 28 15:17:02 crc kubenswrapper[4857]: I1128 15:17:02.555622 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7pklh" event={"ID":"0f3985e8-1760-454f-bb07-709aa0645788","Type":"ContainerDied","Data":"bded5eacbf700fa971d848a444cba13cf6a1df992df7398412a65df8a66e6463"} Nov 28 15:17:02 crc kubenswrapper[4857]: I1128 15:17:02.852774 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7pklh" Nov 28 15:17:02 crc kubenswrapper[4857]: I1128 15:17:02.962108 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0f3985e8-1760-454f-bb07-709aa0645788-catalog-content\") pod \"0f3985e8-1760-454f-bb07-709aa0645788\" (UID: \"0f3985e8-1760-454f-bb07-709aa0645788\") " Nov 28 15:17:02 crc kubenswrapper[4857]: I1128 15:17:02.962262 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t224n\" (UniqueName: \"kubernetes.io/projected/0f3985e8-1760-454f-bb07-709aa0645788-kube-api-access-t224n\") pod \"0f3985e8-1760-454f-bb07-709aa0645788\" (UID: \"0f3985e8-1760-454f-bb07-709aa0645788\") " Nov 28 15:17:02 crc kubenswrapper[4857]: I1128 15:17:02.962577 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0f3985e8-1760-454f-bb07-709aa0645788-utilities\") pod \"0f3985e8-1760-454f-bb07-709aa0645788\" (UID: \"0f3985e8-1760-454f-bb07-709aa0645788\") " Nov 28 15:17:02 crc kubenswrapper[4857]: I1128 15:17:02.963462 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0f3985e8-1760-454f-bb07-709aa0645788-utilities" (OuterVolumeSpecName: "utilities") pod "0f3985e8-1760-454f-bb07-709aa0645788" (UID: "0f3985e8-1760-454f-bb07-709aa0645788"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:17:02 crc kubenswrapper[4857]: I1128 15:17:02.970654 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f3985e8-1760-454f-bb07-709aa0645788-kube-api-access-t224n" (OuterVolumeSpecName: "kube-api-access-t224n") pod "0f3985e8-1760-454f-bb07-709aa0645788" (UID: "0f3985e8-1760-454f-bb07-709aa0645788"). InnerVolumeSpecName "kube-api-access-t224n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:17:03 crc kubenswrapper[4857]: I1128 15:17:03.034167 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0f3985e8-1760-454f-bb07-709aa0645788-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0f3985e8-1760-454f-bb07-709aa0645788" (UID: "0f3985e8-1760-454f-bb07-709aa0645788"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:17:03 crc kubenswrapper[4857]: I1128 15:17:03.066995 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0f3985e8-1760-454f-bb07-709aa0645788-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:17:03 crc kubenswrapper[4857]: I1128 15:17:03.067062 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t224n\" (UniqueName: \"kubernetes.io/projected/0f3985e8-1760-454f-bb07-709aa0645788-kube-api-access-t224n\") on node \"crc\" DevicePath \"\"" Nov 28 15:17:03 crc kubenswrapper[4857]: I1128 15:17:03.067076 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0f3985e8-1760-454f-bb07-709aa0645788-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:17:03 crc kubenswrapper[4857]: I1128 15:17:03.578690 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"f113ffd3-ebde-4e6a-b805-25abf9dc82dc","Type":"ContainerStarted","Data":"ccafe309df23938867bdea50ed9190089f279276df75d26dfe8b6eab3e5a14ba"} Nov 28 15:17:03 crc kubenswrapper[4857]: I1128 15:17:03.582662 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7pklh" event={"ID":"0f3985e8-1760-454f-bb07-709aa0645788","Type":"ContainerDied","Data":"b4c5b1dd6c605b7d87a9fb18a9dd73f59f81567f926670a48c264af7556c16a8"} Nov 28 15:17:03 crc kubenswrapper[4857]: I1128 15:17:03.582732 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7pklh" Nov 28 15:17:03 crc kubenswrapper[4857]: I1128 15:17:03.582849 4857 scope.go:117] "RemoveContainer" containerID="bded5eacbf700fa971d848a444cba13cf6a1df992df7398412a65df8a66e6463" Nov 28 15:17:03 crc kubenswrapper[4857]: I1128 15:17:03.611577 4857 scope.go:117] "RemoveContainer" containerID="579870de69494000a97069af51b6f0e3704f460963ba0e9ee5fae9aa505611ac" Nov 28 15:17:03 crc kubenswrapper[4857]: I1128 15:17:03.649515 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7pklh"] Nov 28 15:17:03 crc kubenswrapper[4857]: I1128 15:17:03.663489 4857 scope.go:117] "RemoveContainer" containerID="30b4358e11fa3b379adb371bb06f4f48e8e79a74f02f62ef616219b387085d2c" Nov 28 15:17:03 crc kubenswrapper[4857]: I1128 15:17:03.667926 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-7pklh"] Nov 28 15:17:04 crc kubenswrapper[4857]: I1128 15:17:04.250629 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0f3985e8-1760-454f-bb07-709aa0645788" path="/var/lib/kubelet/pods/0f3985e8-1760-454f-bb07-709aa0645788/volumes" Nov 28 15:17:04 crc kubenswrapper[4857]: I1128 15:17:04.597236 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"aa4d8101-f823-4bc9-8042-57dd8c70af27","Type":"ContainerStarted","Data":"c476c9b6874639293a506752a48ab47d73f550024729b6bc830d0fdfeb201722"} Nov 28 15:17:04 crc kubenswrapper[4857]: I1128 15:17:04.602172 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b599l" event={"ID":"cb7a99d7-5bcd-475b-b433-943e8e1cca19","Type":"ContainerStarted","Data":"2520a6a2850619c6ba57406d2f2434373a9912b5d0ebdc4b805b487a29ba8adc"} Nov 28 15:17:04 crc kubenswrapper[4857]: I1128 15:17:04.639789 4857 pod_startup_latency_tracker.go:104] "Observed pod 
startup duration" pod="openstack/aodh-0" podStartSLOduration=4.204842169 podStartE2EDuration="12.63975167s" podCreationTimestamp="2025-11-28 15:16:52 +0000 UTC" firstStartedPulling="2025-11-28 15:16:53.795179934 +0000 UTC m=+6463.919121381" lastFinishedPulling="2025-11-28 15:17:02.230089445 +0000 UTC m=+6472.354030882" observedRunningTime="2025-11-28 15:17:04.626015013 +0000 UTC m=+6474.749956470" watchObservedRunningTime="2025-11-28 15:17:04.63975167 +0000 UTC m=+6474.763693107" Nov 28 15:17:06 crc kubenswrapper[4857]: I1128 15:17:06.121685 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-44vs5" Nov 28 15:17:06 crc kubenswrapper[4857]: I1128 15:17:06.123074 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-44vs5" Nov 28 15:17:06 crc kubenswrapper[4857]: I1128 15:17:06.189356 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-44vs5" Nov 28 15:17:06 crc kubenswrapper[4857]: I1128 15:17:06.630269 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"aa4d8101-f823-4bc9-8042-57dd8c70af27","Type":"ContainerStarted","Data":"1bcdbe82eec9679518b5db412823dabed0acea0c5b52fa944ee72c0e314d0470"} Nov 28 15:17:06 crc kubenswrapper[4857]: I1128 15:17:06.631888 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 15:17:06 crc kubenswrapper[4857]: I1128 15:17:06.634838 4857 generic.go:334] "Generic (PLEG): container finished" podID="cb7a99d7-5bcd-475b-b433-943e8e1cca19" containerID="2520a6a2850619c6ba57406d2f2434373a9912b5d0ebdc4b805b487a29ba8adc" exitCode=0 Nov 28 15:17:06 crc kubenswrapper[4857]: I1128 15:17:06.636011 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b599l" event={"ID":"cb7a99d7-5bcd-475b-b433-943e8e1cca19","Type":"ContainerDied","Data":"2520a6a2850619c6ba57406d2f2434373a9912b5d0ebdc4b805b487a29ba8adc"} Nov 28 15:17:06 crc kubenswrapper[4857]: I1128 15:17:06.678477 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.8348056160000001 podStartE2EDuration="8.678450445s" podCreationTimestamp="2025-11-28 15:16:58 +0000 UTC" firstStartedPulling="2025-11-28 15:16:59.401844121 +0000 UTC m=+6469.525785558" lastFinishedPulling="2025-11-28 15:17:06.24548895 +0000 UTC m=+6476.369430387" observedRunningTime="2025-11-28 15:17:06.663230588 +0000 UTC m=+6476.787172065" watchObservedRunningTime="2025-11-28 15:17:06.678450445 +0000 UTC m=+6476.802391882" Nov 28 15:17:06 crc kubenswrapper[4857]: I1128 15:17:06.719703 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-44vs5" Nov 28 15:17:07 crc kubenswrapper[4857]: I1128 15:17:07.657514 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b599l" event={"ID":"cb7a99d7-5bcd-475b-b433-943e8e1cca19","Type":"ContainerStarted","Data":"b4156be6c023204165404b7193e84efc8c662d52b4839f5941581ecf99122978"} Nov 28 15:17:07 crc kubenswrapper[4857]: I1128 15:17:07.699060 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-b599l" podStartSLOduration=3.163619151 podStartE2EDuration="8.699032105s" podCreationTimestamp="2025-11-28 15:16:59 +0000 UTC" firstStartedPulling="2025-11-28 
15:17:01.532720499 +0000 UTC m=+6471.656661936" lastFinishedPulling="2025-11-28 15:17:07.068133453 +0000 UTC m=+6477.192074890" observedRunningTime="2025-11-28 15:17:07.684066416 +0000 UTC m=+6477.808007863" watchObservedRunningTime="2025-11-28 15:17:07.699032105 +0000 UTC m=+6477.822973552" Nov 28 15:17:08 crc kubenswrapper[4857]: I1128 15:17:08.603488 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-44vs5"] Nov 28 15:17:09 crc kubenswrapper[4857]: I1128 15:17:09.534077 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-b599l" Nov 28 15:17:09 crc kubenswrapper[4857]: I1128 15:17:09.534497 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-b599l" Nov 28 15:17:09 crc kubenswrapper[4857]: I1128 15:17:09.682679 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-44vs5" podUID="3a50a352-ecd4-4e36-898f-f5bf63eb4543" containerName="registry-server" containerID="cri-o://ec795c6475e66102dac997198e0bc0d615cf8bf566be15ffa61305e036641637" gracePeriod=2 Nov 28 15:17:10 crc kubenswrapper[4857]: I1128 15:17:10.632471 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-b599l" podUID="cb7a99d7-5bcd-475b-b433-943e8e1cca19" containerName="registry-server" probeResult="failure" output=< Nov 28 15:17:10 crc kubenswrapper[4857]: timeout: failed to connect service ":50051" within 1s Nov 28 15:17:10 crc kubenswrapper[4857]: > Nov 28 15:17:10 crc kubenswrapper[4857]: I1128 15:17:10.669230 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-db-create-kpv9k"] Nov 28 15:17:10 crc kubenswrapper[4857]: E1128 15:17:10.669686 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f3985e8-1760-454f-bb07-709aa0645788" containerName="extract-utilities" Nov 28 15:17:10 crc kubenswrapper[4857]: I1128 15:17:10.669705 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f3985e8-1760-454f-bb07-709aa0645788" containerName="extract-utilities" Nov 28 15:17:10 crc kubenswrapper[4857]: E1128 15:17:10.669729 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f3985e8-1760-454f-bb07-709aa0645788" containerName="extract-content" Nov 28 15:17:10 crc kubenswrapper[4857]: I1128 15:17:10.669736 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f3985e8-1760-454f-bb07-709aa0645788" containerName="extract-content" Nov 28 15:17:10 crc kubenswrapper[4857]: E1128 15:17:10.669762 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f3985e8-1760-454f-bb07-709aa0645788" containerName="registry-server" Nov 28 15:17:10 crc kubenswrapper[4857]: I1128 15:17:10.669768 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f3985e8-1760-454f-bb07-709aa0645788" containerName="registry-server" Nov 28 15:17:10 crc kubenswrapper[4857]: I1128 15:17:10.670003 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f3985e8-1760-454f-bb07-709aa0645788" containerName="registry-server" Nov 28 15:17:10 crc kubenswrapper[4857]: I1128 15:17:10.670782 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-db-create-kpv9k" Nov 28 15:17:10 crc kubenswrapper[4857]: I1128 15:17:10.687256 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-create-kpv9k"] Nov 28 15:17:10 crc kubenswrapper[4857]: I1128 15:17:10.694247 4857 generic.go:334] "Generic (PLEG): container finished" podID="3a50a352-ecd4-4e36-898f-f5bf63eb4543" containerID="ec795c6475e66102dac997198e0bc0d615cf8bf566be15ffa61305e036641637" exitCode=0 Nov 28 15:17:10 crc kubenswrapper[4857]: I1128 15:17:10.694289 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-44vs5" event={"ID":"3a50a352-ecd4-4e36-898f-f5bf63eb4543","Type":"ContainerDied","Data":"ec795c6475e66102dac997198e0bc0d615cf8bf566be15ffa61305e036641637"} Nov 28 15:17:10 crc kubenswrapper[4857]: I1128 15:17:10.694318 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-44vs5" event={"ID":"3a50a352-ecd4-4e36-898f-f5bf63eb4543","Type":"ContainerDied","Data":"d3ae82c70c3a8005005278daff2977d349a670a46cb7f451f110ed17978bc816"} Nov 28 15:17:10 crc kubenswrapper[4857]: I1128 15:17:10.694330 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d3ae82c70c3a8005005278daff2977d349a670a46cb7f451f110ed17978bc816" Nov 28 15:17:10 crc kubenswrapper[4857]: I1128 15:17:10.713236 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-44vs5" Nov 28 15:17:10 crc kubenswrapper[4857]: I1128 15:17:10.774715 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-7585-account-create-update-5ttmq"] Nov 28 15:17:10 crc kubenswrapper[4857]: E1128 15:17:10.775309 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a50a352-ecd4-4e36-898f-f5bf63eb4543" containerName="registry-server" Nov 28 15:17:10 crc kubenswrapper[4857]: I1128 15:17:10.775327 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a50a352-ecd4-4e36-898f-f5bf63eb4543" containerName="registry-server" Nov 28 15:17:10 crc kubenswrapper[4857]: E1128 15:17:10.775367 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a50a352-ecd4-4e36-898f-f5bf63eb4543" containerName="extract-content" Nov 28 15:17:10 crc kubenswrapper[4857]: I1128 15:17:10.775374 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a50a352-ecd4-4e36-898f-f5bf63eb4543" containerName="extract-content" Nov 28 15:17:10 crc kubenswrapper[4857]: E1128 15:17:10.775389 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a50a352-ecd4-4e36-898f-f5bf63eb4543" containerName="extract-utilities" Nov 28 15:17:10 crc kubenswrapper[4857]: I1128 15:17:10.775396 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a50a352-ecd4-4e36-898f-f5bf63eb4543" containerName="extract-utilities" Nov 28 15:17:10 crc kubenswrapper[4857]: I1128 15:17:10.775611 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a50a352-ecd4-4e36-898f-f5bf63eb4543" containerName="registry-server" Nov 28 15:17:10 crc kubenswrapper[4857]: I1128 15:17:10.776992 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-7585-account-create-update-5ttmq" Nov 28 15:17:10 crc kubenswrapper[4857]: I1128 15:17:10.779740 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-db-secret" Nov 28 15:17:10 crc kubenswrapper[4857]: I1128 15:17:10.787462 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-7585-account-create-update-5ttmq"] Nov 28 15:17:10 crc kubenswrapper[4857]: I1128 15:17:10.803290 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mfkfr\" (UniqueName: \"kubernetes.io/projected/3a50a352-ecd4-4e36-898f-f5bf63eb4543-kube-api-access-mfkfr\") pod \"3a50a352-ecd4-4e36-898f-f5bf63eb4543\" (UID: \"3a50a352-ecd4-4e36-898f-f5bf63eb4543\") " Nov 28 15:17:10 crc kubenswrapper[4857]: I1128 15:17:10.803385 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a50a352-ecd4-4e36-898f-f5bf63eb4543-utilities\") pod \"3a50a352-ecd4-4e36-898f-f5bf63eb4543\" (UID: \"3a50a352-ecd4-4e36-898f-f5bf63eb4543\") " Nov 28 15:17:10 crc kubenswrapper[4857]: I1128 15:17:10.803591 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a50a352-ecd4-4e36-898f-f5bf63eb4543-catalog-content\") pod \"3a50a352-ecd4-4e36-898f-f5bf63eb4543\" (UID: \"3a50a352-ecd4-4e36-898f-f5bf63eb4543\") " Nov 28 15:17:10 crc kubenswrapper[4857]: I1128 15:17:10.804272 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pv5nv\" (UniqueName: \"kubernetes.io/projected/6720d85b-09ef-423d-86e6-5eff98a5cfce-kube-api-access-pv5nv\") pod \"manila-db-create-kpv9k\" (UID: \"6720d85b-09ef-423d-86e6-5eff98a5cfce\") " pod="openstack/manila-db-create-kpv9k" Nov 28 15:17:10 crc kubenswrapper[4857]: I1128 15:17:10.804347 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6720d85b-09ef-423d-86e6-5eff98a5cfce-operator-scripts\") pod \"manila-db-create-kpv9k\" (UID: \"6720d85b-09ef-423d-86e6-5eff98a5cfce\") " pod="openstack/manila-db-create-kpv9k" Nov 28 15:17:10 crc kubenswrapper[4857]: I1128 15:17:10.806828 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a50a352-ecd4-4e36-898f-f5bf63eb4543-utilities" (OuterVolumeSpecName: "utilities") pod "3a50a352-ecd4-4e36-898f-f5bf63eb4543" (UID: "3a50a352-ecd4-4e36-898f-f5bf63eb4543"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:17:10 crc kubenswrapper[4857]: I1128 15:17:10.813505 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a50a352-ecd4-4e36-898f-f5bf63eb4543-kube-api-access-mfkfr" (OuterVolumeSpecName: "kube-api-access-mfkfr") pod "3a50a352-ecd4-4e36-898f-f5bf63eb4543" (UID: "3a50a352-ecd4-4e36-898f-f5bf63eb4543"). InnerVolumeSpecName "kube-api-access-mfkfr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:17:10 crc kubenswrapper[4857]: I1128 15:17:10.906737 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9231dac1-f922-44f8-a7e6-9a5372fc6e8b-operator-scripts\") pod \"manila-7585-account-create-update-5ttmq\" (UID: \"9231dac1-f922-44f8-a7e6-9a5372fc6e8b\") " pod="openstack/manila-7585-account-create-update-5ttmq" Nov 28 15:17:10 crc kubenswrapper[4857]: I1128 15:17:10.906859 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pv5nv\" (UniqueName: \"kubernetes.io/projected/6720d85b-09ef-423d-86e6-5eff98a5cfce-kube-api-access-pv5nv\") pod \"manila-db-create-kpv9k\" (UID: \"6720d85b-09ef-423d-86e6-5eff98a5cfce\") " pod="openstack/manila-db-create-kpv9k" Nov 28 15:17:10 crc kubenswrapper[4857]: I1128 15:17:10.906910 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6720d85b-09ef-423d-86e6-5eff98a5cfce-operator-scripts\") pod \"manila-db-create-kpv9k\" (UID: \"6720d85b-09ef-423d-86e6-5eff98a5cfce\") " pod="openstack/manila-db-create-kpv9k" Nov 28 15:17:10 crc kubenswrapper[4857]: I1128 15:17:10.906969 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mg5vx\" (UniqueName: \"kubernetes.io/projected/9231dac1-f922-44f8-a7e6-9a5372fc6e8b-kube-api-access-mg5vx\") pod \"manila-7585-account-create-update-5ttmq\" (UID: \"9231dac1-f922-44f8-a7e6-9a5372fc6e8b\") " pod="openstack/manila-7585-account-create-update-5ttmq" Nov 28 15:17:10 crc kubenswrapper[4857]: I1128 15:17:10.907788 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6720d85b-09ef-423d-86e6-5eff98a5cfce-operator-scripts\") pod \"manila-db-create-kpv9k\" (UID: \"6720d85b-09ef-423d-86e6-5eff98a5cfce\") " pod="openstack/manila-db-create-kpv9k" Nov 28 15:17:10 crc kubenswrapper[4857]: I1128 15:17:10.907901 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mfkfr\" (UniqueName: \"kubernetes.io/projected/3a50a352-ecd4-4e36-898f-f5bf63eb4543-kube-api-access-mfkfr\") on node \"crc\" DevicePath \"\"" Nov 28 15:17:10 crc kubenswrapper[4857]: I1128 15:17:10.907951 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3a50a352-ecd4-4e36-898f-f5bf63eb4543-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:17:10 crc kubenswrapper[4857]: I1128 15:17:10.924770 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3a50a352-ecd4-4e36-898f-f5bf63eb4543-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3a50a352-ecd4-4e36-898f-f5bf63eb4543" (UID: "3a50a352-ecd4-4e36-898f-f5bf63eb4543"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:17:10 crc kubenswrapper[4857]: I1128 15:17:10.926581 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pv5nv\" (UniqueName: \"kubernetes.io/projected/6720d85b-09ef-423d-86e6-5eff98a5cfce-kube-api-access-pv5nv\") pod \"manila-db-create-kpv9k\" (UID: \"6720d85b-09ef-423d-86e6-5eff98a5cfce\") " pod="openstack/manila-db-create-kpv9k" Nov 28 15:17:11 crc kubenswrapper[4857]: I1128 15:17:11.009740 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9231dac1-f922-44f8-a7e6-9a5372fc6e8b-operator-scripts\") pod \"manila-7585-account-create-update-5ttmq\" (UID: \"9231dac1-f922-44f8-a7e6-9a5372fc6e8b\") " pod="openstack/manila-7585-account-create-update-5ttmq" Nov 28 15:17:11 crc kubenswrapper[4857]: I1128 15:17:11.009929 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mg5vx\" (UniqueName: \"kubernetes.io/projected/9231dac1-f922-44f8-a7e6-9a5372fc6e8b-kube-api-access-mg5vx\") pod \"manila-7585-account-create-update-5ttmq\" (UID: \"9231dac1-f922-44f8-a7e6-9a5372fc6e8b\") " pod="openstack/manila-7585-account-create-update-5ttmq" Nov 28 15:17:11 crc kubenswrapper[4857]: I1128 15:17:11.010057 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3a50a352-ecd4-4e36-898f-f5bf63eb4543-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:17:11 crc kubenswrapper[4857]: I1128 15:17:11.010604 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9231dac1-f922-44f8-a7e6-9a5372fc6e8b-operator-scripts\") pod \"manila-7585-account-create-update-5ttmq\" (UID: \"9231dac1-f922-44f8-a7e6-9a5372fc6e8b\") " pod="openstack/manila-7585-account-create-update-5ttmq" Nov 28 15:17:11 crc kubenswrapper[4857]: I1128 15:17:11.020430 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-kpv9k" Nov 28 15:17:11 crc kubenswrapper[4857]: I1128 15:17:11.029214 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mg5vx\" (UniqueName: \"kubernetes.io/projected/9231dac1-f922-44f8-a7e6-9a5372fc6e8b-kube-api-access-mg5vx\") pod \"manila-7585-account-create-update-5ttmq\" (UID: \"9231dac1-f922-44f8-a7e6-9a5372fc6e8b\") " pod="openstack/manila-7585-account-create-update-5ttmq" Nov 28 15:17:11 crc kubenswrapper[4857]: I1128 15:17:11.099815 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/manila-7585-account-create-update-5ttmq" Nov 28 15:17:11 crc kubenswrapper[4857]: I1128 15:17:11.308735 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:17:11 crc kubenswrapper[4857]: I1128 15:17:11.309302 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:17:11 crc kubenswrapper[4857]: I1128 15:17:11.612804 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-create-kpv9k"] Nov 28 15:17:11 crc kubenswrapper[4857]: I1128 15:17:11.707289 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-kpv9k" event={"ID":"6720d85b-09ef-423d-86e6-5eff98a5cfce","Type":"ContainerStarted","Data":"3afff33a1ba11ca004087c5549b9082ac6b658cc86bd800e323d899017d5f491"} Nov 28 15:17:11 crc kubenswrapper[4857]: I1128 15:17:11.707335 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-44vs5" Nov 28 15:17:11 crc kubenswrapper[4857]: I1128 15:17:11.753695 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-44vs5"] Nov 28 15:17:11 crc kubenswrapper[4857]: I1128 15:17:11.765856 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-44vs5"] Nov 28 15:17:11 crc kubenswrapper[4857]: W1128 15:17:11.777253 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9231dac1_f922_44f8_a7e6_9a5372fc6e8b.slice/crio-45cd9d7709f039e31f8fbe46ae38f1835b042a062100c58ee611389346c00e42 WatchSource:0}: Error finding container 45cd9d7709f039e31f8fbe46ae38f1835b042a062100c58ee611389346c00e42: Status 404 returned error can't find the container with id 45cd9d7709f039e31f8fbe46ae38f1835b042a062100c58ee611389346c00e42 Nov 28 15:17:11 crc kubenswrapper[4857]: I1128 15:17:11.779436 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-7585-account-create-update-5ttmq"] Nov 28 15:17:12 crc kubenswrapper[4857]: I1128 15:17:12.245040 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a50a352-ecd4-4e36-898f-f5bf63eb4543" path="/var/lib/kubelet/pods/3a50a352-ecd4-4e36-898f-f5bf63eb4543/volumes" Nov 28 15:17:12 crc kubenswrapper[4857]: I1128 15:17:12.741519 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-7585-account-create-update-5ttmq" event={"ID":"9231dac1-f922-44f8-a7e6-9a5372fc6e8b","Type":"ContainerStarted","Data":"7caac08d8e0ca10cad1fd84103169092d288584dd161eee66bb7f6260e96a6d2"} Nov 28 15:17:12 crc kubenswrapper[4857]: I1128 15:17:12.741597 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-7585-account-create-update-5ttmq" event={"ID":"9231dac1-f922-44f8-a7e6-9a5372fc6e8b","Type":"ContainerStarted","Data":"45cd9d7709f039e31f8fbe46ae38f1835b042a062100c58ee611389346c00e42"} Nov 28 15:17:12 crc kubenswrapper[4857]: I1128 15:17:12.747769 4857 generic.go:334] "Generic (PLEG): 
container finished" podID="6720d85b-09ef-423d-86e6-5eff98a5cfce" containerID="275a2b305e0584ed4385a53c78e73bc58e35bb06ff41728cce7052d24d5cd7b1" exitCode=0 Nov 28 15:17:12 crc kubenswrapper[4857]: I1128 15:17:12.747879 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-kpv9k" event={"ID":"6720d85b-09ef-423d-86e6-5eff98a5cfce","Type":"ContainerDied","Data":"275a2b305e0584ed4385a53c78e73bc58e35bb06ff41728cce7052d24d5cd7b1"} Nov 28 15:17:12 crc kubenswrapper[4857]: I1128 15:17:12.776454 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-7585-account-create-update-5ttmq" podStartSLOduration=2.776424446 podStartE2EDuration="2.776424446s" podCreationTimestamp="2025-11-28 15:17:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:17:12.761001944 +0000 UTC m=+6482.884943411" watchObservedRunningTime="2025-11-28 15:17:12.776424446 +0000 UTC m=+6482.900365893" Nov 28 15:17:13 crc kubenswrapper[4857]: I1128 15:17:13.766480 4857 generic.go:334] "Generic (PLEG): container finished" podID="9231dac1-f922-44f8-a7e6-9a5372fc6e8b" containerID="7caac08d8e0ca10cad1fd84103169092d288584dd161eee66bb7f6260e96a6d2" exitCode=0 Nov 28 15:17:13 crc kubenswrapper[4857]: I1128 15:17:13.766614 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-7585-account-create-update-5ttmq" event={"ID":"9231dac1-f922-44f8-a7e6-9a5372fc6e8b","Type":"ContainerDied","Data":"7caac08d8e0ca10cad1fd84103169092d288584dd161eee66bb7f6260e96a6d2"} Nov 28 15:17:14 crc kubenswrapper[4857]: I1128 15:17:14.250746 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-kpv9k" Nov 28 15:17:14 crc kubenswrapper[4857]: I1128 15:17:14.416996 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6720d85b-09ef-423d-86e6-5eff98a5cfce-operator-scripts\") pod \"6720d85b-09ef-423d-86e6-5eff98a5cfce\" (UID: \"6720d85b-09ef-423d-86e6-5eff98a5cfce\") " Nov 28 15:17:14 crc kubenswrapper[4857]: I1128 15:17:14.418158 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6720d85b-09ef-423d-86e6-5eff98a5cfce-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6720d85b-09ef-423d-86e6-5eff98a5cfce" (UID: "6720d85b-09ef-423d-86e6-5eff98a5cfce"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:17:14 crc kubenswrapper[4857]: I1128 15:17:14.418745 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pv5nv\" (UniqueName: \"kubernetes.io/projected/6720d85b-09ef-423d-86e6-5eff98a5cfce-kube-api-access-pv5nv\") pod \"6720d85b-09ef-423d-86e6-5eff98a5cfce\" (UID: \"6720d85b-09ef-423d-86e6-5eff98a5cfce\") " Nov 28 15:17:14 crc kubenswrapper[4857]: I1128 15:17:14.420035 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6720d85b-09ef-423d-86e6-5eff98a5cfce-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:17:14 crc kubenswrapper[4857]: I1128 15:17:14.439525 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6720d85b-09ef-423d-86e6-5eff98a5cfce-kube-api-access-pv5nv" (OuterVolumeSpecName: "kube-api-access-pv5nv") pod "6720d85b-09ef-423d-86e6-5eff98a5cfce" (UID: "6720d85b-09ef-423d-86e6-5eff98a5cfce"). InnerVolumeSpecName "kube-api-access-pv5nv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:17:14 crc kubenswrapper[4857]: I1128 15:17:14.522163 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pv5nv\" (UniqueName: \"kubernetes.io/projected/6720d85b-09ef-423d-86e6-5eff98a5cfce-kube-api-access-pv5nv\") on node \"crc\" DevicePath \"\"" Nov 28 15:17:14 crc kubenswrapper[4857]: I1128 15:17:14.783940 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-create-kpv9k" Nov 28 15:17:14 crc kubenswrapper[4857]: I1128 15:17:14.783922 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-create-kpv9k" event={"ID":"6720d85b-09ef-423d-86e6-5eff98a5cfce","Type":"ContainerDied","Data":"3afff33a1ba11ca004087c5549b9082ac6b658cc86bd800e323d899017d5f491"} Nov 28 15:17:14 crc kubenswrapper[4857]: I1128 15:17:14.784743 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3afff33a1ba11ca004087c5549b9082ac6b658cc86bd800e323d899017d5f491" Nov 28 15:17:15 crc kubenswrapper[4857]: I1128 15:17:15.254907 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-7585-account-create-update-5ttmq" Nov 28 15:17:15 crc kubenswrapper[4857]: I1128 15:17:15.341969 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9231dac1-f922-44f8-a7e6-9a5372fc6e8b-operator-scripts\") pod \"9231dac1-f922-44f8-a7e6-9a5372fc6e8b\" (UID: \"9231dac1-f922-44f8-a7e6-9a5372fc6e8b\") " Nov 28 15:17:15 crc kubenswrapper[4857]: I1128 15:17:15.342099 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5vx\" (UniqueName: \"kubernetes.io/projected/9231dac1-f922-44f8-a7e6-9a5372fc6e8b-kube-api-access-mg5vx\") pod \"9231dac1-f922-44f8-a7e6-9a5372fc6e8b\" (UID: \"9231dac1-f922-44f8-a7e6-9a5372fc6e8b\") " Nov 28 15:17:15 crc kubenswrapper[4857]: I1128 15:17:15.342628 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9231dac1-f922-44f8-a7e6-9a5372fc6e8b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9231dac1-f922-44f8-a7e6-9a5372fc6e8b" (UID: "9231dac1-f922-44f8-a7e6-9a5372fc6e8b"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:17:15 crc kubenswrapper[4857]: I1128 15:17:15.343695 4857 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9231dac1-f922-44f8-a7e6-9a5372fc6e8b-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:17:15 crc kubenswrapper[4857]: I1128 15:17:15.353234 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9231dac1-f922-44f8-a7e6-9a5372fc6e8b-kube-api-access-mg5vx" (OuterVolumeSpecName: "kube-api-access-mg5vx") pod "9231dac1-f922-44f8-a7e6-9a5372fc6e8b" (UID: "9231dac1-f922-44f8-a7e6-9a5372fc6e8b"). InnerVolumeSpecName "kube-api-access-mg5vx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:17:15 crc kubenswrapper[4857]: I1128 15:17:15.446499 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5vx\" (UniqueName: \"kubernetes.io/projected/9231dac1-f922-44f8-a7e6-9a5372fc6e8b-kube-api-access-mg5vx\") on node \"crc\" DevicePath \"\"" Nov 28 15:17:15 crc kubenswrapper[4857]: I1128 15:17:15.800311 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-7585-account-create-update-5ttmq" event={"ID":"9231dac1-f922-44f8-a7e6-9a5372fc6e8b","Type":"ContainerDied","Data":"45cd9d7709f039e31f8fbe46ae38f1835b042a062100c58ee611389346c00e42"} Nov 28 15:17:15 crc kubenswrapper[4857]: I1128 15:17:15.800399 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-7585-account-create-update-5ttmq" Nov 28 15:17:15 crc kubenswrapper[4857]: I1128 15:17:15.802792 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="45cd9d7709f039e31f8fbe46ae38f1835b042a062100c58ee611389346c00e42" Nov 28 15:17:19 crc kubenswrapper[4857]: I1128 15:17:19.604790 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-b599l" Nov 28 15:17:19 crc kubenswrapper[4857]: I1128 15:17:19.686968 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-b599l" Nov 28 15:17:19 crc kubenswrapper[4857]: I1128 15:17:19.872050 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-b599l"] Nov 28 15:17:20 crc kubenswrapper[4857]: I1128 15:17:20.877694 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-b599l" podUID="cb7a99d7-5bcd-475b-b433-943e8e1cca19" containerName="registry-server" containerID="cri-o://b4156be6c023204165404b7193e84efc8c662d52b4839f5941581ecf99122978" gracePeriod=2 Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.274314 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-db-sync-kchp7"] Nov 28 15:17:21 crc kubenswrapper[4857]: E1128 15:17:21.276539 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9231dac1-f922-44f8-a7e6-9a5372fc6e8b" containerName="mariadb-account-create-update" Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.276565 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="9231dac1-f922-44f8-a7e6-9a5372fc6e8b" containerName="mariadb-account-create-update" Nov 28 15:17:21 crc kubenswrapper[4857]: E1128 15:17:21.276607 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6720d85b-09ef-423d-86e6-5eff98a5cfce" containerName="mariadb-database-create" Nov 28 15:17:21 crc 
kubenswrapper[4857]: I1128 15:17:21.276614 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="6720d85b-09ef-423d-86e6-5eff98a5cfce" containerName="mariadb-database-create"
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.276827 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="6720d85b-09ef-423d-86e6-5eff98a5cfce" containerName="mariadb-database-create"
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.276847 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="9231dac1-f922-44f8-a7e6-9a5372fc6e8b" containerName="mariadb-account-create-update"
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.277740 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-kchp7"
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.280323 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-config-data"
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.280645 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-manila-dockercfg-r6scj"
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.286705 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-sync-kchp7"]
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.438581 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r8dbs\" (UniqueName: \"kubernetes.io/projected/0603deb1-0077-4f3c-ae56-9d370998e593-kube-api-access-r8dbs\") pod \"manila-db-sync-kchp7\" (UID: \"0603deb1-0077-4f3c-ae56-9d370998e593\") " pod="openstack/manila-db-sync-kchp7"
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.438927 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/0603deb1-0077-4f3c-ae56-9d370998e593-job-config-data\") pod \"manila-db-sync-kchp7\" (UID: \"0603deb1-0077-4f3c-ae56-9d370998e593\") " pod="openstack/manila-db-sync-kchp7"
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.439124 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0603deb1-0077-4f3c-ae56-9d370998e593-config-data\") pod \"manila-db-sync-kchp7\" (UID: \"0603deb1-0077-4f3c-ae56-9d370998e593\") " pod="openstack/manila-db-sync-kchp7"
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.439278 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0603deb1-0077-4f3c-ae56-9d370998e593-combined-ca-bundle\") pod \"manila-db-sync-kchp7\" (UID: \"0603deb1-0077-4f3c-ae56-9d370998e593\") " pod="openstack/manila-db-sync-kchp7"
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.447794 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b599l"
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.540998 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0603deb1-0077-4f3c-ae56-9d370998e593-combined-ca-bundle\") pod \"manila-db-sync-kchp7\" (UID: \"0603deb1-0077-4f3c-ae56-9d370998e593\") " pod="openstack/manila-db-sync-kchp7"
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.541171 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r8dbs\" (UniqueName: \"kubernetes.io/projected/0603deb1-0077-4f3c-ae56-9d370998e593-kube-api-access-r8dbs\") pod \"manila-db-sync-kchp7\" (UID: \"0603deb1-0077-4f3c-ae56-9d370998e593\") " pod="openstack/manila-db-sync-kchp7"
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.541228 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/0603deb1-0077-4f3c-ae56-9d370998e593-job-config-data\") pod \"manila-db-sync-kchp7\" (UID: \"0603deb1-0077-4f3c-ae56-9d370998e593\") " pod="openstack/manila-db-sync-kchp7"
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.541264 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0603deb1-0077-4f3c-ae56-9d370998e593-config-data\") pod \"manila-db-sync-kchp7\" (UID: \"0603deb1-0077-4f3c-ae56-9d370998e593\") " pod="openstack/manila-db-sync-kchp7"
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.550786 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0603deb1-0077-4f3c-ae56-9d370998e593-config-data\") pod \"manila-db-sync-kchp7\" (UID: \"0603deb1-0077-4f3c-ae56-9d370998e593\") " pod="openstack/manila-db-sync-kchp7"
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.553485 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/0603deb1-0077-4f3c-ae56-9d370998e593-job-config-data\") pod \"manila-db-sync-kchp7\" (UID: \"0603deb1-0077-4f3c-ae56-9d370998e593\") " pod="openstack/manila-db-sync-kchp7"
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.554211 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0603deb1-0077-4f3c-ae56-9d370998e593-combined-ca-bundle\") pod \"manila-db-sync-kchp7\" (UID: \"0603deb1-0077-4f3c-ae56-9d370998e593\") " pod="openstack/manila-db-sync-kchp7"
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.561887 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r8dbs\" (UniqueName: \"kubernetes.io/projected/0603deb1-0077-4f3c-ae56-9d370998e593-kube-api-access-r8dbs\") pod \"manila-db-sync-kchp7\" (UID: \"0603deb1-0077-4f3c-ae56-9d370998e593\") " pod="openstack/manila-db-sync-kchp7"
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.610810 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-kchp7"
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.643668 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6twv7\" (UniqueName: \"kubernetes.io/projected/cb7a99d7-5bcd-475b-b433-943e8e1cca19-kube-api-access-6twv7\") pod \"cb7a99d7-5bcd-475b-b433-943e8e1cca19\" (UID: \"cb7a99d7-5bcd-475b-b433-943e8e1cca19\") "
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.644546 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb7a99d7-5bcd-475b-b433-943e8e1cca19-catalog-content\") pod \"cb7a99d7-5bcd-475b-b433-943e8e1cca19\" (UID: \"cb7a99d7-5bcd-475b-b433-943e8e1cca19\") "
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.644812 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb7a99d7-5bcd-475b-b433-943e8e1cca19-utilities\") pod \"cb7a99d7-5bcd-475b-b433-943e8e1cca19\" (UID: \"cb7a99d7-5bcd-475b-b433-943e8e1cca19\") "
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.646668 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cb7a99d7-5bcd-475b-b433-943e8e1cca19-utilities" (OuterVolumeSpecName: "utilities") pod "cb7a99d7-5bcd-475b-b433-943e8e1cca19" (UID: "cb7a99d7-5bcd-475b-b433-943e8e1cca19"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.654895 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb7a99d7-5bcd-475b-b433-943e8e1cca19-kube-api-access-6twv7" (OuterVolumeSpecName: "kube-api-access-6twv7") pod "cb7a99d7-5bcd-475b-b433-943e8e1cca19" (UID: "cb7a99d7-5bcd-475b-b433-943e8e1cca19"). InnerVolumeSpecName "kube-api-access-6twv7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.719535 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cb7a99d7-5bcd-475b-b433-943e8e1cca19-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cb7a99d7-5bcd-475b-b433-943e8e1cca19" (UID: "cb7a99d7-5bcd-475b-b433-943e8e1cca19"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.749714 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb7a99d7-5bcd-475b-b433-943e8e1cca19-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.749760 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6twv7\" (UniqueName: \"kubernetes.io/projected/cb7a99d7-5bcd-475b-b433-943e8e1cca19-kube-api-access-6twv7\") on node \"crc\" DevicePath \"\""
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.749772 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb7a99d7-5bcd-475b-b433-943e8e1cca19-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.898807 4857 generic.go:334] "Generic (PLEG): container finished" podID="cb7a99d7-5bcd-475b-b433-943e8e1cca19" containerID="b4156be6c023204165404b7193e84efc8c662d52b4839f5941581ecf99122978" exitCode=0
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.898862 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b599l" event={"ID":"cb7a99d7-5bcd-475b-b433-943e8e1cca19","Type":"ContainerDied","Data":"b4156be6c023204165404b7193e84efc8c662d52b4839f5941581ecf99122978"}
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.898899 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b599l" event={"ID":"cb7a99d7-5bcd-475b-b433-943e8e1cca19","Type":"ContainerDied","Data":"e5bcb4323c6f13d011aa52a6481520fa211b40b093157f18c9bfc930e794eb79"}
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.898924 4857 scope.go:117] "RemoveContainer" containerID="b4156be6c023204165404b7193e84efc8c662d52b4839f5941581ecf99122978"
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.898930 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b599l"
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.931363 4857 scope.go:117] "RemoveContainer" containerID="2520a6a2850619c6ba57406d2f2434373a9912b5d0ebdc4b805b487a29ba8adc"
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.966038 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-b599l"]
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.972345 4857 scope.go:117] "RemoveContainer" containerID="0ea2e4ec2e6bb3000cbf1e7fe5a01c7c0451286b544c7d42ce6e94b8709c0626"
Nov 28 15:17:21 crc kubenswrapper[4857]: I1128 15:17:21.986970 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-b599l"]
Nov 28 15:17:22 crc kubenswrapper[4857]: I1128 15:17:22.016415 4857 scope.go:117] "RemoveContainer" containerID="b4156be6c023204165404b7193e84efc8c662d52b4839f5941581ecf99122978"
Nov 28 15:17:22 crc kubenswrapper[4857]: E1128 15:17:22.017576 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b4156be6c023204165404b7193e84efc8c662d52b4839f5941581ecf99122978\": container with ID starting with b4156be6c023204165404b7193e84efc8c662d52b4839f5941581ecf99122978 not found: ID does not exist" containerID="b4156be6c023204165404b7193e84efc8c662d52b4839f5941581ecf99122978"
Nov 28 15:17:22 crc kubenswrapper[4857]: I1128 15:17:22.017633 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b4156be6c023204165404b7193e84efc8c662d52b4839f5941581ecf99122978"} err="failed to get container status \"b4156be6c023204165404b7193e84efc8c662d52b4839f5941581ecf99122978\": rpc error: code = NotFound desc = could not find container \"b4156be6c023204165404b7193e84efc8c662d52b4839f5941581ecf99122978\": container with ID starting with b4156be6c023204165404b7193e84efc8c662d52b4839f5941581ecf99122978 not found: ID does not exist"
Nov 28 15:17:22 crc kubenswrapper[4857]: I1128 15:17:22.017656 4857 scope.go:117] "RemoveContainer" containerID="2520a6a2850619c6ba57406d2f2434373a9912b5d0ebdc4b805b487a29ba8adc"
Nov 28 15:17:22 crc kubenswrapper[4857]: E1128 15:17:22.018110 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2520a6a2850619c6ba57406d2f2434373a9912b5d0ebdc4b805b487a29ba8adc\": container with ID starting with 2520a6a2850619c6ba57406d2f2434373a9912b5d0ebdc4b805b487a29ba8adc not found: ID does not exist" containerID="2520a6a2850619c6ba57406d2f2434373a9912b5d0ebdc4b805b487a29ba8adc"
Nov 28 15:17:22 crc kubenswrapper[4857]: I1128 15:17:22.018129 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2520a6a2850619c6ba57406d2f2434373a9912b5d0ebdc4b805b487a29ba8adc"} err="failed to get container status \"2520a6a2850619c6ba57406d2f2434373a9912b5d0ebdc4b805b487a29ba8adc\": rpc error: code = NotFound desc = could not find container \"2520a6a2850619c6ba57406d2f2434373a9912b5d0ebdc4b805b487a29ba8adc\": container with ID starting with 2520a6a2850619c6ba57406d2f2434373a9912b5d0ebdc4b805b487a29ba8adc not found: ID does not exist"
Nov 28 15:17:22 crc kubenswrapper[4857]: I1128 15:17:22.018146 4857 scope.go:117] "RemoveContainer" containerID="0ea2e4ec2e6bb3000cbf1e7fe5a01c7c0451286b544c7d42ce6e94b8709c0626"
Nov 28 15:17:22 crc kubenswrapper[4857]: E1128 15:17:22.018376 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0ea2e4ec2e6bb3000cbf1e7fe5a01c7c0451286b544c7d42ce6e94b8709c0626\": container with ID starting with 0ea2e4ec2e6bb3000cbf1e7fe5a01c7c0451286b544c7d42ce6e94b8709c0626 not found: ID does not exist" containerID="0ea2e4ec2e6bb3000cbf1e7fe5a01c7c0451286b544c7d42ce6e94b8709c0626"
Nov 28 15:17:22 crc kubenswrapper[4857]: I1128 15:17:22.018394 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ea2e4ec2e6bb3000cbf1e7fe5a01c7c0451286b544c7d42ce6e94b8709c0626"} err="failed to get container status \"0ea2e4ec2e6bb3000cbf1e7fe5a01c7c0451286b544c7d42ce6e94b8709c0626\": rpc error: code = NotFound desc = could not find container \"0ea2e4ec2e6bb3000cbf1e7fe5a01c7c0451286b544c7d42ce6e94b8709c0626\": container with ID starting with 0ea2e4ec2e6bb3000cbf1e7fe5a01c7c0451286b544c7d42ce6e94b8709c0626 not found: ID does not exist"
Nov 28 15:17:22 crc kubenswrapper[4857]: I1128 15:17:22.242929 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb7a99d7-5bcd-475b-b433-943e8e1cca19" path="/var/lib/kubelet/pods/cb7a99d7-5bcd-475b-b433-943e8e1cca19/volumes"
Nov 28 15:17:22 crc kubenswrapper[4857]: I1128 15:17:22.289194 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-db-sync-kchp7"]
Nov 28 15:17:22 crc kubenswrapper[4857]: W1128 15:17:22.293123 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0603deb1_0077_4f3c_ae56_9d370998e593.slice/crio-95b41068ca28324bb21ddb90875c58abdee5f1b288ac9f10d87c85ec2a09d8de WatchSource:0}: Error finding container 95b41068ca28324bb21ddb90875c58abdee5f1b288ac9f10d87c85ec2a09d8de: Status 404 returned error can't find the container with id 95b41068ca28324bb21ddb90875c58abdee5f1b288ac9f10d87c85ec2a09d8de
Nov 28 15:17:22 crc kubenswrapper[4857]: I1128 15:17:22.969116 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-kchp7" event={"ID":"0603deb1-0077-4f3c-ae56-9d370998e593","Type":"ContainerStarted","Data":"95b41068ca28324bb21ddb90875c58abdee5f1b288ac9f10d87c85ec2a09d8de"}
Nov 28 15:17:22 crc kubenswrapper[4857]: I1128 15:17:22.992261 4857 scope.go:117] "RemoveContainer" containerID="4b397ac0f11dd7b65a0bbbe748e754f917d38ad873f1cf01e1e6fe9515e5d2d1"
Nov 28 15:17:23 crc kubenswrapper[4857]: I1128 15:17:23.055898 4857 scope.go:117] "RemoveContainer" containerID="ba82ca8cd5c155ae56c6776578e535ac2429a735058c25a210d2ce7ec80f1085"
Nov 28 15:17:23 crc kubenswrapper[4857]: I1128 15:17:23.095680 4857 scope.go:117] "RemoveContainer" containerID="0fb3206cba2f538ea8066fbc30080b541d1814237828588e02d1b37a903e5638"
Nov 28 15:17:27 crc kubenswrapper[4857]: I1128 15:17:27.017446 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-kchp7" event={"ID":"0603deb1-0077-4f3c-ae56-9d370998e593","Type":"ContainerStarted","Data":"a5810a45c09b37d5b1c88e866c86398072cd5ba8081096650cc7e76b95b36db9"}
Nov 28 15:17:27 crc kubenswrapper[4857]: I1128 15:17:27.042274 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-db-sync-kchp7" podStartSLOduration=1.9712113759999998 podStartE2EDuration="6.042254266s" podCreationTimestamp="2025-11-28 15:17:21 +0000 UTC" firstStartedPulling="2025-11-28 15:17:22.29636162 +0000 UTC m=+6492.420303067" lastFinishedPulling="2025-11-28 15:17:26.36740452 +0000 UTC m=+6496.491345957" observedRunningTime="2025-11-28 15:17:27.03678803 +0000 UTC m=+6497.160729467" watchObservedRunningTime="2025-11-28 15:17:27.042254266 +0000 UTC m=+6497.166195693"
Nov 28 15:17:28 crc kubenswrapper[4857]: I1128 15:17:28.866715 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0"
Nov 28 15:17:29 crc kubenswrapper[4857]: I1128 15:17:29.046533 4857 generic.go:334] "Generic (PLEG): container finished" podID="0603deb1-0077-4f3c-ae56-9d370998e593" containerID="a5810a45c09b37d5b1c88e866c86398072cd5ba8081096650cc7e76b95b36db9" exitCode=0
Nov 28 15:17:29 crc kubenswrapper[4857]: I1128 15:17:29.046602 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-kchp7" event={"ID":"0603deb1-0077-4f3c-ae56-9d370998e593","Type":"ContainerDied","Data":"a5810a45c09b37d5b1c88e866c86398072cd5ba8081096650cc7e76b95b36db9"}
Nov 28 15:17:30 crc kubenswrapper[4857]: I1128 15:17:30.675297 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-kchp7"
Nov 28 15:17:30 crc kubenswrapper[4857]: I1128 15:17:30.798162 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0603deb1-0077-4f3c-ae56-9d370998e593-combined-ca-bundle\") pod \"0603deb1-0077-4f3c-ae56-9d370998e593\" (UID: \"0603deb1-0077-4f3c-ae56-9d370998e593\") "
Nov 28 15:17:30 crc kubenswrapper[4857]: I1128 15:17:30.798495 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/0603deb1-0077-4f3c-ae56-9d370998e593-job-config-data\") pod \"0603deb1-0077-4f3c-ae56-9d370998e593\" (UID: \"0603deb1-0077-4f3c-ae56-9d370998e593\") "
Nov 28 15:17:30 crc kubenswrapper[4857]: I1128 15:17:30.798738 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r8dbs\" (UniqueName: \"kubernetes.io/projected/0603deb1-0077-4f3c-ae56-9d370998e593-kube-api-access-r8dbs\") pod \"0603deb1-0077-4f3c-ae56-9d370998e593\" (UID: \"0603deb1-0077-4f3c-ae56-9d370998e593\") "
Nov 28 15:17:30 crc kubenswrapper[4857]: I1128 15:17:30.798919 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0603deb1-0077-4f3c-ae56-9d370998e593-config-data\") pod \"0603deb1-0077-4f3c-ae56-9d370998e593\" (UID: \"0603deb1-0077-4f3c-ae56-9d370998e593\") "
Nov 28 15:17:30 crc kubenswrapper[4857]: I1128 15:17:30.806710 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0603deb1-0077-4f3c-ae56-9d370998e593-config-data" (OuterVolumeSpecName: "config-data") pod "0603deb1-0077-4f3c-ae56-9d370998e593" (UID: "0603deb1-0077-4f3c-ae56-9d370998e593"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:17:30 crc kubenswrapper[4857]: I1128 15:17:30.814165 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0603deb1-0077-4f3c-ae56-9d370998e593-kube-api-access-r8dbs" (OuterVolumeSpecName: "kube-api-access-r8dbs") pod "0603deb1-0077-4f3c-ae56-9d370998e593" (UID: "0603deb1-0077-4f3c-ae56-9d370998e593"). InnerVolumeSpecName "kube-api-access-r8dbs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:17:30 crc kubenswrapper[4857]: I1128 15:17:30.818244 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0603deb1-0077-4f3c-ae56-9d370998e593-job-config-data" (OuterVolumeSpecName: "job-config-data") pod "0603deb1-0077-4f3c-ae56-9d370998e593" (UID: "0603deb1-0077-4f3c-ae56-9d370998e593"). InnerVolumeSpecName "job-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:17:30 crc kubenswrapper[4857]: I1128 15:17:30.828855 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0603deb1-0077-4f3c-ae56-9d370998e593-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0603deb1-0077-4f3c-ae56-9d370998e593" (UID: "0603deb1-0077-4f3c-ae56-9d370998e593"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:17:30 crc kubenswrapper[4857]: I1128 15:17:30.901611 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r8dbs\" (UniqueName: \"kubernetes.io/projected/0603deb1-0077-4f3c-ae56-9d370998e593-kube-api-access-r8dbs\") on node \"crc\" DevicePath \"\""
Nov 28 15:17:30 crc kubenswrapper[4857]: I1128 15:17:30.901847 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0603deb1-0077-4f3c-ae56-9d370998e593-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 15:17:30 crc kubenswrapper[4857]: I1128 15:17:30.901906 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0603deb1-0077-4f3c-ae56-9d370998e593-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 15:17:30 crc kubenswrapper[4857]: I1128 15:17:30.902118 4857 reconciler_common.go:293] "Volume detached for volume \"job-config-data\" (UniqueName: \"kubernetes.io/secret/0603deb1-0077-4f3c-ae56-9d370998e593-job-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.067457 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-db-sync-kchp7" event={"ID":"0603deb1-0077-4f3c-ae56-9d370998e593","Type":"ContainerDied","Data":"95b41068ca28324bb21ddb90875c58abdee5f1b288ac9f10d87c85ec2a09d8de"}
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.067677 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="95b41068ca28324bb21ddb90875c58abdee5f1b288ac9f10d87c85ec2a09d8de"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.067734 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/manila-db-sync-kchp7"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.361185 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-scheduler-0"]
Nov 28 15:17:31 crc kubenswrapper[4857]: E1128 15:17:31.361755 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0603deb1-0077-4f3c-ae56-9d370998e593" containerName="manila-db-sync"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.361782 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="0603deb1-0077-4f3c-ae56-9d370998e593" containerName="manila-db-sync"
Nov 28 15:17:31 crc kubenswrapper[4857]: E1128 15:17:31.361854 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb7a99d7-5bcd-475b-b433-943e8e1cca19" containerName="extract-content"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.361866 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb7a99d7-5bcd-475b-b433-943e8e1cca19" containerName="extract-content"
Nov 28 15:17:31 crc kubenswrapper[4857]: E1128 15:17:31.361885 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb7a99d7-5bcd-475b-b433-943e8e1cca19" containerName="extract-utilities"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.361894 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb7a99d7-5bcd-475b-b433-943e8e1cca19" containerName="extract-utilities"
Nov 28 15:17:31 crc kubenswrapper[4857]: E1128 15:17:31.361904 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb7a99d7-5bcd-475b-b433-943e8e1cca19" containerName="registry-server"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.361912 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb7a99d7-5bcd-475b-b433-943e8e1cca19" containerName="registry-server"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.362194 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb7a99d7-5bcd-475b-b433-943e8e1cca19" containerName="registry-server"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.362235 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="0603deb1-0077-4f3c-ae56-9d370998e593" containerName="manila-db-sync"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.364745 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.367251 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scheduler-config-data"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.367448 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-manila-dockercfg-r6scj"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.367503 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-scripts"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.367839 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-config-data"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.376861 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"]
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.414448 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10-config-data\") pod \"manila-scheduler-0\" (UID: \"93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10\") " pod="openstack/manila-scheduler-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.414505 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10\") " pod="openstack/manila-scheduler-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.414579 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10-scripts\") pod \"manila-scheduler-0\" (UID: \"93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10\") " pod="openstack/manila-scheduler-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.414628 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8kdgp\" (UniqueName: \"kubernetes.io/projected/93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10-kube-api-access-8kdgp\") pod \"manila-scheduler-0\" (UID: \"93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10\") " pod="openstack/manila-scheduler-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.414741 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10\") " pod="openstack/manila-scheduler-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.414824 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10\") " pod="openstack/manila-scheduler-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.517916 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8kdgp\" (UniqueName: \"kubernetes.io/projected/93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10-kube-api-access-8kdgp\") pod \"manila-scheduler-0\" (UID: \"93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10\") " pod="openstack/manila-scheduler-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.518092 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10\") " pod="openstack/manila-scheduler-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.518178 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10\") " pod="openstack/manila-scheduler-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.518222 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10-config-data\") pod \"manila-scheduler-0\" (UID: \"93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10\") " pod="openstack/manila-scheduler-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.518242 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10\") " pod="openstack/manila-scheduler-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.518298 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10-scripts\") pod \"manila-scheduler-0\" (UID: \"93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10\") " pod="openstack/manila-scheduler-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.519149 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10-etc-machine-id\") pod \"manila-scheduler-0\" (UID: \"93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10\") " pod="openstack/manila-scheduler-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.531348 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10-config-data\") pod \"manila-scheduler-0\" (UID: \"93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10\") " pod="openstack/manila-scheduler-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.531805 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10-scripts\") pod \"manila-scheduler-0\" (UID: \"93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10\") " pod="openstack/manila-scheduler-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.534458 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10-config-data-custom\") pod \"manila-scheduler-0\" (UID: \"93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10\") " pod="openstack/manila-scheduler-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.534870 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10-combined-ca-bundle\") pod \"manila-scheduler-0\" (UID: \"93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10\") " pod="openstack/manila-scheduler-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.551327 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-share-share1-0"]
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.552935 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8kdgp\" (UniqueName: \"kubernetes.io/projected/93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10-kube-api-access-8kdgp\") pod \"manila-scheduler-0\" (UID: \"93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10\") " pod="openstack/manila-scheduler-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.553471 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.560206 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-share-share1-config-data"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.569835 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"]
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.620543 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/904e6cef-0d88-4f3d-8b62-403596acc8b0-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"904e6cef-0d88-4f3d-8b62-403596acc8b0\") " pod="openstack/manila-share-share1-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.620713 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/904e6cef-0d88-4f3d-8b62-403596acc8b0-ceph\") pod \"manila-share-share1-0\" (UID: \"904e6cef-0d88-4f3d-8b62-403596acc8b0\") " pod="openstack/manila-share-share1-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.620771 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/904e6cef-0d88-4f3d-8b62-403596acc8b0-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"904e6cef-0d88-4f3d-8b62-403596acc8b0\") " pod="openstack/manila-share-share1-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.620820 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-66f2f\" (UniqueName: \"kubernetes.io/projected/904e6cef-0d88-4f3d-8b62-403596acc8b0-kube-api-access-66f2f\") pod \"manila-share-share1-0\" (UID: \"904e6cef-0d88-4f3d-8b62-403596acc8b0\") " pod="openstack/manila-share-share1-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.620867 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/904e6cef-0d88-4f3d-8b62-403596acc8b0-scripts\") pod \"manila-share-share1-0\" (UID: \"904e6cef-0d88-4f3d-8b62-403596acc8b0\") " pod="openstack/manila-share-share1-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.620889 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/904e6cef-0d88-4f3d-8b62-403596acc8b0-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"904e6cef-0d88-4f3d-8b62-403596acc8b0\") " pod="openstack/manila-share-share1-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.620971 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/904e6cef-0d88-4f3d-8b62-403596acc8b0-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"904e6cef-0d88-4f3d-8b62-403596acc8b0\") " pod="openstack/manila-share-share1-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.621032 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/904e6cef-0d88-4f3d-8b62-403596acc8b0-config-data\") pod \"manila-share-share1-0\" (UID: \"904e6cef-0d88-4f3d-8b62-403596acc8b0\") " pod="openstack/manila-share-share1-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.699717 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-scheduler-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.706643 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-655767558f-bptdq"]
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.711135 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-655767558f-bptdq"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.723901 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/904e6cef-0d88-4f3d-8b62-403596acc8b0-scripts\") pod \"manila-share-share1-0\" (UID: \"904e6cef-0d88-4f3d-8b62-403596acc8b0\") " pod="openstack/manila-share-share1-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.724234 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/904e6cef-0d88-4f3d-8b62-403596acc8b0-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"904e6cef-0d88-4f3d-8b62-403596acc8b0\") " pod="openstack/manila-share-share1-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.724361 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/904e6cef-0d88-4f3d-8b62-403596acc8b0-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"904e6cef-0d88-4f3d-8b62-403596acc8b0\") " pod="openstack/manila-share-share1-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.724464 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/904e6cef-0d88-4f3d-8b62-403596acc8b0-config-data\") pod \"manila-share-share1-0\" (UID: \"904e6cef-0d88-4f3d-8b62-403596acc8b0\") " pod="openstack/manila-share-share1-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.724541 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/904e6cef-0d88-4f3d-8b62-403596acc8b0-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"904e6cef-0d88-4f3d-8b62-403596acc8b0\") " pod="openstack/manila-share-share1-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.724715 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/904e6cef-0d88-4f3d-8b62-403596acc8b0-ceph\") pod \"manila-share-share1-0\" (UID: \"904e6cef-0d88-4f3d-8b62-403596acc8b0\") " pod="openstack/manila-share-share1-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.725001 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/904e6cef-0d88-4f3d-8b62-403596acc8b0-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"904e6cef-0d88-4f3d-8b62-403596acc8b0\") " pod="openstack/manila-share-share1-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.725119 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-66f2f\" (UniqueName: \"kubernetes.io/projected/904e6cef-0d88-4f3d-8b62-403596acc8b0-kube-api-access-66f2f\") pod \"manila-share-share1-0\" (UID: \"904e6cef-0d88-4f3d-8b62-403596acc8b0\") " pod="openstack/manila-share-share1-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.734833 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-manila\" (UniqueName: \"kubernetes.io/host-path/904e6cef-0d88-4f3d-8b62-403596acc8b0-var-lib-manila\") pod \"manila-share-share1-0\" (UID: \"904e6cef-0d88-4f3d-8b62-403596acc8b0\") " pod="openstack/manila-share-share1-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.735081 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/904e6cef-0d88-4f3d-8b62-403596acc8b0-etc-machine-id\") pod \"manila-share-share1-0\" (UID: \"904e6cef-0d88-4f3d-8b62-403596acc8b0\") " pod="openstack/manila-share-share1-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.744384 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/projected/904e6cef-0d88-4f3d-8b62-403596acc8b0-ceph\") pod \"manila-share-share1-0\" (UID: \"904e6cef-0d88-4f3d-8b62-403596acc8b0\") " pod="openstack/manila-share-share1-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.745098 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/904e6cef-0d88-4f3d-8b62-403596acc8b0-config-data\") pod \"manila-share-share1-0\" (UID: \"904e6cef-0d88-4f3d-8b62-403596acc8b0\") " pod="openstack/manila-share-share1-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.746690 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/904e6cef-0d88-4f3d-8b62-403596acc8b0-config-data-custom\") pod \"manila-share-share1-0\" (UID: \"904e6cef-0d88-4f3d-8b62-403596acc8b0\") " pod="openstack/manila-share-share1-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.747828 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/904e6cef-0d88-4f3d-8b62-403596acc8b0-combined-ca-bundle\") pod \"manila-share-share1-0\" (UID: \"904e6cef-0d88-4f3d-8b62-403596acc8b0\") " pod="openstack/manila-share-share1-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.748028 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/904e6cef-0d88-4f3d-8b62-403596acc8b0-scripts\") pod \"manila-share-share1-0\" (UID: \"904e6cef-0d88-4f3d-8b62-403596acc8b0\") " pod="openstack/manila-share-share1-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.763030 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-66f2f\" (UniqueName: \"kubernetes.io/projected/904e6cef-0d88-4f3d-8b62-403596acc8b0-kube-api-access-66f2f\") pod \"manila-share-share1-0\" (UID: \"904e6cef-0d88-4f3d-8b62-403596acc8b0\") " pod="openstack/manila-share-share1-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.763361 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-655767558f-bptdq"]
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.825911 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/manila-api-0"]
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.831895 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e578ed60-fa6a-4f91-a16d-eddf53b7a054-config\") pod \"dnsmasq-dns-655767558f-bptdq\" (UID: \"e578ed60-fa6a-4f91-a16d-eddf53b7a054\") " pod="openstack/dnsmasq-dns-655767558f-bptdq"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.832366 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e578ed60-fa6a-4f91-a16d-eddf53b7a054-ovsdbserver-nb\") pod \"dnsmasq-dns-655767558f-bptdq\" (UID: \"e578ed60-fa6a-4f91-a16d-eddf53b7a054\") " pod="openstack/dnsmasq-dns-655767558f-bptdq"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.832525 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w8rjg\" (UniqueName: \"kubernetes.io/projected/e578ed60-fa6a-4f91-a16d-eddf53b7a054-kube-api-access-w8rjg\") pod \"dnsmasq-dns-655767558f-bptdq\" (UID: \"e578ed60-fa6a-4f91-a16d-eddf53b7a054\") " pod="openstack/dnsmasq-dns-655767558f-bptdq"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.832699 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e578ed60-fa6a-4f91-a16d-eddf53b7a054-ovsdbserver-sb\") pod \"dnsmasq-dns-655767558f-bptdq\" (UID: \"e578ed60-fa6a-4f91-a16d-eddf53b7a054\") " pod="openstack/dnsmasq-dns-655767558f-bptdq"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.832968 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e578ed60-fa6a-4f91-a16d-eddf53b7a054-dns-svc\") pod \"dnsmasq-dns-655767558f-bptdq\" (UID: \"e578ed60-fa6a-4f91-a16d-eddf53b7a054\") " pod="openstack/dnsmasq-dns-655767558f-bptdq"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.836349 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"]
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.836502 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.843442 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"manila-api-config-data"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.939844 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e578ed60-fa6a-4f91-a16d-eddf53b7a054-ovsdbserver-sb\") pod \"dnsmasq-dns-655767558f-bptdq\" (UID: \"e578ed60-fa6a-4f91-a16d-eddf53b7a054\") " pod="openstack/dnsmasq-dns-655767558f-bptdq"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.940237 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1f657912-1eca-43ba-bd70-96ddd9745b78-config-data-custom\") pod \"manila-api-0\" (UID: \"1f657912-1eca-43ba-bd70-96ddd9745b78\") " pod="openstack/manila-api-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.940303 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1f657912-1eca-43ba-bd70-96ddd9745b78-scripts\") pod \"manila-api-0\" (UID: \"1f657912-1eca-43ba-bd70-96ddd9745b78\") " pod="openstack/manila-api-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.940346 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e578ed60-fa6a-4f91-a16d-eddf53b7a054-dns-svc\") pod \"dnsmasq-dns-655767558f-bptdq\" (UID: \"e578ed60-fa6a-4f91-a16d-eddf53b7a054\") " pod="openstack/dnsmasq-dns-655767558f-bptdq"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.940372 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jhnr6\" (UniqueName: \"kubernetes.io/projected/1f657912-1eca-43ba-bd70-96ddd9745b78-kube-api-access-jhnr6\") pod \"manila-api-0\" (UID: \"1f657912-1eca-43ba-bd70-96ddd9745b78\") " pod="openstack/manila-api-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.940417 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e578ed60-fa6a-4f91-a16d-eddf53b7a054-config\") pod \"dnsmasq-dns-655767558f-bptdq\" (UID: \"e578ed60-fa6a-4f91-a16d-eddf53b7a054\") " pod="openstack/dnsmasq-dns-655767558f-bptdq"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.940440 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f657912-1eca-43ba-bd70-96ddd9745b78-config-data\") pod \"manila-api-0\" (UID: \"1f657912-1eca-43ba-bd70-96ddd9745b78\") " pod="openstack/manila-api-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.940473 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e578ed60-fa6a-4f91-a16d-eddf53b7a054-ovsdbserver-nb\") pod \"dnsmasq-dns-655767558f-bptdq\" (UID: \"e578ed60-fa6a-4f91-a16d-eddf53b7a054\") " pod="openstack/dnsmasq-dns-655767558f-bptdq"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.940497 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f657912-1eca-43ba-bd70-96ddd9745b78-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"1f657912-1eca-43ba-bd70-96ddd9745b78\") " pod="openstack/manila-api-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.940523 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w8rjg\" (UniqueName: \"kubernetes.io/projected/e578ed60-fa6a-4f91-a16d-eddf53b7a054-kube-api-access-w8rjg\") pod \"dnsmasq-dns-655767558f-bptdq\" (UID: \"e578ed60-fa6a-4f91-a16d-eddf53b7a054\") " pod="openstack/dnsmasq-dns-655767558f-bptdq"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.940539 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1f657912-1eca-43ba-bd70-96ddd9745b78-logs\") pod \"manila-api-0\" (UID: \"1f657912-1eca-43ba-bd70-96ddd9745b78\") " pod="openstack/manila-api-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.940591 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1f657912-1eca-43ba-bd70-96ddd9745b78-etc-machine-id\") pod \"manila-api-0\" (UID: \"1f657912-1eca-43ba-bd70-96ddd9745b78\") " pod="openstack/manila-api-0"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.941105 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e578ed60-fa6a-4f91-a16d-eddf53b7a054-ovsdbserver-sb\") pod \"dnsmasq-dns-655767558f-bptdq\" (UID: \"e578ed60-fa6a-4f91-a16d-eddf53b7a054\") " pod="openstack/dnsmasq-dns-655767558f-bptdq"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.941760 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e578ed60-fa6a-4f91-a16d-eddf53b7a054-ovsdbserver-nb\") pod \"dnsmasq-dns-655767558f-bptdq\" (UID: \"e578ed60-fa6a-4f91-a16d-eddf53b7a054\") " pod="openstack/dnsmasq-dns-655767558f-bptdq"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.942497 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e578ed60-fa6a-4f91-a16d-eddf53b7a054-dns-svc\") pod \"dnsmasq-dns-655767558f-bptdq\" (UID: \"e578ed60-fa6a-4f91-a16d-eddf53b7a054\") " pod="openstack/dnsmasq-dns-655767558f-bptdq"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.942927 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e578ed60-fa6a-4f91-a16d-eddf53b7a054-config\") pod \"dnsmasq-dns-655767558f-bptdq\" (UID: \"e578ed60-fa6a-4f91-a16d-eddf53b7a054\") " pod="openstack/dnsmasq-dns-655767558f-bptdq"
Nov 28 15:17:31 crc kubenswrapper[4857]: I1128 15:17:31.976144 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-share-share1-0"
Nov 28 15:17:32 crc kubenswrapper[4857]: I1128 15:17:32.030792 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w8rjg\" (UniqueName: \"kubernetes.io/projected/e578ed60-fa6a-4f91-a16d-eddf53b7a054-kube-api-access-w8rjg\") pod \"dnsmasq-dns-655767558f-bptdq\" (UID: \"e578ed60-fa6a-4f91-a16d-eddf53b7a054\") " pod="openstack/dnsmasq-dns-655767558f-bptdq"
Nov 28 15:17:32 crc kubenswrapper[4857]: I1128 15:17:32.047403 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f657912-1eca-43ba-bd70-96ddd9745b78-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"1f657912-1eca-43ba-bd70-96ddd9745b78\") " pod="openstack/manila-api-0"
Nov 28 15:17:32 crc kubenswrapper[4857]: I1128 15:17:32.047463 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1f657912-1eca-43ba-bd70-96ddd9745b78-logs\") pod \"manila-api-0\" (UID: \"1f657912-1eca-43ba-bd70-96ddd9745b78\") " pod="openstack/manila-api-0"
Nov 28 15:17:32 crc kubenswrapper[4857]: I1128 15:17:32.047489 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1f657912-1eca-43ba-bd70-96ddd9745b78-etc-machine-id\") pod \"manila-api-0\" (UID: \"1f657912-1eca-43ba-bd70-96ddd9745b78\") " pod="openstack/manila-api-0"
Nov 28 15:17:32 crc kubenswrapper[4857]: I1128 15:17:32.047554 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1f657912-1eca-43ba-bd70-96ddd9745b78-config-data-custom\") pod \"manila-api-0\" (UID: \"1f657912-1eca-43ba-bd70-96ddd9745b78\") " pod="openstack/manila-api-0"
Nov 28 15:17:32 crc kubenswrapper[4857]: I1128 15:17:32.047615 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1f657912-1eca-43ba-bd70-96ddd9745b78-scripts\") pod \"manila-api-0\" (UID: \"1f657912-1eca-43ba-bd70-96ddd9745b78\") " pod="openstack/manila-api-0"
Nov 28 15:17:32 crc kubenswrapper[4857]: I1128 15:17:32.047660 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jhnr6\" (UniqueName: \"kubernetes.io/projected/1f657912-1eca-43ba-bd70-96ddd9745b78-kube-api-access-jhnr6\") pod \"manila-api-0\" (UID: \"1f657912-1eca-43ba-bd70-96ddd9745b78\") " pod="openstack/manila-api-0"
Nov 28 15:17:32 crc kubenswrapper[4857]: I1128 15:17:32.047708 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f657912-1eca-43ba-bd70-96ddd9745b78-config-data\") pod \"manila-api-0\" (UID: \"1f657912-1eca-43ba-bd70-96ddd9745b78\") " pod="openstack/manila-api-0"
Nov 28 15:17:32 crc kubenswrapper[4857]: I1128 15:17:32.053884 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1f657912-1eca-43ba-bd70-96ddd9745b78-logs\") pod \"manila-api-0\" (UID: \"1f657912-1eca-43ba-bd70-96ddd9745b78\") " pod="openstack/manila-api-0"
Nov 28 15:17:32 crc kubenswrapper[4857]: I1128 15:17:32.053983 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1f657912-1eca-43ba-bd70-96ddd9745b78-etc-machine-id\") pod \"manila-api-0\" (UID: \"1f657912-1eca-43ba-bd70-96ddd9745b78\") " pod="openstack/manila-api-0"
Nov 28 15:17:32 crc kubenswrapper[4857]: I1128 15:17:32.060294 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f657912-1eca-43ba-bd70-96ddd9745b78-combined-ca-bundle\") pod \"manila-api-0\" (UID: \"1f657912-1eca-43ba-bd70-96ddd9745b78\") " pod="openstack/manila-api-0"
Nov 28 15:17:32 crc kubenswrapper[4857]: I1128 15:17:32.060303 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1f657912-1eca-43ba-bd70-96ddd9745b78-config-data-custom\") pod \"manila-api-0\" (UID: \"1f657912-1eca-43ba-bd70-96ddd9745b78\") " pod="openstack/manila-api-0"
Nov 28 15:17:32 crc kubenswrapper[4857]: I1128 15:17:32.061680 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1f657912-1eca-43ba-bd70-96ddd9745b78-scripts\") pod \"manila-api-0\" (UID: \"1f657912-1eca-43ba-bd70-96ddd9745b78\") " pod="openstack/manila-api-0"
Nov 28 15:17:32 crc kubenswrapper[4857]: I1128 15:17:32.091109 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f657912-1eca-43ba-bd70-96ddd9745b78-config-data\") pod \"manila-api-0\" (UID: \"1f657912-1eca-43ba-bd70-96ddd9745b78\") " pod="openstack/manila-api-0"
Nov 28 15:17:32 crc kubenswrapper[4857]: I1128 15:17:32.099851 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jhnr6\" (UniqueName: \"kubernetes.io/projected/1f657912-1eca-43ba-bd70-96ddd9745b78-kube-api-access-jhnr6\") pod \"manila-api-0\" (UID: \"1f657912-1eca-43ba-bd70-96ddd9745b78\") " pod="openstack/manila-api-0"
Nov 28 15:17:32 crc kubenswrapper[4857]: I1128 15:17:32.150728 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-655767558f-bptdq"
Nov 28 15:17:32 crc kubenswrapper[4857]: I1128 15:17:32.182503 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/manila-api-0"
Nov 28 15:17:32 crc kubenswrapper[4857]: I1128 15:17:32.470406 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-scheduler-0"]
Nov 28 15:17:32 crc kubenswrapper[4857]: I1128 15:17:32.657827 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-share-share1-0"]
Nov 28 15:17:33 crc kubenswrapper[4857]: I1128 15:17:33.009682 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-655767558f-bptdq"]
Nov 28 15:17:33 crc kubenswrapper[4857]: I1128 15:17:33.045629 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/manila-api-0"]
Nov 28 15:17:33 crc kubenswrapper[4857]: W1128 15:17:33.084496 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode578ed60_fa6a_4f91_a16d_eddf53b7a054.slice/crio-2e60745731ec1649085df4390b509f145b42600c2264133f341a21a76a0783e7 WatchSource:0}: Error finding container 2e60745731ec1649085df4390b509f145b42600c2264133f341a21a76a0783e7: Status 404 returned error can't find the container with id 2e60745731ec1649085df4390b509f145b42600c2264133f341a21a76a0783e7
Nov 28 15:17:33 crc kubenswrapper[4857]: I1128 15:17:33.122391 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"904e6cef-0d88-4f3d-8b62-403596acc8b0","Type":"ContainerStarted","Data":"8c2cb567ed2ba8f7aca6a1244bd274e3d96c489ab78d01f9aaf1fdaa0aa66787"}
Nov 28 15:17:33 crc kubenswrapper[4857]: I1128 15:17:33.130683 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-655767558f-bptdq" event={"ID":"e578ed60-fa6a-4f91-a16d-eddf53b7a054","Type":"ContainerStarted","Data":"2e60745731ec1649085df4390b509f145b42600c2264133f341a21a76a0783e7"}
Nov 28 15:17:33 crc kubenswrapper[4857]: I1128 15:17:33.133614 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10","Type":"ContainerStarted","Data":"6a4d12f9954733f45dfcb181e95329e0f91f5bbdd3a948fa21e92f4d940ca74d"}
Nov 28 15:17:33 crc kubenswrapper[4857]: I1128 15:17:33.135344 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"1f657912-1eca-43ba-bd70-96ddd9745b78","Type":"ContainerStarted","Data":"2757b3ee2379c958c9d3e9094ec3d02f0f13fbd910bd879f773357f0bfba2cfa"}
Nov 28 15:17:34 crc kubenswrapper[4857]: I1128 15:17:34.167830 4857 generic.go:334] "Generic (PLEG): container finished" podID="e578ed60-fa6a-4f91-a16d-eddf53b7a054" containerID="52b55544e9e0f6fd65107df1be181aa57909cd3dfbab3560d9e850e2a2cbb8ad" exitCode=0
Nov 28 15:17:34 crc kubenswrapper[4857]: I1128 15:17:34.167985 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-655767558f-bptdq" event={"ID":"e578ed60-fa6a-4f91-a16d-eddf53b7a054","Type":"ContainerDied","Data":"52b55544e9e0f6fd65107df1be181aa57909cd3dfbab3560d9e850e2a2cbb8ad"}
Nov 28 15:17:34 crc kubenswrapper[4857]: I1128 15:17:34.170261 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10","Type":"ContainerStarted","Data":"a28cfee92897d0178143e0bd67787a5bda64ea93f0df84811bc66c89dbc49470"}
Nov 28 15:17:34 crc kubenswrapper[4857]: I1128 15:17:34.174146 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"1f657912-1eca-43ba-bd70-96ddd9745b78","Type":"ContainerStarted","Data":"ebf2384dd154de0e95c83618da6d2bf4f6c6c9e721d25aab37fe42ee9049a95b"}
Nov 28 15:17:35 crc kubenswrapper[4857]: I1128 15:17:35.202542 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-scheduler-0" event={"ID":"93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10","Type":"ContainerStarted","Data":"8b52f364ff808c47aa5f5f215a097c515eebe64092dc41de90630785a7da196c"}
Nov 28 15:17:35 crc kubenswrapper[4857]: I1128 15:17:35.210332 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/manila-api-0"
Nov 28 15:17:35 crc kubenswrapper[4857]: I1128 15:17:35.210929 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-api-0" event={"ID":"1f657912-1eca-43ba-bd70-96ddd9745b78","Type":"ContainerStarted","Data":"b5b47fb9f9aeaf9c735b39fc52734f2e0413713fc71e63954dbd046539847046"}
Nov 28 15:17:35 crc kubenswrapper[4857]: I1128 15:17:35.215406 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-655767558f-bptdq" event={"ID":"e578ed60-fa6a-4f91-a16d-eddf53b7a054","Type":"ContainerStarted","Data":"f23c816775c40d441ce000d9f7415adfe55ad80acf04b218e728bed52b4f9a0d"}
Nov 28 15:17:35 crc kubenswrapper[4857]: I1128 15:17:35.215574 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-655767558f-bptdq"
Nov 28 15:17:35 crc kubenswrapper[4857]: I1128 15:17:35.231292 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-scheduler-0" podStartSLOduration=3.572145904 podStartE2EDuration="4.231257109s" podCreationTimestamp="2025-11-28 15:17:31 +0000 UTC" firstStartedPulling="2025-11-28 15:17:32.484266735 +0000 UTC m=+6502.608208172" lastFinishedPulling="2025-11-28 15:17:33.14337794 +0000 UTC m=+6503.267319377" observedRunningTime="2025-11-28 15:17:35.222969837 +0000 UTC m=+6505.346911274" watchObservedRunningTime="2025-11-28 15:17:35.231257109 +0000 UTC m=+6505.355198546"
Nov 28 15:17:35 crc kubenswrapper[4857]: I1128 15:17:35.264603 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-655767558f-bptdq" podStartSLOduration=4.264576829 podStartE2EDuration="4.264576829s" podCreationTimestamp="2025-11-28 15:17:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:17:35.250746399 +0000 UTC m=+6505.374687836" watchObservedRunningTime="2025-11-28 15:17:35.264576829 +0000 UTC m=+6505.388518256"
Nov 28 15:17:35 crc kubenswrapper[4857]: I1128 15:17:35.285338 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-api-0" podStartSLOduration=4.285314133 podStartE2EDuration="4.285314133s" podCreationTimestamp="2025-11-28 15:17:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:17:35.282993321 +0000 UTC m=+6505.406934758" watchObservedRunningTime="2025-11-28 15:17:35.285314133 +0000 UTC m=+6505.409255570"
Nov 28 15:17:41 crc kubenswrapper[4857]: I1128 15:17:41.305195 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"904e6cef-0d88-4f3d-8b62-403596acc8b0","Type":"ContainerStarted","Data":"961200daf5e563aeb053a8f7d499b64f02440aa640e2aea7d7b0d23a81a21923"}
Nov 28 15:17:41 crc kubenswrapper[4857]: I1128 15:17:41.305987 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/manila-share-share1-0" event={"ID":"904e6cef-0d88-4f3d-8b62-403596acc8b0","Type":"ContainerStarted","Data":"bd281fd6401fc242835e31c3fe907a9e882474457f9bb2fce63730347b021114"}
Nov 28 15:17:41 crc kubenswrapper[4857]: I1128 15:17:41.308709 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 15:17:41 crc kubenswrapper[4857]: I1128 15:17:41.309024 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 15:17:41 crc kubenswrapper[4857]: I1128 15:17:41.309312 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dshsf"
Nov 28 15:17:41 crc kubenswrapper[4857]: I1128 15:17:41.310690 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5ef5285bd197ee35f66becdfa3ecde61afe107f66575e8853fe09967aa8011e3"} pod="openshift-machine-config-operator/machine-config-daemon-dshsf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 28 15:17:41 crc kubenswrapper[4857]: I1128 15:17:41.310996 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" containerID="cri-o://5ef5285bd197ee35f66becdfa3ecde61afe107f66575e8853fe09967aa8011e3" gracePeriod=600
Nov 28 15:17:41 crc kubenswrapper[4857]: I1128 15:17:41.348389 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/manila-share-share1-0" podStartSLOduration=3.127801726 podStartE2EDuration="10.348356371s" podCreationTimestamp="2025-11-28 15:17:31 +0000 UTC" firstStartedPulling="2025-11-28 15:17:32.668561228 +0000 UTC m=+6502.792502665" lastFinishedPulling="2025-11-28 15:17:39.889115863 +0000 UTC m=+6510.013057310" observedRunningTime="2025-11-28 15:17:41.343133022 +0000 UTC m=+6511.467074479" watchObservedRunningTime="2025-11-28 15:17:41.348356371 +0000 UTC m=+6511.472297838"
Nov 28 15:17:41 crc kubenswrapper[4857]: E1128 15:17:41.446273 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 15:17:41 crc kubenswrapper[4857]: I1128 15:17:41.702176 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-scheduler-0"
Nov 28 15:17:41 crc kubenswrapper[4857]: I1128 15:17:41.976831 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/manila-share-share1-0"
Nov 28 15:17:42 crc kubenswrapper[4857]: I1128 15:17:42.153176 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-655767558f-bptdq"
Nov 28 15:17:42 crc kubenswrapper[4857]: I1128 15:17:42.225508 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77946ff95c-6lx4x"]
Nov 28 15:17:42 crc kubenswrapper[4857]: I1128 15:17:42.226244 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-77946ff95c-6lx4x" podUID="38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a" containerName="dnsmasq-dns" containerID="cri-o://7ffd7115e9102ff97fdbe30e09d8be2f64b6b2cdb7486adf533e31b153b31e34" gracePeriod=10
Nov 28 15:17:42 crc kubenswrapper[4857]: I1128 15:17:42.326450 4857 generic.go:334] "Generic (PLEG): container finished" podID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerID="5ef5285bd197ee35f66becdfa3ecde61afe107f66575e8853fe09967aa8011e3" exitCode=0
Nov 28 15:17:42 crc kubenswrapper[4857]: I1128 15:17:42.327734 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerDied","Data":"5ef5285bd197ee35f66becdfa3ecde61afe107f66575e8853fe09967aa8011e3"}
Nov 28 15:17:42 crc kubenswrapper[4857]: I1128 15:17:42.328092 4857 scope.go:117] "RemoveContainer" containerID="cd65dc19359f44ea74d1e09fca4c6fc276670fdbfc5a599a66977a1d2ab62015"
Nov 28 15:17:42 crc kubenswrapper[4857]: I1128 15:17:42.328547 4857 scope.go:117] "RemoveContainer" containerID="5ef5285bd197ee35f66becdfa3ecde61afe107f66575e8853fe09967aa8011e3"
Nov 28 15:17:42 crc kubenswrapper[4857]: E1128 15:17:42.328822 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 15:17:42 crc kubenswrapper[4857]: I1128 15:17:42.875065 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-77946ff95c-6lx4x" Nov 28 15:17:42 crc kubenswrapper[4857]: I1128 15:17:42.960397 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a-ovsdbserver-sb\") pod \"38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a\" (UID: \"38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a\") " Nov 28 15:17:42 crc kubenswrapper[4857]: I1128 15:17:42.960483 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n58cl\" (UniqueName: \"kubernetes.io/projected/38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a-kube-api-access-n58cl\") pod \"38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a\" (UID: \"38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a\") " Nov 28 15:17:42 crc kubenswrapper[4857]: I1128 15:17:42.960541 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a-ovsdbserver-nb\") pod \"38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a\" (UID: \"38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a\") " Nov 28 15:17:42 crc kubenswrapper[4857]: I1128 15:17:42.960660 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a-config\") pod \"38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a\" (UID: \"38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a\") " Nov 28 15:17:42 crc kubenswrapper[4857]: I1128 15:17:42.960719 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a-dns-svc\") pod \"38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a\" (UID: \"38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a\") " Nov 28 15:17:42 crc kubenswrapper[4857]: I1128 15:17:42.966455 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a-kube-api-access-n58cl" (OuterVolumeSpecName: "kube-api-access-n58cl") pod "38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a" (UID: "38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a"). InnerVolumeSpecName "kube-api-access-n58cl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:17:43 crc kubenswrapper[4857]: I1128 15:17:43.022688 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a" (UID: "38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:17:43 crc kubenswrapper[4857]: I1128 15:17:43.038939 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a-config" (OuterVolumeSpecName: "config") pod "38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a" (UID: "38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:17:43 crc kubenswrapper[4857]: I1128 15:17:43.041723 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a" (UID: "38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:17:43 crc kubenswrapper[4857]: I1128 15:17:43.049090 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a" (UID: "38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:17:43 crc kubenswrapper[4857]: I1128 15:17:43.063557 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:17:43 crc kubenswrapper[4857]: I1128 15:17:43.063611 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 15:17:43 crc kubenswrapper[4857]: I1128 15:17:43.063621 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 15:17:43 crc kubenswrapper[4857]: I1128 15:17:43.063634 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n58cl\" (UniqueName: \"kubernetes.io/projected/38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a-kube-api-access-n58cl\") on node \"crc\" DevicePath \"\"" Nov 28 15:17:43 crc kubenswrapper[4857]: I1128 15:17:43.063644 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 15:17:43 crc kubenswrapper[4857]: I1128 15:17:43.340392 4857 generic.go:334] "Generic (PLEG): container finished" podID="38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a" containerID="7ffd7115e9102ff97fdbe30e09d8be2f64b6b2cdb7486adf533e31b153b31e34" exitCode=0 Nov 28 15:17:43 crc kubenswrapper[4857]: I1128 15:17:43.340471 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77946ff95c-6lx4x" event={"ID":"38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a","Type":"ContainerDied","Data":"7ffd7115e9102ff97fdbe30e09d8be2f64b6b2cdb7486adf533e31b153b31e34"} Nov 28 15:17:43 crc kubenswrapper[4857]: I1128 15:17:43.340510 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-77946ff95c-6lx4x" Nov 28 15:17:43 crc kubenswrapper[4857]: I1128 15:17:43.341857 4857 scope.go:117] "RemoveContainer" containerID="7ffd7115e9102ff97fdbe30e09d8be2f64b6b2cdb7486adf533e31b153b31e34" Nov 28 15:17:43 crc kubenswrapper[4857]: I1128 15:17:43.341725 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77946ff95c-6lx4x" event={"ID":"38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a","Type":"ContainerDied","Data":"c44de8e4a0f26f7e7bdde860bc83dc296ccdfa6ab4ca47ef9292708c46a5f1ac"} Nov 28 15:17:43 crc kubenswrapper[4857]: I1128 15:17:43.409840 4857 scope.go:117] "RemoveContainer" containerID="4731ce721e51c0bb599a4c795b3ecf341e61c98d1105fb770d63642092a40f6a" Nov 28 15:17:43 crc kubenswrapper[4857]: I1128 15:17:43.414589 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77946ff95c-6lx4x"] Nov 28 15:17:43 crc kubenswrapper[4857]: I1128 15:17:43.425299 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-77946ff95c-6lx4x"] Nov 28 15:17:43 crc kubenswrapper[4857]: I1128 15:17:43.445906 4857 scope.go:117] "RemoveContainer" containerID="7ffd7115e9102ff97fdbe30e09d8be2f64b6b2cdb7486adf533e31b153b31e34" Nov 28 15:17:43 crc kubenswrapper[4857]: E1128 15:17:43.452473 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7ffd7115e9102ff97fdbe30e09d8be2f64b6b2cdb7486adf533e31b153b31e34\": container with ID starting with 7ffd7115e9102ff97fdbe30e09d8be2f64b6b2cdb7486adf533e31b153b31e34 not found: ID does not exist" containerID="7ffd7115e9102ff97fdbe30e09d8be2f64b6b2cdb7486adf533e31b153b31e34" Nov 28 15:17:43 crc kubenswrapper[4857]: I1128 15:17:43.452524 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7ffd7115e9102ff97fdbe30e09d8be2f64b6b2cdb7486adf533e31b153b31e34"} err="failed to get container status \"7ffd7115e9102ff97fdbe30e09d8be2f64b6b2cdb7486adf533e31b153b31e34\": rpc error: code = NotFound desc = could not find container \"7ffd7115e9102ff97fdbe30e09d8be2f64b6b2cdb7486adf533e31b153b31e34\": container with ID starting with 7ffd7115e9102ff97fdbe30e09d8be2f64b6b2cdb7486adf533e31b153b31e34 not found: ID does not exist" Nov 28 15:17:43 crc kubenswrapper[4857]: I1128 15:17:43.452582 4857 scope.go:117] "RemoveContainer" containerID="4731ce721e51c0bb599a4c795b3ecf341e61c98d1105fb770d63642092a40f6a" Nov 28 15:17:43 crc kubenswrapper[4857]: E1128 15:17:43.452919 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4731ce721e51c0bb599a4c795b3ecf341e61c98d1105fb770d63642092a40f6a\": container with ID starting with 4731ce721e51c0bb599a4c795b3ecf341e61c98d1105fb770d63642092a40f6a not found: ID does not exist" containerID="4731ce721e51c0bb599a4c795b3ecf341e61c98d1105fb770d63642092a40f6a" Nov 28 15:17:43 crc kubenswrapper[4857]: I1128 15:17:43.452972 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4731ce721e51c0bb599a4c795b3ecf341e61c98d1105fb770d63642092a40f6a"} err="failed to get container status \"4731ce721e51c0bb599a4c795b3ecf341e61c98d1105fb770d63642092a40f6a\": rpc error: code = NotFound desc = could not find container \"4731ce721e51c0bb599a4c795b3ecf341e61c98d1105fb770d63642092a40f6a\": container with ID starting with 4731ce721e51c0bb599a4c795b3ecf341e61c98d1105fb770d63642092a40f6a not found: ID does not exist" Nov 28 
15:17:44 crc kubenswrapper[4857]: I1128 15:17:44.243836 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a" path="/var/lib/kubelet/pods/38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a/volumes" Nov 28 15:17:44 crc kubenswrapper[4857]: I1128 15:17:44.355666 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:17:44 crc kubenswrapper[4857]: I1128 15:17:44.356015 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="aa4d8101-f823-4bc9-8042-57dd8c70af27" containerName="ceilometer-central-agent" containerID="cri-o://aaf55408d02a69651e640ca6158cbcb2120b129ecabe87cf828d559c4b6657c1" gracePeriod=30 Nov 28 15:17:44 crc kubenswrapper[4857]: I1128 15:17:44.356088 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="aa4d8101-f823-4bc9-8042-57dd8c70af27" containerName="ceilometer-notification-agent" containerID="cri-o://06898a54470bd38c2b2f99a5454dc46b10250484593d05c664daeb8989cbdfdd" gracePeriod=30 Nov 28 15:17:44 crc kubenswrapper[4857]: I1128 15:17:44.356108 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="aa4d8101-f823-4bc9-8042-57dd8c70af27" containerName="sg-core" containerID="cri-o://c476c9b6874639293a506752a48ab47d73f550024729b6bc830d0fdfeb201722" gracePeriod=30 Nov 28 15:17:44 crc kubenswrapper[4857]: I1128 15:17:44.356129 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="aa4d8101-f823-4bc9-8042-57dd8c70af27" containerName="proxy-httpd" containerID="cri-o://1bcdbe82eec9679518b5db412823dabed0acea0c5b52fa944ee72c0e314d0470" gracePeriod=30 Nov 28 15:17:45 crc kubenswrapper[4857]: I1128 15:17:45.369396 4857 generic.go:334] "Generic (PLEG): container finished" podID="aa4d8101-f823-4bc9-8042-57dd8c70af27" containerID="1bcdbe82eec9679518b5db412823dabed0acea0c5b52fa944ee72c0e314d0470" exitCode=0 Nov 28 15:17:45 crc kubenswrapper[4857]: I1128 15:17:45.370008 4857 generic.go:334] "Generic (PLEG): container finished" podID="aa4d8101-f823-4bc9-8042-57dd8c70af27" containerID="c476c9b6874639293a506752a48ab47d73f550024729b6bc830d0fdfeb201722" exitCode=2 Nov 28 15:17:45 crc kubenswrapper[4857]: I1128 15:17:45.370022 4857 generic.go:334] "Generic (PLEG): container finished" podID="aa4d8101-f823-4bc9-8042-57dd8c70af27" containerID="aaf55408d02a69651e640ca6158cbcb2120b129ecabe87cf828d559c4b6657c1" exitCode=0 Nov 28 15:17:45 crc kubenswrapper[4857]: I1128 15:17:45.369456 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"aa4d8101-f823-4bc9-8042-57dd8c70af27","Type":"ContainerDied","Data":"1bcdbe82eec9679518b5db412823dabed0acea0c5b52fa944ee72c0e314d0470"} Nov 28 15:17:45 crc kubenswrapper[4857]: I1128 15:17:45.370069 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"aa4d8101-f823-4bc9-8042-57dd8c70af27","Type":"ContainerDied","Data":"c476c9b6874639293a506752a48ab47d73f550024729b6bc830d0fdfeb201722"} Nov 28 15:17:45 crc kubenswrapper[4857]: I1128 15:17:45.370089 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"aa4d8101-f823-4bc9-8042-57dd8c70af27","Type":"ContainerDied","Data":"aaf55408d02a69651e640ca6158cbcb2120b129ecabe87cf828d559c4b6657c1"} Nov 28 15:17:50 crc kubenswrapper[4857]: I1128 15:17:50.429803 4857 generic.go:334] "Generic (PLEG): 
container finished" podID="aa4d8101-f823-4bc9-8042-57dd8c70af27" containerID="06898a54470bd38c2b2f99a5454dc46b10250484593d05c664daeb8989cbdfdd" exitCode=0 Nov 28 15:17:50 crc kubenswrapper[4857]: I1128 15:17:50.429869 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"aa4d8101-f823-4bc9-8042-57dd8c70af27","Type":"ContainerDied","Data":"06898a54470bd38c2b2f99a5454dc46b10250484593d05c664daeb8989cbdfdd"} Nov 28 15:17:50 crc kubenswrapper[4857]: I1128 15:17:50.558176 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:17:50 crc kubenswrapper[4857]: I1128 15:17:50.644498 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/aa4d8101-f823-4bc9-8042-57dd8c70af27-sg-core-conf-yaml\") pod \"aa4d8101-f823-4bc9-8042-57dd8c70af27\" (UID: \"aa4d8101-f823-4bc9-8042-57dd8c70af27\") " Nov 28 15:17:50 crc kubenswrapper[4857]: I1128 15:17:50.644680 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/aa4d8101-f823-4bc9-8042-57dd8c70af27-log-httpd\") pod \"aa4d8101-f823-4bc9-8042-57dd8c70af27\" (UID: \"aa4d8101-f823-4bc9-8042-57dd8c70af27\") " Nov 28 15:17:50 crc kubenswrapper[4857]: I1128 15:17:50.644764 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/aa4d8101-f823-4bc9-8042-57dd8c70af27-run-httpd\") pod \"aa4d8101-f823-4bc9-8042-57dd8c70af27\" (UID: \"aa4d8101-f823-4bc9-8042-57dd8c70af27\") " Nov 28 15:17:50 crc kubenswrapper[4857]: I1128 15:17:50.644856 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ncr4g\" (UniqueName: \"kubernetes.io/projected/aa4d8101-f823-4bc9-8042-57dd8c70af27-kube-api-access-ncr4g\") pod \"aa4d8101-f823-4bc9-8042-57dd8c70af27\" (UID: \"aa4d8101-f823-4bc9-8042-57dd8c70af27\") " Nov 28 15:17:50 crc kubenswrapper[4857]: I1128 15:17:50.644923 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa4d8101-f823-4bc9-8042-57dd8c70af27-config-data\") pod \"aa4d8101-f823-4bc9-8042-57dd8c70af27\" (UID: \"aa4d8101-f823-4bc9-8042-57dd8c70af27\") " Nov 28 15:17:50 crc kubenswrapper[4857]: I1128 15:17:50.645015 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aa4d8101-f823-4bc9-8042-57dd8c70af27-scripts\") pod \"aa4d8101-f823-4bc9-8042-57dd8c70af27\" (UID: \"aa4d8101-f823-4bc9-8042-57dd8c70af27\") " Nov 28 15:17:50 crc kubenswrapper[4857]: I1128 15:17:50.645078 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa4d8101-f823-4bc9-8042-57dd8c70af27-combined-ca-bundle\") pod \"aa4d8101-f823-4bc9-8042-57dd8c70af27\" (UID: \"aa4d8101-f823-4bc9-8042-57dd8c70af27\") " Nov 28 15:17:50 crc kubenswrapper[4857]: I1128 15:17:50.647150 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aa4d8101-f823-4bc9-8042-57dd8c70af27-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "aa4d8101-f823-4bc9-8042-57dd8c70af27" (UID: "aa4d8101-f823-4bc9-8042-57dd8c70af27"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:17:50 crc kubenswrapper[4857]: I1128 15:17:50.648421 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aa4d8101-f823-4bc9-8042-57dd8c70af27-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "aa4d8101-f823-4bc9-8042-57dd8c70af27" (UID: "aa4d8101-f823-4bc9-8042-57dd8c70af27"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:17:50 crc kubenswrapper[4857]: I1128 15:17:50.658131 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa4d8101-f823-4bc9-8042-57dd8c70af27-kube-api-access-ncr4g" (OuterVolumeSpecName: "kube-api-access-ncr4g") pod "aa4d8101-f823-4bc9-8042-57dd8c70af27" (UID: "aa4d8101-f823-4bc9-8042-57dd8c70af27"). InnerVolumeSpecName "kube-api-access-ncr4g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:17:50 crc kubenswrapper[4857]: I1128 15:17:50.673244 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa4d8101-f823-4bc9-8042-57dd8c70af27-scripts" (OuterVolumeSpecName: "scripts") pod "aa4d8101-f823-4bc9-8042-57dd8c70af27" (UID: "aa4d8101-f823-4bc9-8042-57dd8c70af27"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:17:50 crc kubenswrapper[4857]: I1128 15:17:50.696129 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa4d8101-f823-4bc9-8042-57dd8c70af27-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "aa4d8101-f823-4bc9-8042-57dd8c70af27" (UID: "aa4d8101-f823-4bc9-8042-57dd8c70af27"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:17:50 crc kubenswrapper[4857]: I1128 15:17:50.742581 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa4d8101-f823-4bc9-8042-57dd8c70af27-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "aa4d8101-f823-4bc9-8042-57dd8c70af27" (UID: "aa4d8101-f823-4bc9-8042-57dd8c70af27"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:17:50 crc kubenswrapper[4857]: I1128 15:17:50.747748 4857 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/aa4d8101-f823-4bc9-8042-57dd8c70af27-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 15:17:50 crc kubenswrapper[4857]: I1128 15:17:50.747779 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ncr4g\" (UniqueName: \"kubernetes.io/projected/aa4d8101-f823-4bc9-8042-57dd8c70af27-kube-api-access-ncr4g\") on node \"crc\" DevicePath \"\"" Nov 28 15:17:50 crc kubenswrapper[4857]: I1128 15:17:50.747790 4857 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aa4d8101-f823-4bc9-8042-57dd8c70af27-scripts\") on node \"crc\" DevicePath \"\"" Nov 28 15:17:50 crc kubenswrapper[4857]: I1128 15:17:50.747800 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aa4d8101-f823-4bc9-8042-57dd8c70af27-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:17:50 crc kubenswrapper[4857]: I1128 15:17:50.747809 4857 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/aa4d8101-f823-4bc9-8042-57dd8c70af27-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 28 15:17:50 crc kubenswrapper[4857]: I1128 15:17:50.747843 4857 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/aa4d8101-f823-4bc9-8042-57dd8c70af27-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 28 15:17:50 crc kubenswrapper[4857]: I1128 15:17:50.769521 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa4d8101-f823-4bc9-8042-57dd8c70af27-config-data" (OuterVolumeSpecName: "config-data") pod "aa4d8101-f823-4bc9-8042-57dd8c70af27" (UID: "aa4d8101-f823-4bc9-8042-57dd8c70af27"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:17:50 crc kubenswrapper[4857]: I1128 15:17:50.851553 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aa4d8101-f823-4bc9-8042-57dd8c70af27-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.445060 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"aa4d8101-f823-4bc9-8042-57dd8c70af27","Type":"ContainerDied","Data":"31976e9e81f79451c70b9a5e71efd22ed765ded0557cc09c7d18d92d391696da"} Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.445122 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.445136 4857 scope.go:117] "RemoveContainer" containerID="1bcdbe82eec9679518b5db412823dabed0acea0c5b52fa944ee72c0e314d0470" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.511158 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.513222 4857 scope.go:117] "RemoveContainer" containerID="c476c9b6874639293a506752a48ab47d73f550024729b6bc830d0fdfeb201722" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.526930 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.540842 4857 scope.go:117] "RemoveContainer" containerID="06898a54470bd38c2b2f99a5454dc46b10250484593d05c664daeb8989cbdfdd" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.541185 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:17:51 crc kubenswrapper[4857]: E1128 15:17:51.542090 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a" containerName="dnsmasq-dns" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.542118 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a" containerName="dnsmasq-dns" Nov 28 15:17:51 crc kubenswrapper[4857]: E1128 15:17:51.542133 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a" containerName="init" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.542143 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a" containerName="init" Nov 28 15:17:51 crc kubenswrapper[4857]: E1128 15:17:51.542166 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa4d8101-f823-4bc9-8042-57dd8c70af27" containerName="ceilometer-notification-agent" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.542175 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa4d8101-f823-4bc9-8042-57dd8c70af27" containerName="ceilometer-notification-agent" Nov 28 15:17:51 crc kubenswrapper[4857]: E1128 15:17:51.542191 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa4d8101-f823-4bc9-8042-57dd8c70af27" containerName="sg-core" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.542199 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa4d8101-f823-4bc9-8042-57dd8c70af27" containerName="sg-core" Nov 28 15:17:51 crc kubenswrapper[4857]: E1128 15:17:51.542215 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa4d8101-f823-4bc9-8042-57dd8c70af27" containerName="proxy-httpd" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.542223 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa4d8101-f823-4bc9-8042-57dd8c70af27" containerName="proxy-httpd" Nov 28 15:17:51 crc kubenswrapper[4857]: E1128 15:17:51.542260 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa4d8101-f823-4bc9-8042-57dd8c70af27" containerName="ceilometer-central-agent" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.542269 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa4d8101-f823-4bc9-8042-57dd8c70af27" containerName="ceilometer-central-agent" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.542570 4857 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="aa4d8101-f823-4bc9-8042-57dd8c70af27" containerName="sg-core" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.542597 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="38456bfe-5e31-4d4f-9cb7-12c4bb7fdf2a" containerName="dnsmasq-dns" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.542617 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa4d8101-f823-4bc9-8042-57dd8c70af27" containerName="ceilometer-central-agent" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.542639 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa4d8101-f823-4bc9-8042-57dd8c70af27" containerName="ceilometer-notification-agent" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.542656 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa4d8101-f823-4bc9-8042-57dd8c70af27" containerName="proxy-httpd" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.545523 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.554261 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.554581 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.556897 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.586253 4857 scope.go:117] "RemoveContainer" containerID="aaf55408d02a69651e640ca6158cbcb2120b129ecabe87cf828d559c4b6657c1" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.676545 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/32ca0f07-a0bc-43f3-8095-256d0b40e335-log-httpd\") pod \"ceilometer-0\" (UID: \"32ca0f07-a0bc-43f3-8095-256d0b40e335\") " pod="openstack/ceilometer-0" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.676695 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32ca0f07-a0bc-43f3-8095-256d0b40e335-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"32ca0f07-a0bc-43f3-8095-256d0b40e335\") " pod="openstack/ceilometer-0" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.676888 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/32ca0f07-a0bc-43f3-8095-256d0b40e335-scripts\") pod \"ceilometer-0\" (UID: \"32ca0f07-a0bc-43f3-8095-256d0b40e335\") " pod="openstack/ceilometer-0" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.676930 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32ca0f07-a0bc-43f3-8095-256d0b40e335-config-data\") pod \"ceilometer-0\" (UID: \"32ca0f07-a0bc-43f3-8095-256d0b40e335\") " pod="openstack/ceilometer-0" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.676984 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/32ca0f07-a0bc-43f3-8095-256d0b40e335-run-httpd\") pod \"ceilometer-0\" (UID: \"32ca0f07-a0bc-43f3-8095-256d0b40e335\") " 
pod="openstack/ceilometer-0" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.677041 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/32ca0f07-a0bc-43f3-8095-256d0b40e335-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"32ca0f07-a0bc-43f3-8095-256d0b40e335\") " pod="openstack/ceilometer-0" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.677177 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phfpg\" (UniqueName: \"kubernetes.io/projected/32ca0f07-a0bc-43f3-8095-256d0b40e335-kube-api-access-phfpg\") pod \"ceilometer-0\" (UID: \"32ca0f07-a0bc-43f3-8095-256d0b40e335\") " pod="openstack/ceilometer-0" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.780236 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/32ca0f07-a0bc-43f3-8095-256d0b40e335-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"32ca0f07-a0bc-43f3-8095-256d0b40e335\") " pod="openstack/ceilometer-0" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.780872 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-phfpg\" (UniqueName: \"kubernetes.io/projected/32ca0f07-a0bc-43f3-8095-256d0b40e335-kube-api-access-phfpg\") pod \"ceilometer-0\" (UID: \"32ca0f07-a0bc-43f3-8095-256d0b40e335\") " pod="openstack/ceilometer-0" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.781027 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/32ca0f07-a0bc-43f3-8095-256d0b40e335-log-httpd\") pod \"ceilometer-0\" (UID: \"32ca0f07-a0bc-43f3-8095-256d0b40e335\") " pod="openstack/ceilometer-0" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.781108 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32ca0f07-a0bc-43f3-8095-256d0b40e335-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"32ca0f07-a0bc-43f3-8095-256d0b40e335\") " pod="openstack/ceilometer-0" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.781209 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/32ca0f07-a0bc-43f3-8095-256d0b40e335-scripts\") pod \"ceilometer-0\" (UID: \"32ca0f07-a0bc-43f3-8095-256d0b40e335\") " pod="openstack/ceilometer-0" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.781240 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32ca0f07-a0bc-43f3-8095-256d0b40e335-config-data\") pod \"ceilometer-0\" (UID: \"32ca0f07-a0bc-43f3-8095-256d0b40e335\") " pod="openstack/ceilometer-0" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.781276 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/32ca0f07-a0bc-43f3-8095-256d0b40e335-run-httpd\") pod \"ceilometer-0\" (UID: \"32ca0f07-a0bc-43f3-8095-256d0b40e335\") " pod="openstack/ceilometer-0" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.781832 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/32ca0f07-a0bc-43f3-8095-256d0b40e335-run-httpd\") pod \"ceilometer-0\" (UID: 
\"32ca0f07-a0bc-43f3-8095-256d0b40e335\") " pod="openstack/ceilometer-0" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.783167 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/32ca0f07-a0bc-43f3-8095-256d0b40e335-log-httpd\") pod \"ceilometer-0\" (UID: \"32ca0f07-a0bc-43f3-8095-256d0b40e335\") " pod="openstack/ceilometer-0" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.786632 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/32ca0f07-a0bc-43f3-8095-256d0b40e335-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"32ca0f07-a0bc-43f3-8095-256d0b40e335\") " pod="openstack/ceilometer-0" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.787278 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32ca0f07-a0bc-43f3-8095-256d0b40e335-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"32ca0f07-a0bc-43f3-8095-256d0b40e335\") " pod="openstack/ceilometer-0" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.788103 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/32ca0f07-a0bc-43f3-8095-256d0b40e335-scripts\") pod \"ceilometer-0\" (UID: \"32ca0f07-a0bc-43f3-8095-256d0b40e335\") " pod="openstack/ceilometer-0" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.789281 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32ca0f07-a0bc-43f3-8095-256d0b40e335-config-data\") pod \"ceilometer-0\" (UID: \"32ca0f07-a0bc-43f3-8095-256d0b40e335\") " pod="openstack/ceilometer-0" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.801904 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-phfpg\" (UniqueName: \"kubernetes.io/projected/32ca0f07-a0bc-43f3-8095-256d0b40e335-kube-api-access-phfpg\") pod \"ceilometer-0\" (UID: \"32ca0f07-a0bc-43f3-8095-256d0b40e335\") " pod="openstack/ceilometer-0" Nov 28 15:17:51 crc kubenswrapper[4857]: I1128 15:17:51.870707 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 28 15:17:52 crc kubenswrapper[4857]: I1128 15:17:52.252225 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa4d8101-f823-4bc9-8042-57dd8c70af27" path="/var/lib/kubelet/pods/aa4d8101-f823-4bc9-8042-57dd8c70af27/volumes" Nov 28 15:17:52 crc kubenswrapper[4857]: I1128 15:17:52.504145 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 28 15:17:52 crc kubenswrapper[4857]: W1128 15:17:52.514654 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod32ca0f07_a0bc_43f3_8095_256d0b40e335.slice/crio-58e9ac9b23047bedb0b9d8dcf7970b3c53c7ac6067a4723031fe68816ce57c12 WatchSource:0}: Error finding container 58e9ac9b23047bedb0b9d8dcf7970b3c53c7ac6067a4723031fe68816ce57c12: Status 404 returned error can't find the container with id 58e9ac9b23047bedb0b9d8dcf7970b3c53c7ac6067a4723031fe68816ce57c12 Nov 28 15:17:53 crc kubenswrapper[4857]: I1128 15:17:53.230186 4857 scope.go:117] "RemoveContainer" containerID="5ef5285bd197ee35f66becdfa3ecde61afe107f66575e8853fe09967aa8011e3" Nov 28 15:17:53 crc kubenswrapper[4857]: E1128 15:17:53.230858 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:17:53 crc kubenswrapper[4857]: I1128 15:17:53.395018 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-scheduler-0" Nov 28 15:17:53 crc kubenswrapper[4857]: I1128 15:17:53.480735 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"32ca0f07-a0bc-43f3-8095-256d0b40e335","Type":"ContainerStarted","Data":"58e9ac9b23047bedb0b9d8dcf7970b3c53c7ac6067a4723031fe68816ce57c12"} Nov 28 15:17:53 crc kubenswrapper[4857]: I1128 15:17:53.553236 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/manila-api-0" Nov 28 15:17:53 crc kubenswrapper[4857]: I1128 15:17:53.807335 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/manila-share-share1-0" Nov 28 15:17:54 crc kubenswrapper[4857]: I1128 15:17:54.491902 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"32ca0f07-a0bc-43f3-8095-256d0b40e335","Type":"ContainerStarted","Data":"63dcd2be8a8a74ca0381646e3baf8563f206407a5d700c96f2a5555a8530c4ba"} Nov 28 15:17:55 crc kubenswrapper[4857]: I1128 15:17:55.513251 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"32ca0f07-a0bc-43f3-8095-256d0b40e335","Type":"ContainerStarted","Data":"c967237cf2363d6451fcea1b4bd48eff1168ee3a58456b24cc5dca5db5d7f275"} Nov 28 15:17:56 crc kubenswrapper[4857]: I1128 15:17:56.530515 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"32ca0f07-a0bc-43f3-8095-256d0b40e335","Type":"ContainerStarted","Data":"ab67741f1e39cf2c0a7551b6e3cdad4682e0a00ccc97d82d3b35b1c25907c324"} Nov 28 15:17:58 crc kubenswrapper[4857]: I1128 15:17:58.556359 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"32ca0f07-a0bc-43f3-8095-256d0b40e335","Type":"ContainerStarted","Data":"608be756b447bb12350fe358c9dd8309b171b815fd17bf5b3f325e8c189bc6f5"} Nov 28 15:17:58 crc kubenswrapper[4857]: I1128 15:17:58.559253 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 28 15:17:58 crc kubenswrapper[4857]: I1128 15:17:58.582199 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.850753038 podStartE2EDuration="7.582176708s" podCreationTimestamp="2025-11-28 15:17:51 +0000 UTC" firstStartedPulling="2025-11-28 15:17:52.518450281 +0000 UTC m=+6522.642391718" lastFinishedPulling="2025-11-28 15:17:57.249873951 +0000 UTC m=+6527.373815388" observedRunningTime="2025-11-28 15:17:58.577512744 +0000 UTC m=+6528.701454191" watchObservedRunningTime="2025-11-28 15:17:58.582176708 +0000 UTC m=+6528.706118145" Nov 28 15:18:05 crc kubenswrapper[4857]: I1128 15:18:05.230352 4857 scope.go:117] "RemoveContainer" containerID="5ef5285bd197ee35f66becdfa3ecde61afe107f66575e8853fe09967aa8011e3" Nov 28 15:18:05 crc kubenswrapper[4857]: E1128 15:18:05.231865 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:18:16 crc kubenswrapper[4857]: I1128 15:18:16.229607 4857 scope.go:117] "RemoveContainer" containerID="5ef5285bd197ee35f66becdfa3ecde61afe107f66575e8853fe09967aa8011e3" Nov 28 15:18:16 crc kubenswrapper[4857]: E1128 15:18:16.230961 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:18:23 crc kubenswrapper[4857]: I1128 15:18:23.133805 4857 patch_prober.go:28] interesting pod/console-67bf585c5f-wk8qv container/console namespace/openshift-console: Readiness probe status=failure output="Get \"https://10.217.0.53:8443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 28 15:18:23 crc kubenswrapper[4857]: I1128 15:18:23.142360 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/console-67bf585c5f-wk8qv" podUID="dcfbc948-be7e-49c8-aa2f-a434a950c498" containerName="console" probeResult="failure" output="Get \"https://10.217.0.53:8443/health\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 28 15:18:23 crc kubenswrapper[4857]: I1128 15:18:23.146086 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 28 15:18:28 crc kubenswrapper[4857]: I1128 15:18:28.228506 4857 scope.go:117] "RemoveContainer" containerID="5ef5285bd197ee35f66becdfa3ecde61afe107f66575e8853fe09967aa8011e3" Nov 28 15:18:28 crc kubenswrapper[4857]: E1128 15:18:28.229591 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:18:39 crc kubenswrapper[4857]: I1128 15:18:39.229479 4857 scope.go:117] "RemoveContainer" containerID="5ef5285bd197ee35f66becdfa3ecde61afe107f66575e8853fe09967aa8011e3" Nov 28 15:18:39 crc kubenswrapper[4857]: E1128 15:18:39.230303 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:18:42 crc kubenswrapper[4857]: I1128 15:18:42.715108 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5f58fdf755-h5cch"] Nov 28 15:18:42 crc kubenswrapper[4857]: I1128 15:18:42.717678 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f58fdf755-h5cch" Nov 28 15:18:42 crc kubenswrapper[4857]: I1128 15:18:42.720728 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1" Nov 28 15:18:42 crc kubenswrapper[4857]: I1128 15:18:42.729032 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f58fdf755-h5cch"] Nov 28 15:18:42 crc kubenswrapper[4857]: I1128 15:18:42.797783 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tvgc5\" (UniqueName: \"kubernetes.io/projected/70e67834-bd76-4f5f-a2f6-e845622547ef-kube-api-access-tvgc5\") pod \"dnsmasq-dns-5f58fdf755-h5cch\" (UID: \"70e67834-bd76-4f5f-a2f6-e845622547ef\") " pod="openstack/dnsmasq-dns-5f58fdf755-h5cch" Nov 28 15:18:42 crc kubenswrapper[4857]: I1128 15:18:42.797865 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/70e67834-bd76-4f5f-a2f6-e845622547ef-ovsdbserver-nb\") pod \"dnsmasq-dns-5f58fdf755-h5cch\" (UID: \"70e67834-bd76-4f5f-a2f6-e845622547ef\") " pod="openstack/dnsmasq-dns-5f58fdf755-h5cch" Nov 28 15:18:42 crc kubenswrapper[4857]: I1128 15:18:42.798291 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/70e67834-bd76-4f5f-a2f6-e845622547ef-ovsdbserver-sb\") pod \"dnsmasq-dns-5f58fdf755-h5cch\" (UID: \"70e67834-bd76-4f5f-a2f6-e845622547ef\") " pod="openstack/dnsmasq-dns-5f58fdf755-h5cch" Nov 28 15:18:42 crc kubenswrapper[4857]: I1128 15:18:42.798364 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/70e67834-bd76-4f5f-a2f6-e845622547ef-openstack-cell1\") pod \"dnsmasq-dns-5f58fdf755-h5cch\" (UID: \"70e67834-bd76-4f5f-a2f6-e845622547ef\") " pod="openstack/dnsmasq-dns-5f58fdf755-h5cch" Nov 28 15:18:42 crc kubenswrapper[4857]: I1128 15:18:42.798984 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/70e67834-bd76-4f5f-a2f6-e845622547ef-config\") pod \"dnsmasq-dns-5f58fdf755-h5cch\" (UID: \"70e67834-bd76-4f5f-a2f6-e845622547ef\") " pod="openstack/dnsmasq-dns-5f58fdf755-h5cch" Nov 28 15:18:42 crc kubenswrapper[4857]: I1128 15:18:42.799097 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/70e67834-bd76-4f5f-a2f6-e845622547ef-dns-svc\") pod \"dnsmasq-dns-5f58fdf755-h5cch\" (UID: \"70e67834-bd76-4f5f-a2f6-e845622547ef\") " pod="openstack/dnsmasq-dns-5f58fdf755-h5cch" Nov 28 15:18:42 crc kubenswrapper[4857]: I1128 15:18:42.902178 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tvgc5\" (UniqueName: \"kubernetes.io/projected/70e67834-bd76-4f5f-a2f6-e845622547ef-kube-api-access-tvgc5\") pod \"dnsmasq-dns-5f58fdf755-h5cch\" (UID: \"70e67834-bd76-4f5f-a2f6-e845622547ef\") " pod="openstack/dnsmasq-dns-5f58fdf755-h5cch" Nov 28 15:18:42 crc kubenswrapper[4857]: I1128 15:18:42.902269 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/70e67834-bd76-4f5f-a2f6-e845622547ef-ovsdbserver-nb\") pod \"dnsmasq-dns-5f58fdf755-h5cch\" (UID: \"70e67834-bd76-4f5f-a2f6-e845622547ef\") " pod="openstack/dnsmasq-dns-5f58fdf755-h5cch" Nov 28 15:18:42 crc kubenswrapper[4857]: I1128 15:18:42.902335 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/70e67834-bd76-4f5f-a2f6-e845622547ef-ovsdbserver-sb\") pod \"dnsmasq-dns-5f58fdf755-h5cch\" (UID: \"70e67834-bd76-4f5f-a2f6-e845622547ef\") " pod="openstack/dnsmasq-dns-5f58fdf755-h5cch" Nov 28 15:18:42 crc kubenswrapper[4857]: I1128 15:18:42.902355 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/70e67834-bd76-4f5f-a2f6-e845622547ef-openstack-cell1\") pod \"dnsmasq-dns-5f58fdf755-h5cch\" (UID: \"70e67834-bd76-4f5f-a2f6-e845622547ef\") " pod="openstack/dnsmasq-dns-5f58fdf755-h5cch" Nov 28 15:18:42 crc kubenswrapper[4857]: I1128 15:18:42.902482 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/70e67834-bd76-4f5f-a2f6-e845622547ef-config\") pod \"dnsmasq-dns-5f58fdf755-h5cch\" (UID: \"70e67834-bd76-4f5f-a2f6-e845622547ef\") " pod="openstack/dnsmasq-dns-5f58fdf755-h5cch" Nov 28 15:18:42 crc kubenswrapper[4857]: I1128 15:18:42.902516 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/70e67834-bd76-4f5f-a2f6-e845622547ef-dns-svc\") pod \"dnsmasq-dns-5f58fdf755-h5cch\" (UID: \"70e67834-bd76-4f5f-a2f6-e845622547ef\") " pod="openstack/dnsmasq-dns-5f58fdf755-h5cch" Nov 28 15:18:42 crc kubenswrapper[4857]: I1128 15:18:42.903541 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/70e67834-bd76-4f5f-a2f6-e845622547ef-ovsdbserver-nb\") pod \"dnsmasq-dns-5f58fdf755-h5cch\" (UID: \"70e67834-bd76-4f5f-a2f6-e845622547ef\") " pod="openstack/dnsmasq-dns-5f58fdf755-h5cch" Nov 28 15:18:42 crc kubenswrapper[4857]: I1128 15:18:42.903599 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/70e67834-bd76-4f5f-a2f6-e845622547ef-config\") pod 
\"dnsmasq-dns-5f58fdf755-h5cch\" (UID: \"70e67834-bd76-4f5f-a2f6-e845622547ef\") " pod="openstack/dnsmasq-dns-5f58fdf755-h5cch" Nov 28 15:18:42 crc kubenswrapper[4857]: I1128 15:18:42.903656 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/70e67834-bd76-4f5f-a2f6-e845622547ef-ovsdbserver-sb\") pod \"dnsmasq-dns-5f58fdf755-h5cch\" (UID: \"70e67834-bd76-4f5f-a2f6-e845622547ef\") " pod="openstack/dnsmasq-dns-5f58fdf755-h5cch" Nov 28 15:18:42 crc kubenswrapper[4857]: I1128 15:18:42.903793 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/70e67834-bd76-4f5f-a2f6-e845622547ef-openstack-cell1\") pod \"dnsmasq-dns-5f58fdf755-h5cch\" (UID: \"70e67834-bd76-4f5f-a2f6-e845622547ef\") " pod="openstack/dnsmasq-dns-5f58fdf755-h5cch" Nov 28 15:18:42 crc kubenswrapper[4857]: I1128 15:18:42.903818 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/70e67834-bd76-4f5f-a2f6-e845622547ef-dns-svc\") pod \"dnsmasq-dns-5f58fdf755-h5cch\" (UID: \"70e67834-bd76-4f5f-a2f6-e845622547ef\") " pod="openstack/dnsmasq-dns-5f58fdf755-h5cch" Nov 28 15:18:42 crc kubenswrapper[4857]: I1128 15:18:42.930784 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tvgc5\" (UniqueName: \"kubernetes.io/projected/70e67834-bd76-4f5f-a2f6-e845622547ef-kube-api-access-tvgc5\") pod \"dnsmasq-dns-5f58fdf755-h5cch\" (UID: \"70e67834-bd76-4f5f-a2f6-e845622547ef\") " pod="openstack/dnsmasq-dns-5f58fdf755-h5cch" Nov 28 15:18:43 crc kubenswrapper[4857]: I1128 15:18:43.048611 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f58fdf755-h5cch" Nov 28 15:18:43 crc kubenswrapper[4857]: I1128 15:18:43.940824 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f58fdf755-h5cch"] Nov 28 15:18:44 crc kubenswrapper[4857]: I1128 15:18:44.419797 4857 generic.go:334] "Generic (PLEG): container finished" podID="70e67834-bd76-4f5f-a2f6-e845622547ef" containerID="f386c4ea7a4196c01266a7b3a5be6a3a6baa3a88797f891d633c9355e41fc91d" exitCode=0 Nov 28 15:18:44 crc kubenswrapper[4857]: I1128 15:18:44.419860 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f58fdf755-h5cch" event={"ID":"70e67834-bd76-4f5f-a2f6-e845622547ef","Type":"ContainerDied","Data":"f386c4ea7a4196c01266a7b3a5be6a3a6baa3a88797f891d633c9355e41fc91d"} Nov 28 15:18:44 crc kubenswrapper[4857]: I1128 15:18:44.420343 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f58fdf755-h5cch" event={"ID":"70e67834-bd76-4f5f-a2f6-e845622547ef","Type":"ContainerStarted","Data":"c54f182849cb4fd2d78a0a255d1329c85eca712061731e6b57fddc33a403ad1c"} Nov 28 15:18:45 crc kubenswrapper[4857]: I1128 15:18:45.441353 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f58fdf755-h5cch" event={"ID":"70e67834-bd76-4f5f-a2f6-e845622547ef","Type":"ContainerStarted","Data":"4da333ae879ae96bed779e5111a40d4b748d9f7fdd95d79715860fabddbc87f9"} Nov 28 15:18:46 crc kubenswrapper[4857]: I1128 15:18:46.457195 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5f58fdf755-h5cch" Nov 28 15:18:46 crc kubenswrapper[4857]: I1128 15:18:46.502487 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/dnsmasq-dns-5f58fdf755-h5cch" podStartSLOduration=4.50245393 podStartE2EDuration="4.50245393s" podCreationTimestamp="2025-11-28 15:18:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:18:46.488687792 +0000 UTC m=+6576.612629259" watchObservedRunningTime="2025-11-28 15:18:46.50245393 +0000 UTC m=+6576.626395407" Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.050215 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5f58fdf755-h5cch" Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.122588 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-655767558f-bptdq"] Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.122914 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-655767558f-bptdq" podUID="e578ed60-fa6a-4f91-a16d-eddf53b7a054" containerName="dnsmasq-dns" containerID="cri-o://f23c816775c40d441ce000d9f7415adfe55ad80acf04b218e728bed52b4f9a0d" gracePeriod=10 Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.486996 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5768f785f9-lgxzj"] Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.494318 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5768f785f9-lgxzj" Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.517258 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5768f785f9-lgxzj"] Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.543545 4857 generic.go:334] "Generic (PLEG): container finished" podID="e578ed60-fa6a-4f91-a16d-eddf53b7a054" containerID="f23c816775c40d441ce000d9f7415adfe55ad80acf04b218e728bed52b4f9a0d" exitCode=0 Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.543592 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-655767558f-bptdq" event={"ID":"e578ed60-fa6a-4f91-a16d-eddf53b7a054","Type":"ContainerDied","Data":"f23c816775c40d441ce000d9f7415adfe55ad80acf04b218e728bed52b4f9a0d"} Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.573229 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2895b69d-98a4-41f5-8a13-50954e0d72dd-ovsdbserver-sb\") pod \"dnsmasq-dns-5768f785f9-lgxzj\" (UID: \"2895b69d-98a4-41f5-8a13-50954e0d72dd\") " pod="openstack/dnsmasq-dns-5768f785f9-lgxzj" Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.573521 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lkbrh\" (UniqueName: \"kubernetes.io/projected/2895b69d-98a4-41f5-8a13-50954e0d72dd-kube-api-access-lkbrh\") pod \"dnsmasq-dns-5768f785f9-lgxzj\" (UID: \"2895b69d-98a4-41f5-8a13-50954e0d72dd\") " pod="openstack/dnsmasq-dns-5768f785f9-lgxzj" Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.573627 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2895b69d-98a4-41f5-8a13-50954e0d72dd-dns-svc\") pod \"dnsmasq-dns-5768f785f9-lgxzj\" (UID: \"2895b69d-98a4-41f5-8a13-50954e0d72dd\") " pod="openstack/dnsmasq-dns-5768f785f9-lgxzj" Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.573759 4857 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2895b69d-98a4-41f5-8a13-50954e0d72dd-ovsdbserver-nb\") pod \"dnsmasq-dns-5768f785f9-lgxzj\" (UID: \"2895b69d-98a4-41f5-8a13-50954e0d72dd\") " pod="openstack/dnsmasq-dns-5768f785f9-lgxzj" Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.573893 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/2895b69d-98a4-41f5-8a13-50954e0d72dd-openstack-cell1\") pod \"dnsmasq-dns-5768f785f9-lgxzj\" (UID: \"2895b69d-98a4-41f5-8a13-50954e0d72dd\") " pod="openstack/dnsmasq-dns-5768f785f9-lgxzj" Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.574054 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2895b69d-98a4-41f5-8a13-50954e0d72dd-config\") pod \"dnsmasq-dns-5768f785f9-lgxzj\" (UID: \"2895b69d-98a4-41f5-8a13-50954e0d72dd\") " pod="openstack/dnsmasq-dns-5768f785f9-lgxzj" Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.676078 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2895b69d-98a4-41f5-8a13-50954e0d72dd-ovsdbserver-sb\") pod \"dnsmasq-dns-5768f785f9-lgxzj\" (UID: \"2895b69d-98a4-41f5-8a13-50954e0d72dd\") " pod="openstack/dnsmasq-dns-5768f785f9-lgxzj" Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.676245 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lkbrh\" (UniqueName: \"kubernetes.io/projected/2895b69d-98a4-41f5-8a13-50954e0d72dd-kube-api-access-lkbrh\") pod \"dnsmasq-dns-5768f785f9-lgxzj\" (UID: \"2895b69d-98a4-41f5-8a13-50954e0d72dd\") " pod="openstack/dnsmasq-dns-5768f785f9-lgxzj" Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.676337 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2895b69d-98a4-41f5-8a13-50954e0d72dd-dns-svc\") pod \"dnsmasq-dns-5768f785f9-lgxzj\" (UID: \"2895b69d-98a4-41f5-8a13-50954e0d72dd\") " pod="openstack/dnsmasq-dns-5768f785f9-lgxzj" Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.677300 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2895b69d-98a4-41f5-8a13-50954e0d72dd-dns-svc\") pod \"dnsmasq-dns-5768f785f9-lgxzj\" (UID: \"2895b69d-98a4-41f5-8a13-50954e0d72dd\") " pod="openstack/dnsmasq-dns-5768f785f9-lgxzj" Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.677920 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2895b69d-98a4-41f5-8a13-50954e0d72dd-ovsdbserver-sb\") pod \"dnsmasq-dns-5768f785f9-lgxzj\" (UID: \"2895b69d-98a4-41f5-8a13-50954e0d72dd\") " pod="openstack/dnsmasq-dns-5768f785f9-lgxzj" Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.678164 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2895b69d-98a4-41f5-8a13-50954e0d72dd-ovsdbserver-nb\") pod \"dnsmasq-dns-5768f785f9-lgxzj\" (UID: \"2895b69d-98a4-41f5-8a13-50954e0d72dd\") " pod="openstack/dnsmasq-dns-5768f785f9-lgxzj" Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.678427 4857 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/2895b69d-98a4-41f5-8a13-50954e0d72dd-openstack-cell1\") pod \"dnsmasq-dns-5768f785f9-lgxzj\" (UID: \"2895b69d-98a4-41f5-8a13-50954e0d72dd\") " pod="openstack/dnsmasq-dns-5768f785f9-lgxzj" Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.678620 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2895b69d-98a4-41f5-8a13-50954e0d72dd-config\") pod \"dnsmasq-dns-5768f785f9-lgxzj\" (UID: \"2895b69d-98a4-41f5-8a13-50954e0d72dd\") " pod="openstack/dnsmasq-dns-5768f785f9-lgxzj" Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.679432 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2895b69d-98a4-41f5-8a13-50954e0d72dd-config\") pod \"dnsmasq-dns-5768f785f9-lgxzj\" (UID: \"2895b69d-98a4-41f5-8a13-50954e0d72dd\") " pod="openstack/dnsmasq-dns-5768f785f9-lgxzj" Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.679190 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2895b69d-98a4-41f5-8a13-50954e0d72dd-ovsdbserver-nb\") pod \"dnsmasq-dns-5768f785f9-lgxzj\" (UID: \"2895b69d-98a4-41f5-8a13-50954e0d72dd\") " pod="openstack/dnsmasq-dns-5768f785f9-lgxzj" Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.680797 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/2895b69d-98a4-41f5-8a13-50954e0d72dd-openstack-cell1\") pod \"dnsmasq-dns-5768f785f9-lgxzj\" (UID: \"2895b69d-98a4-41f5-8a13-50954e0d72dd\") " pod="openstack/dnsmasq-dns-5768f785f9-lgxzj" Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.707320 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lkbrh\" (UniqueName: \"kubernetes.io/projected/2895b69d-98a4-41f5-8a13-50954e0d72dd-kube-api-access-lkbrh\") pod \"dnsmasq-dns-5768f785f9-lgxzj\" (UID: \"2895b69d-98a4-41f5-8a13-50954e0d72dd\") " pod="openstack/dnsmasq-dns-5768f785f9-lgxzj" Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.789662 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-655767558f-bptdq" Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.820986 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5768f785f9-lgxzj" Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.882547 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e578ed60-fa6a-4f91-a16d-eddf53b7a054-dns-svc\") pod \"e578ed60-fa6a-4f91-a16d-eddf53b7a054\" (UID: \"e578ed60-fa6a-4f91-a16d-eddf53b7a054\") " Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.882633 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e578ed60-fa6a-4f91-a16d-eddf53b7a054-config\") pod \"e578ed60-fa6a-4f91-a16d-eddf53b7a054\" (UID: \"e578ed60-fa6a-4f91-a16d-eddf53b7a054\") " Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.882729 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w8rjg\" (UniqueName: \"kubernetes.io/projected/e578ed60-fa6a-4f91-a16d-eddf53b7a054-kube-api-access-w8rjg\") pod \"e578ed60-fa6a-4f91-a16d-eddf53b7a054\" (UID: \"e578ed60-fa6a-4f91-a16d-eddf53b7a054\") " Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.882773 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e578ed60-fa6a-4f91-a16d-eddf53b7a054-ovsdbserver-nb\") pod \"e578ed60-fa6a-4f91-a16d-eddf53b7a054\" (UID: \"e578ed60-fa6a-4f91-a16d-eddf53b7a054\") " Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.882855 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e578ed60-fa6a-4f91-a16d-eddf53b7a054-ovsdbserver-sb\") pod \"e578ed60-fa6a-4f91-a16d-eddf53b7a054\" (UID: \"e578ed60-fa6a-4f91-a16d-eddf53b7a054\") " Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.890459 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e578ed60-fa6a-4f91-a16d-eddf53b7a054-kube-api-access-w8rjg" (OuterVolumeSpecName: "kube-api-access-w8rjg") pod "e578ed60-fa6a-4f91-a16d-eddf53b7a054" (UID: "e578ed60-fa6a-4f91-a16d-eddf53b7a054"). InnerVolumeSpecName "kube-api-access-w8rjg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.940443 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e578ed60-fa6a-4f91-a16d-eddf53b7a054-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e578ed60-fa6a-4f91-a16d-eddf53b7a054" (UID: "e578ed60-fa6a-4f91-a16d-eddf53b7a054"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.957642 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e578ed60-fa6a-4f91-a16d-eddf53b7a054-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e578ed60-fa6a-4f91-a16d-eddf53b7a054" (UID: "e578ed60-fa6a-4f91-a16d-eddf53b7a054"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.960360 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e578ed60-fa6a-4f91-a16d-eddf53b7a054-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e578ed60-fa6a-4f91-a16d-eddf53b7a054" (UID: "e578ed60-fa6a-4f91-a16d-eddf53b7a054"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.961461 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e578ed60-fa6a-4f91-a16d-eddf53b7a054-config" (OuterVolumeSpecName: "config") pod "e578ed60-fa6a-4f91-a16d-eddf53b7a054" (UID: "e578ed60-fa6a-4f91-a16d-eddf53b7a054"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.986345 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e578ed60-fa6a-4f91-a16d-eddf53b7a054-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.986377 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e578ed60-fa6a-4f91-a16d-eddf53b7a054-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.986389 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w8rjg\" (UniqueName: \"kubernetes.io/projected/e578ed60-fa6a-4f91-a16d-eddf53b7a054-kube-api-access-w8rjg\") on node \"crc\" DevicePath \"\"" Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.986399 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e578ed60-fa6a-4f91-a16d-eddf53b7a054-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 15:18:53 crc kubenswrapper[4857]: I1128 15:18:53.986408 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e578ed60-fa6a-4f91-a16d-eddf53b7a054-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 15:18:54 crc kubenswrapper[4857]: I1128 15:18:54.231768 4857 scope.go:117] "RemoveContainer" containerID="5ef5285bd197ee35f66becdfa3ecde61afe107f66575e8853fe09967aa8011e3" Nov 28 15:18:54 crc kubenswrapper[4857]: E1128 15:18:54.243665 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:18:54 crc kubenswrapper[4857]: I1128 15:18:54.420547 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5768f785f9-lgxzj"] Nov 28 15:18:54 crc kubenswrapper[4857]: I1128 15:18:54.553403 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5768f785f9-lgxzj" event={"ID":"2895b69d-98a4-41f5-8a13-50954e0d72dd","Type":"ContainerStarted","Data":"28b850d5a8da3f156fe0d6f5cea2831c3bf7b00f879c8cbb160ab3df66029a90"} Nov 28 15:18:54 crc kubenswrapper[4857]: I1128 15:18:54.559064 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-655767558f-bptdq" event={"ID":"e578ed60-fa6a-4f91-a16d-eddf53b7a054","Type":"ContainerDied","Data":"2e60745731ec1649085df4390b509f145b42600c2264133f341a21a76a0783e7"} Nov 28 15:18:54 crc kubenswrapper[4857]: I1128 15:18:54.559111 4857 scope.go:117] "RemoveContainer" containerID="f23c816775c40d441ce000d9f7415adfe55ad80acf04b218e728bed52b4f9a0d" Nov 28 15:18:54 crc kubenswrapper[4857]: I1128 15:18:54.559288 4857 util.go:48] "No ready sandbox for pod can be 
found. Need to start a new one" pod="openstack/dnsmasq-dns-655767558f-bptdq" Nov 28 15:18:54 crc kubenswrapper[4857]: I1128 15:18:54.664854 4857 scope.go:117] "RemoveContainer" containerID="52b55544e9e0f6fd65107df1be181aa57909cd3dfbab3560d9e850e2a2cbb8ad" Nov 28 15:18:54 crc kubenswrapper[4857]: I1128 15:18:54.699856 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-655767558f-bptdq"] Nov 28 15:18:54 crc kubenswrapper[4857]: I1128 15:18:54.710794 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-655767558f-bptdq"] Nov 28 15:18:55 crc kubenswrapper[4857]: I1128 15:18:55.574311 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5768f785f9-lgxzj" event={"ID":"2895b69d-98a4-41f5-8a13-50954e0d72dd","Type":"ContainerStarted","Data":"935c74a593869b4843cb7d55f53b0ddabb6dbb4bc84d928f63402590cd4b85fe"} Nov 28 15:18:56 crc kubenswrapper[4857]: I1128 15:18:56.242162 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e578ed60-fa6a-4f91-a16d-eddf53b7a054" path="/var/lib/kubelet/pods/e578ed60-fa6a-4f91-a16d-eddf53b7a054/volumes" Nov 28 15:18:56 crc kubenswrapper[4857]: I1128 15:18:56.590804 4857 generic.go:334] "Generic (PLEG): container finished" podID="2895b69d-98a4-41f5-8a13-50954e0d72dd" containerID="935c74a593869b4843cb7d55f53b0ddabb6dbb4bc84d928f63402590cd4b85fe" exitCode=0 Nov 28 15:18:56 crc kubenswrapper[4857]: I1128 15:18:56.590878 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5768f785f9-lgxzj" event={"ID":"2895b69d-98a4-41f5-8a13-50954e0d72dd","Type":"ContainerDied","Data":"935c74a593869b4843cb7d55f53b0ddabb6dbb4bc84d928f63402590cd4b85fe"} Nov 28 15:18:58 crc kubenswrapper[4857]: I1128 15:18:58.623979 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5768f785f9-lgxzj" event={"ID":"2895b69d-98a4-41f5-8a13-50954e0d72dd","Type":"ContainerStarted","Data":"519d6a2d2019ede172d7d078420064395f8a9b0f47c0529b9c1a1d510229005c"} Nov 28 15:18:58 crc kubenswrapper[4857]: I1128 15:18:58.624355 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5768f785f9-lgxzj" Nov 28 15:18:58 crc kubenswrapper[4857]: I1128 15:18:58.655273 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5768f785f9-lgxzj" podStartSLOduration=5.65524913 podStartE2EDuration="5.65524913s" podCreationTimestamp="2025-11-28 15:18:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 15:18:58.646386263 +0000 UTC m=+6588.770327720" watchObservedRunningTime="2025-11-28 15:18:58.65524913 +0000 UTC m=+6588.779190567" Nov 28 15:19:03 crc kubenswrapper[4857]: I1128 15:19:03.823171 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5768f785f9-lgxzj" Nov 28 15:19:03 crc kubenswrapper[4857]: I1128 15:19:03.897679 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f58fdf755-h5cch"] Nov 28 15:19:03 crc kubenswrapper[4857]: I1128 15:19:03.898086 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5f58fdf755-h5cch" podUID="70e67834-bd76-4f5f-a2f6-e845622547ef" containerName="dnsmasq-dns" containerID="cri-o://4da333ae879ae96bed779e5111a40d4b748d9f7fdd95d79715860fabddbc87f9" gracePeriod=10 Nov 28 15:19:04 crc kubenswrapper[4857]: I1128 15:19:04.407450 4857 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f58fdf755-h5cch" Nov 28 15:19:04 crc kubenswrapper[4857]: I1128 15:19:04.493094 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/70e67834-bd76-4f5f-a2f6-e845622547ef-config\") pod \"70e67834-bd76-4f5f-a2f6-e845622547ef\" (UID: \"70e67834-bd76-4f5f-a2f6-e845622547ef\") " Nov 28 15:19:04 crc kubenswrapper[4857]: I1128 15:19:04.493161 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tvgc5\" (UniqueName: \"kubernetes.io/projected/70e67834-bd76-4f5f-a2f6-e845622547ef-kube-api-access-tvgc5\") pod \"70e67834-bd76-4f5f-a2f6-e845622547ef\" (UID: \"70e67834-bd76-4f5f-a2f6-e845622547ef\") " Nov 28 15:19:04 crc kubenswrapper[4857]: I1128 15:19:04.493336 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/70e67834-bd76-4f5f-a2f6-e845622547ef-ovsdbserver-nb\") pod \"70e67834-bd76-4f5f-a2f6-e845622547ef\" (UID: \"70e67834-bd76-4f5f-a2f6-e845622547ef\") " Nov 28 15:19:04 crc kubenswrapper[4857]: I1128 15:19:04.493452 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/70e67834-bd76-4f5f-a2f6-e845622547ef-openstack-cell1\") pod \"70e67834-bd76-4f5f-a2f6-e845622547ef\" (UID: \"70e67834-bd76-4f5f-a2f6-e845622547ef\") " Nov 28 15:19:04 crc kubenswrapper[4857]: I1128 15:19:04.493481 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/70e67834-bd76-4f5f-a2f6-e845622547ef-dns-svc\") pod \"70e67834-bd76-4f5f-a2f6-e845622547ef\" (UID: \"70e67834-bd76-4f5f-a2f6-e845622547ef\") " Nov 28 15:19:04 crc kubenswrapper[4857]: I1128 15:19:04.493540 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/70e67834-bd76-4f5f-a2f6-e845622547ef-ovsdbserver-sb\") pod \"70e67834-bd76-4f5f-a2f6-e845622547ef\" (UID: \"70e67834-bd76-4f5f-a2f6-e845622547ef\") " Nov 28 15:19:04 crc kubenswrapper[4857]: I1128 15:19:04.525233 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/70e67834-bd76-4f5f-a2f6-e845622547ef-kube-api-access-tvgc5" (OuterVolumeSpecName: "kube-api-access-tvgc5") pod "70e67834-bd76-4f5f-a2f6-e845622547ef" (UID: "70e67834-bd76-4f5f-a2f6-e845622547ef"). InnerVolumeSpecName "kube-api-access-tvgc5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:19:04 crc kubenswrapper[4857]: I1128 15:19:04.558743 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/70e67834-bd76-4f5f-a2f6-e845622547ef-config" (OuterVolumeSpecName: "config") pod "70e67834-bd76-4f5f-a2f6-e845622547ef" (UID: "70e67834-bd76-4f5f-a2f6-e845622547ef"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:04 crc kubenswrapper[4857]: I1128 15:19:04.559163 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/70e67834-bd76-4f5f-a2f6-e845622547ef-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "70e67834-bd76-4f5f-a2f6-e845622547ef" (UID: "70e67834-bd76-4f5f-a2f6-e845622547ef"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:04 crc kubenswrapper[4857]: I1128 15:19:04.588323 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/70e67834-bd76-4f5f-a2f6-e845622547ef-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "70e67834-bd76-4f5f-a2f6-e845622547ef" (UID: "70e67834-bd76-4f5f-a2f6-e845622547ef"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:04 crc kubenswrapper[4857]: I1128 15:19:04.592843 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/70e67834-bd76-4f5f-a2f6-e845622547ef-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "70e67834-bd76-4f5f-a2f6-e845622547ef" (UID: "70e67834-bd76-4f5f-a2f6-e845622547ef"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:04 crc kubenswrapper[4857]: I1128 15:19:04.600216 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/70e67834-bd76-4f5f-a2f6-e845622547ef-openstack-cell1" (OuterVolumeSpecName: "openstack-cell1") pod "70e67834-bd76-4f5f-a2f6-e845622547ef" (UID: "70e67834-bd76-4f5f-a2f6-e845622547ef"). InnerVolumeSpecName "openstack-cell1". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:19:04 crc kubenswrapper[4857]: I1128 15:19:04.602147 4857 reconciler_common.go:293] "Volume detached for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/70e67834-bd76-4f5f-a2f6-e845622547ef-openstack-cell1\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:04 crc kubenswrapper[4857]: I1128 15:19:04.602174 4857 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/70e67834-bd76-4f5f-a2f6-e845622547ef-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:04 crc kubenswrapper[4857]: I1128 15:19:04.602183 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/70e67834-bd76-4f5f-a2f6-e845622547ef-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:04 crc kubenswrapper[4857]: I1128 15:19:04.602195 4857 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/70e67834-bd76-4f5f-a2f6-e845622547ef-config\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:04 crc kubenswrapper[4857]: I1128 15:19:04.602207 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tvgc5\" (UniqueName: \"kubernetes.io/projected/70e67834-bd76-4f5f-a2f6-e845622547ef-kube-api-access-tvgc5\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:04 crc kubenswrapper[4857]: I1128 15:19:04.602217 4857 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/70e67834-bd76-4f5f-a2f6-e845622547ef-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:04 crc kubenswrapper[4857]: I1128 15:19:04.700022 4857 generic.go:334] "Generic (PLEG): container finished" podID="70e67834-bd76-4f5f-a2f6-e845622547ef" containerID="4da333ae879ae96bed779e5111a40d4b748d9f7fdd95d79715860fabddbc87f9" exitCode=0 Nov 28 15:19:04 crc kubenswrapper[4857]: I1128 15:19:04.700073 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f58fdf755-h5cch" event={"ID":"70e67834-bd76-4f5f-a2f6-e845622547ef","Type":"ContainerDied","Data":"4da333ae879ae96bed779e5111a40d4b748d9f7fdd95d79715860fabddbc87f9"} Nov 28 15:19:04 crc 
kubenswrapper[4857]: I1128 15:19:04.700106 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f58fdf755-h5cch" event={"ID":"70e67834-bd76-4f5f-a2f6-e845622547ef","Type":"ContainerDied","Data":"c54f182849cb4fd2d78a0a255d1329c85eca712061731e6b57fddc33a403ad1c"} Nov 28 15:19:04 crc kubenswrapper[4857]: I1128 15:19:04.700130 4857 scope.go:117] "RemoveContainer" containerID="4da333ae879ae96bed779e5111a40d4b748d9f7fdd95d79715860fabddbc87f9" Nov 28 15:19:04 crc kubenswrapper[4857]: I1128 15:19:04.700187 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f58fdf755-h5cch" Nov 28 15:19:04 crc kubenswrapper[4857]: I1128 15:19:04.724776 4857 scope.go:117] "RemoveContainer" containerID="f386c4ea7a4196c01266a7b3a5be6a3a6baa3a88797f891d633c9355e41fc91d" Nov 28 15:19:04 crc kubenswrapper[4857]: I1128 15:19:04.740683 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f58fdf755-h5cch"] Nov 28 15:19:04 crc kubenswrapper[4857]: I1128 15:19:04.748333 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5f58fdf755-h5cch"] Nov 28 15:19:04 crc kubenswrapper[4857]: I1128 15:19:04.772806 4857 scope.go:117] "RemoveContainer" containerID="4da333ae879ae96bed779e5111a40d4b748d9f7fdd95d79715860fabddbc87f9" Nov 28 15:19:04 crc kubenswrapper[4857]: E1128 15:19:04.773723 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4da333ae879ae96bed779e5111a40d4b748d9f7fdd95d79715860fabddbc87f9\": container with ID starting with 4da333ae879ae96bed779e5111a40d4b748d9f7fdd95d79715860fabddbc87f9 not found: ID does not exist" containerID="4da333ae879ae96bed779e5111a40d4b748d9f7fdd95d79715860fabddbc87f9" Nov 28 15:19:04 crc kubenswrapper[4857]: I1128 15:19:04.773787 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4da333ae879ae96bed779e5111a40d4b748d9f7fdd95d79715860fabddbc87f9"} err="failed to get container status \"4da333ae879ae96bed779e5111a40d4b748d9f7fdd95d79715860fabddbc87f9\": rpc error: code = NotFound desc = could not find container \"4da333ae879ae96bed779e5111a40d4b748d9f7fdd95d79715860fabddbc87f9\": container with ID starting with 4da333ae879ae96bed779e5111a40d4b748d9f7fdd95d79715860fabddbc87f9 not found: ID does not exist" Nov 28 15:19:04 crc kubenswrapper[4857]: I1128 15:19:04.773825 4857 scope.go:117] "RemoveContainer" containerID="f386c4ea7a4196c01266a7b3a5be6a3a6baa3a88797f891d633c9355e41fc91d" Nov 28 15:19:04 crc kubenswrapper[4857]: E1128 15:19:04.774343 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f386c4ea7a4196c01266a7b3a5be6a3a6baa3a88797f891d633c9355e41fc91d\": container with ID starting with f386c4ea7a4196c01266a7b3a5be6a3a6baa3a88797f891d633c9355e41fc91d not found: ID does not exist" containerID="f386c4ea7a4196c01266a7b3a5be6a3a6baa3a88797f891d633c9355e41fc91d" Nov 28 15:19:04 crc kubenswrapper[4857]: I1128 15:19:04.774416 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f386c4ea7a4196c01266a7b3a5be6a3a6baa3a88797f891d633c9355e41fc91d"} err="failed to get container status \"f386c4ea7a4196c01266a7b3a5be6a3a6baa3a88797f891d633c9355e41fc91d\": rpc error: code = NotFound desc = could not find container \"f386c4ea7a4196c01266a7b3a5be6a3a6baa3a88797f891d633c9355e41fc91d\": container with ID starting with 
f386c4ea7a4196c01266a7b3a5be6a3a6baa3a88797f891d633c9355e41fc91d not found: ID does not exist" Nov 28 15:19:05 crc kubenswrapper[4857]: I1128 15:19:05.228805 4857 scope.go:117] "RemoveContainer" containerID="5ef5285bd197ee35f66becdfa3ecde61afe107f66575e8853fe09967aa8011e3" Nov 28 15:19:05 crc kubenswrapper[4857]: E1128 15:19:05.229294 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:19:06 crc kubenswrapper[4857]: I1128 15:19:06.249589 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="70e67834-bd76-4f5f-a2f6-e845622547ef" path="/var/lib/kubelet/pods/70e67834-bd76-4f5f-a2f6-e845622547ef/volumes" Nov 28 15:19:14 crc kubenswrapper[4857]: I1128 15:19:14.814407 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2"] Nov 28 15:19:14 crc kubenswrapper[4857]: E1128 15:19:14.815290 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70e67834-bd76-4f5f-a2f6-e845622547ef" containerName="dnsmasq-dns" Nov 28 15:19:14 crc kubenswrapper[4857]: I1128 15:19:14.815304 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="70e67834-bd76-4f5f-a2f6-e845622547ef" containerName="dnsmasq-dns" Nov 28 15:19:14 crc kubenswrapper[4857]: E1128 15:19:14.815317 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e578ed60-fa6a-4f91-a16d-eddf53b7a054" containerName="init" Nov 28 15:19:14 crc kubenswrapper[4857]: I1128 15:19:14.815324 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="e578ed60-fa6a-4f91-a16d-eddf53b7a054" containerName="init" Nov 28 15:19:14 crc kubenswrapper[4857]: E1128 15:19:14.815348 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e578ed60-fa6a-4f91-a16d-eddf53b7a054" containerName="dnsmasq-dns" Nov 28 15:19:14 crc kubenswrapper[4857]: I1128 15:19:14.815355 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="e578ed60-fa6a-4f91-a16d-eddf53b7a054" containerName="dnsmasq-dns" Nov 28 15:19:14 crc kubenswrapper[4857]: E1128 15:19:14.815363 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70e67834-bd76-4f5f-a2f6-e845622547ef" containerName="init" Nov 28 15:19:14 crc kubenswrapper[4857]: I1128 15:19:14.815369 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="70e67834-bd76-4f5f-a2f6-e845622547ef" containerName="init" Nov 28 15:19:14 crc kubenswrapper[4857]: I1128 15:19:14.815580 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="70e67834-bd76-4f5f-a2f6-e845622547ef" containerName="dnsmasq-dns" Nov 28 15:19:14 crc kubenswrapper[4857]: I1128 15:19:14.815601 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="e578ed60-fa6a-4f91-a16d-eddf53b7a054" containerName="dnsmasq-dns" Nov 28 15:19:14 crc kubenswrapper[4857]: I1128 15:19:14.821889 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2"
Nov 28 15:19:14 crc kubenswrapper[4857]: I1128 15:19:14.830998 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 28 15:19:14 crc kubenswrapper[4857]: I1128 15:19:14.831355 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1"
Nov 28 15:19:14 crc kubenswrapper[4857]: I1128 15:19:14.831544 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret"
Nov 28 15:19:14 crc kubenswrapper[4857]: I1128 15:19:14.831640 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-qbvn9"
Nov 28 15:19:14 crc kubenswrapper[4857]: I1128 15:19:14.836403 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2"]
Nov 28 15:19:14 crc kubenswrapper[4857]: I1128 15:19:14.890505 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28b59422-5490-4fcc-a7cd-a4e11842e1d4-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2\" (UID: \"28b59422-5490-4fcc-a7cd-a4e11842e1d4\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2"
Nov 28 15:19:14 crc kubenswrapper[4857]: I1128 15:19:14.892641 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/28b59422-5490-4fcc-a7cd-a4e11842e1d4-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2\" (UID: \"28b59422-5490-4fcc-a7cd-a4e11842e1d4\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2"
Nov 28 15:19:14 crc kubenswrapper[4857]: I1128 15:19:14.892723 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/28b59422-5490-4fcc-a7cd-a4e11842e1d4-ssh-key\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2\" (UID: \"28b59422-5490-4fcc-a7cd-a4e11842e1d4\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2"
Nov 28 15:19:14 crc kubenswrapper[4857]: I1128 15:19:14.893000 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/28b59422-5490-4fcc-a7cd-a4e11842e1d4-ceph\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2\" (UID: \"28b59422-5490-4fcc-a7cd-a4e11842e1d4\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2"
Nov 28 15:19:14 crc kubenswrapper[4857]: I1128 15:19:14.893347 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j9h4c\" (UniqueName: \"kubernetes.io/projected/28b59422-5490-4fcc-a7cd-a4e11842e1d4-kube-api-access-j9h4c\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2\" (UID: \"28b59422-5490-4fcc-a7cd-a4e11842e1d4\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2"
Nov 28 15:19:14 crc kubenswrapper[4857]: I1128 15:19:14.995357 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/28b59422-5490-4fcc-a7cd-a4e11842e1d4-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2\" (UID: \"28b59422-5490-4fcc-a7cd-a4e11842e1d4\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2"
Nov 28 15:19:14 crc kubenswrapper[4857]: I1128 15:19:14.995408 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/28b59422-5490-4fcc-a7cd-a4e11842e1d4-ssh-key\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2\" (UID: \"28b59422-5490-4fcc-a7cd-a4e11842e1d4\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2"
Nov 28 15:19:14 crc kubenswrapper[4857]: I1128 15:19:14.995464 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/28b59422-5490-4fcc-a7cd-a4e11842e1d4-ceph\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2\" (UID: \"28b59422-5490-4fcc-a7cd-a4e11842e1d4\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2"
Nov 28 15:19:14 crc kubenswrapper[4857]: I1128 15:19:14.995558 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j9h4c\" (UniqueName: \"kubernetes.io/projected/28b59422-5490-4fcc-a7cd-a4e11842e1d4-kube-api-access-j9h4c\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2\" (UID: \"28b59422-5490-4fcc-a7cd-a4e11842e1d4\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2"
Nov 28 15:19:14 crc kubenswrapper[4857]: I1128 15:19:14.995601 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28b59422-5490-4fcc-a7cd-a4e11842e1d4-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2\" (UID: \"28b59422-5490-4fcc-a7cd-a4e11842e1d4\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2"
Nov 28 15:19:15 crc kubenswrapper[4857]: I1128 15:19:15.002425 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/28b59422-5490-4fcc-a7cd-a4e11842e1d4-ssh-key\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2\" (UID: \"28b59422-5490-4fcc-a7cd-a4e11842e1d4\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2"
Nov 28 15:19:15 crc kubenswrapper[4857]: I1128 15:19:15.003184 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28b59422-5490-4fcc-a7cd-a4e11842e1d4-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2\" (UID: \"28b59422-5490-4fcc-a7cd-a4e11842e1d4\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2"
Nov 28 15:19:15 crc kubenswrapper[4857]: I1128 15:19:15.005036 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/28b59422-5490-4fcc-a7cd-a4e11842e1d4-ceph\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2\" (UID: \"28b59422-5490-4fcc-a7cd-a4e11842e1d4\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2"
Nov 28 15:19:15 crc kubenswrapper[4857]: I1128 15:19:15.006100 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/28b59422-5490-4fcc-a7cd-a4e11842e1d4-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2\" (UID: \"28b59422-5490-4fcc-a7cd-a4e11842e1d4\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2"
Nov 28 15:19:15 crc kubenswrapper[4857]: I1128 15:19:15.016890 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j9h4c\" (UniqueName: \"kubernetes.io/projected/28b59422-5490-4fcc-a7cd-a4e11842e1d4-kube-api-access-j9h4c\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2\" (UID: \"28b59422-5490-4fcc-a7cd-a4e11842e1d4\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2"
Nov 28 15:19:15 crc kubenswrapper[4857]: I1128 15:19:15.159350 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2"
Nov 28 15:19:15 crc kubenswrapper[4857]: I1128 15:19:15.593537 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2"]
Nov 28 15:19:15 crc kubenswrapper[4857]: W1128 15:19:15.599611 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod28b59422_5490_4fcc_a7cd_a4e11842e1d4.slice/crio-46bb12acab93ed685d38a0e3bf99c7e1067da9c12af33a0b4649aad3d6a65dac WatchSource:0}: Error finding container 46bb12acab93ed685d38a0e3bf99c7e1067da9c12af33a0b4649aad3d6a65dac: Status 404 returned error can't find the container with id 46bb12acab93ed685d38a0e3bf99c7e1067da9c12af33a0b4649aad3d6a65dac
Nov 28 15:19:15 crc kubenswrapper[4857]: I1128 15:19:15.847914 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2" event={"ID":"28b59422-5490-4fcc-a7cd-a4e11842e1d4","Type":"ContainerStarted","Data":"46bb12acab93ed685d38a0e3bf99c7e1067da9c12af33a0b4649aad3d6a65dac"}
Nov 28 15:19:18 crc kubenswrapper[4857]: I1128 15:19:18.229603 4857 scope.go:117] "RemoveContainer" containerID="5ef5285bd197ee35f66becdfa3ecde61afe107f66575e8853fe09967aa8011e3"
Nov 28 15:19:18 crc kubenswrapper[4857]: E1128 15:19:18.230455 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 15:19:26 crc kubenswrapper[4857]: I1128 15:19:26.053857 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-db-create-c8jwf"]
Nov 28 15:19:26 crc kubenswrapper[4857]: I1128 15:19:26.065137 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-db-create-c8jwf"]
Nov 28 15:19:26 crc kubenswrapper[4857]: I1128 15:19:26.244706 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7787446-63bc-4c06-8cd1-4b40ef714de8" path="/var/lib/kubelet/pods/f7787446-63bc-4c06-8cd1-4b40ef714de8/volumes"
Nov 28 15:19:28 crc kubenswrapper[4857]: I1128 15:19:28.056307 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-fe57-account-create-update-ft9d9"]
Nov 28 15:19:28 crc kubenswrapper[4857]: I1128 15:19:28.070532 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-fe57-account-create-update-ft9d9"]
Nov 28 15:19:28 crc kubenswrapper[4857]: I1128 15:19:28.255999 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c90553bd-80df-4379-a1a7-dff0ca2619d3" path="/var/lib/kubelet/pods/c90553bd-80df-4379-a1a7-dff0ca2619d3/volumes"
Nov 28 15:19:31 crc kubenswrapper[4857]: I1128 15:19:31.045225 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2" event={"ID":"28b59422-5490-4fcc-a7cd-a4e11842e1d4","Type":"ContainerStarted","Data":"d425903a2200519602596e2d061aa60a5e1dd38a36849157c5ce8f32d7fa39f5"}
Nov 28 15:19:31 crc kubenswrapper[4857]: I1128 15:19:31.082272 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2" podStartSLOduration=2.578717829 podStartE2EDuration="17.082246777s" podCreationTimestamp="2025-11-28 15:19:14 +0000 UTC" firstStartedPulling="2025-11-28 15:19:15.602330738 +0000 UTC m=+6605.726272185" lastFinishedPulling="2025-11-28 15:19:30.105859656 +0000 UTC m=+6620.229801133" observedRunningTime="2025-11-28 15:19:31.072419764 +0000 UTC m=+6621.196361251" watchObservedRunningTime="2025-11-28 15:19:31.082246777 +0000 UTC m=+6621.206188214"
Nov 28 15:19:33 crc kubenswrapper[4857]: I1128 15:19:33.229928 4857 scope.go:117] "RemoveContainer" containerID="5ef5285bd197ee35f66becdfa3ecde61afe107f66575e8853fe09967aa8011e3"
Nov 28 15:19:33 crc kubenswrapper[4857]: E1128 15:19:33.231196 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 15:19:34 crc kubenswrapper[4857]: I1128 15:19:34.027008 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-persistence-db-create-q927d"]
Nov 28 15:19:34 crc kubenswrapper[4857]: I1128 15:19:34.036596 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-persistence-db-create-q927d"]
Nov 28 15:19:34 crc kubenswrapper[4857]: I1128 15:19:34.243649 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="08e82f77-ad92-4db1-bf6e-fa8a02ae639b" path="/var/lib/kubelet/pods/08e82f77-ad92-4db1-bf6e-fa8a02ae639b/volumes"
Nov 28 15:19:35 crc kubenswrapper[4857]: I1128 15:19:35.032554 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-2384-account-create-update-jzq8h"]
Nov 28 15:19:35 crc kubenswrapper[4857]: I1128 15:19:35.042299 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-2384-account-create-update-jzq8h"]
Nov 28 15:19:36 crc kubenswrapper[4857]: I1128 15:19:36.250840 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="78b86780-aa3c-4783-9fbf-69f51b0e4e62" path="/var/lib/kubelet/pods/78b86780-aa3c-4783-9fbf-69f51b0e4e62/volumes"
Nov 28 15:19:45 crc kubenswrapper[4857]: I1128 15:19:45.196009 4857 generic.go:334] "Generic (PLEG): container finished" podID="28b59422-5490-4fcc-a7cd-a4e11842e1d4" containerID="d425903a2200519602596e2d061aa60a5e1dd38a36849157c5ce8f32d7fa39f5" exitCode=0
Nov 28 15:19:45 crc kubenswrapper[4857]: I1128 15:19:45.196107 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2" event={"ID":"28b59422-5490-4fcc-a7cd-a4e11842e1d4","Type":"ContainerDied","Data":"d425903a2200519602596e2d061aa60a5e1dd38a36849157c5ce8f32d7fa39f5"}
Nov 28 15:19:46 crc kubenswrapper[4857]: I1128 15:19:46.625255 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2"
Nov 28 15:19:46 crc kubenswrapper[4857]: I1128 15:19:46.708246 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/28b59422-5490-4fcc-a7cd-a4e11842e1d4-inventory\") pod \"28b59422-5490-4fcc-a7cd-a4e11842e1d4\" (UID: \"28b59422-5490-4fcc-a7cd-a4e11842e1d4\") "
Nov 28 15:19:46 crc kubenswrapper[4857]: I1128 15:19:46.708304 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/28b59422-5490-4fcc-a7cd-a4e11842e1d4-ceph\") pod \"28b59422-5490-4fcc-a7cd-a4e11842e1d4\" (UID: \"28b59422-5490-4fcc-a7cd-a4e11842e1d4\") "
Nov 28 15:19:46 crc kubenswrapper[4857]: I1128 15:19:46.708523 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j9h4c\" (UniqueName: \"kubernetes.io/projected/28b59422-5490-4fcc-a7cd-a4e11842e1d4-kube-api-access-j9h4c\") pod \"28b59422-5490-4fcc-a7cd-a4e11842e1d4\" (UID: \"28b59422-5490-4fcc-a7cd-a4e11842e1d4\") "
Nov 28 15:19:46 crc kubenswrapper[4857]: I1128 15:19:46.708593 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28b59422-5490-4fcc-a7cd-a4e11842e1d4-pre-adoption-validation-combined-ca-bundle\") pod \"28b59422-5490-4fcc-a7cd-a4e11842e1d4\" (UID: \"28b59422-5490-4fcc-a7cd-a4e11842e1d4\") "
Nov 28 15:19:46 crc kubenswrapper[4857]: I1128 15:19:46.708616 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/28b59422-5490-4fcc-a7cd-a4e11842e1d4-ssh-key\") pod \"28b59422-5490-4fcc-a7cd-a4e11842e1d4\" (UID: \"28b59422-5490-4fcc-a7cd-a4e11842e1d4\") "
Nov 28 15:19:46 crc kubenswrapper[4857]: I1128 15:19:46.715269 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28b59422-5490-4fcc-a7cd-a4e11842e1d4-kube-api-access-j9h4c" (OuterVolumeSpecName: "kube-api-access-j9h4c") pod "28b59422-5490-4fcc-a7cd-a4e11842e1d4" (UID: "28b59422-5490-4fcc-a7cd-a4e11842e1d4"). InnerVolumeSpecName "kube-api-access-j9h4c". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:19:46 crc kubenswrapper[4857]: I1128 15:19:46.717022 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28b59422-5490-4fcc-a7cd-a4e11842e1d4-ceph" (OuterVolumeSpecName: "ceph") pod "28b59422-5490-4fcc-a7cd-a4e11842e1d4" (UID: "28b59422-5490-4fcc-a7cd-a4e11842e1d4"). InnerVolumeSpecName "ceph".
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:46 crc kubenswrapper[4857]: I1128 15:19:46.730413 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28b59422-5490-4fcc-a7cd-a4e11842e1d4-pre-adoption-validation-combined-ca-bundle" (OuterVolumeSpecName: "pre-adoption-validation-combined-ca-bundle") pod "28b59422-5490-4fcc-a7cd-a4e11842e1d4" (UID: "28b59422-5490-4fcc-a7cd-a4e11842e1d4"). InnerVolumeSpecName "pre-adoption-validation-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:46 crc kubenswrapper[4857]: I1128 15:19:46.742442 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28b59422-5490-4fcc-a7cd-a4e11842e1d4-inventory" (OuterVolumeSpecName: "inventory") pod "28b59422-5490-4fcc-a7cd-a4e11842e1d4" (UID: "28b59422-5490-4fcc-a7cd-a4e11842e1d4"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:46 crc kubenswrapper[4857]: I1128 15:19:46.753680 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28b59422-5490-4fcc-a7cd-a4e11842e1d4-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "28b59422-5490-4fcc-a7cd-a4e11842e1d4" (UID: "28b59422-5490-4fcc-a7cd-a4e11842e1d4"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:19:46 crc kubenswrapper[4857]: I1128 15:19:46.813970 4857 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/28b59422-5490-4fcc-a7cd-a4e11842e1d4-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:46 crc kubenswrapper[4857]: I1128 15:19:46.814010 4857 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/28b59422-5490-4fcc-a7cd-a4e11842e1d4-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:46 crc kubenswrapper[4857]: I1128 15:19:46.814023 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j9h4c\" (UniqueName: \"kubernetes.io/projected/28b59422-5490-4fcc-a7cd-a4e11842e1d4-kube-api-access-j9h4c\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:46 crc kubenswrapper[4857]: I1128 15:19:46.814039 4857 reconciler_common.go:293] "Volume detached for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28b59422-5490-4fcc-a7cd-a4e11842e1d4-pre-adoption-validation-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:46 crc kubenswrapper[4857]: I1128 15:19:46.814052 4857 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/28b59422-5490-4fcc-a7cd-a4e11842e1d4-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 15:19:47 crc kubenswrapper[4857]: I1128 15:19:47.218432 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2" event={"ID":"28b59422-5490-4fcc-a7cd-a4e11842e1d4","Type":"ContainerDied","Data":"46bb12acab93ed685d38a0e3bf99c7e1067da9c12af33a0b4649aad3d6a65dac"} Nov 28 15:19:47 crc kubenswrapper[4857]: I1128 15:19:47.218482 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2" Nov 28 15:19:47 crc kubenswrapper[4857]: I1128 15:19:47.218493 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="46bb12acab93ed685d38a0e3bf99c7e1067da9c12af33a0b4649aad3d6a65dac" Nov 28 15:19:48 crc kubenswrapper[4857]: I1128 15:19:48.229293 4857 scope.go:117] "RemoveContainer" containerID="5ef5285bd197ee35f66becdfa3ecde61afe107f66575e8853fe09967aa8011e3" Nov 28 15:19:48 crc kubenswrapper[4857]: E1128 15:19:48.230092 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:19:58 crc kubenswrapper[4857]: I1128 15:19:58.280335 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f"] Nov 28 15:19:58 crc kubenswrapper[4857]: E1128 15:19:58.294700 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28b59422-5490-4fcc-a7cd-a4e11842e1d4" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-cell1" Nov 28 15:19:58 crc kubenswrapper[4857]: I1128 15:19:58.294733 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="28b59422-5490-4fcc-a7cd-a4e11842e1d4" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-cell1" Nov 28 15:19:58 crc kubenswrapper[4857]: I1128 15:19:58.295007 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="28b59422-5490-4fcc-a7cd-a4e11842e1d4" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-cell1" Nov 28 15:19:58 crc kubenswrapper[4857]: I1128 15:19:58.296148 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f" Nov 28 15:19:58 crc kubenswrapper[4857]: I1128 15:19:58.303525 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 15:19:58 crc kubenswrapper[4857]: I1128 15:19:58.303774 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-qbvn9" Nov 28 15:19:58 crc kubenswrapper[4857]: I1128 15:19:58.303923 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 15:19:58 crc kubenswrapper[4857]: I1128 15:19:58.310327 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 15:19:58 crc kubenswrapper[4857]: I1128 15:19:58.334697 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f"] Nov 28 15:19:58 crc kubenswrapper[4857]: I1128 15:19:58.397888 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f8wlc\" (UniqueName: \"kubernetes.io/projected/32fb95d9-2633-4efe-bcc4-e74e6fb9bd12-kube-api-access-f8wlc\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f\" (UID: \"32fb95d9-2633-4efe-bcc4-e74e6fb9bd12\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f" Nov 28 15:19:58 crc kubenswrapper[4857]: I1128 15:19:58.398066 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/32fb95d9-2633-4efe-bcc4-e74e6fb9bd12-inventory\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f\" (UID: \"32fb95d9-2633-4efe-bcc4-e74e6fb9bd12\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f" Nov 28 15:19:58 crc kubenswrapper[4857]: I1128 15:19:58.398380 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/32fb95d9-2633-4efe-bcc4-e74e6fb9bd12-ceph\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f\" (UID: \"32fb95d9-2633-4efe-bcc4-e74e6fb9bd12\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f" Nov 28 15:19:58 crc kubenswrapper[4857]: I1128 15:19:58.398455 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32fb95d9-2633-4efe-bcc4-e74e6fb9bd12-tripleo-cleanup-combined-ca-bundle\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f\" (UID: \"32fb95d9-2633-4efe-bcc4-e74e6fb9bd12\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f" Nov 28 15:19:58 crc kubenswrapper[4857]: I1128 15:19:58.398623 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/32fb95d9-2633-4efe-bcc4-e74e6fb9bd12-ssh-key\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f\" (UID: \"32fb95d9-2633-4efe-bcc4-e74e6fb9bd12\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f" Nov 28 15:19:58 crc kubenswrapper[4857]: I1128 15:19:58.499424 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/32fb95d9-2633-4efe-bcc4-e74e6fb9bd12-ssh-key\") pod 
\"tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f\" (UID: \"32fb95d9-2633-4efe-bcc4-e74e6fb9bd12\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f" Nov 28 15:19:58 crc kubenswrapper[4857]: I1128 15:19:58.499537 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f8wlc\" (UniqueName: \"kubernetes.io/projected/32fb95d9-2633-4efe-bcc4-e74e6fb9bd12-kube-api-access-f8wlc\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f\" (UID: \"32fb95d9-2633-4efe-bcc4-e74e6fb9bd12\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f" Nov 28 15:19:58 crc kubenswrapper[4857]: I1128 15:19:58.499612 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/32fb95d9-2633-4efe-bcc4-e74e6fb9bd12-inventory\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f\" (UID: \"32fb95d9-2633-4efe-bcc4-e74e6fb9bd12\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f" Nov 28 15:19:58 crc kubenswrapper[4857]: I1128 15:19:58.499641 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/32fb95d9-2633-4efe-bcc4-e74e6fb9bd12-ceph\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f\" (UID: \"32fb95d9-2633-4efe-bcc4-e74e6fb9bd12\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f" Nov 28 15:19:58 crc kubenswrapper[4857]: I1128 15:19:58.499682 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32fb95d9-2633-4efe-bcc4-e74e6fb9bd12-tripleo-cleanup-combined-ca-bundle\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f\" (UID: \"32fb95d9-2633-4efe-bcc4-e74e6fb9bd12\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f" Nov 28 15:19:58 crc kubenswrapper[4857]: I1128 15:19:58.524244 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/32fb95d9-2633-4efe-bcc4-e74e6fb9bd12-ceph\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f\" (UID: \"32fb95d9-2633-4efe-bcc4-e74e6fb9bd12\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f" Nov 28 15:19:58 crc kubenswrapper[4857]: I1128 15:19:58.524631 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/32fb95d9-2633-4efe-bcc4-e74e6fb9bd12-ssh-key\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f\" (UID: \"32fb95d9-2633-4efe-bcc4-e74e6fb9bd12\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f" Nov 28 15:19:58 crc kubenswrapper[4857]: I1128 15:19:58.534507 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/32fb95d9-2633-4efe-bcc4-e74e6fb9bd12-inventory\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f\" (UID: \"32fb95d9-2633-4efe-bcc4-e74e6fb9bd12\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f" Nov 28 15:19:58 crc kubenswrapper[4857]: I1128 15:19:58.535665 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32fb95d9-2633-4efe-bcc4-e74e6fb9bd12-tripleo-cleanup-combined-ca-bundle\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f\" (UID: 
\"32fb95d9-2633-4efe-bcc4-e74e6fb9bd12\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f" Nov 28 15:19:58 crc kubenswrapper[4857]: I1128 15:19:58.536198 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f8wlc\" (UniqueName: \"kubernetes.io/projected/32fb95d9-2633-4efe-bcc4-e74e6fb9bd12-kube-api-access-f8wlc\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f\" (UID: \"32fb95d9-2633-4efe-bcc4-e74e6fb9bd12\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f" Nov 28 15:19:58 crc kubenswrapper[4857]: I1128 15:19:58.626049 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f" Nov 28 15:19:59 crc kubenswrapper[4857]: I1128 15:19:59.170110 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f"] Nov 28 15:19:59 crc kubenswrapper[4857]: I1128 15:19:59.365446 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f" event={"ID":"32fb95d9-2633-4efe-bcc4-e74e6fb9bd12","Type":"ContainerStarted","Data":"bc937da3f7f295a9a1e9201f9ac642e8d8872d1bf68de7ca64ccc85543360781"} Nov 28 15:20:00 crc kubenswrapper[4857]: I1128 15:20:00.407862 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f" event={"ID":"32fb95d9-2633-4efe-bcc4-e74e6fb9bd12","Type":"ContainerStarted","Data":"706da768641909ae1160efb5dbc3213de87db6584dd4aa35ed1381f0a95e8bc3"} Nov 28 15:20:00 crc kubenswrapper[4857]: I1128 15:20:00.442742 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f" podStartSLOduration=2.265559932 podStartE2EDuration="2.442720584s" podCreationTimestamp="2025-11-28 15:19:58 +0000 UTC" firstStartedPulling="2025-11-28 15:19:59.174393246 +0000 UTC m=+6649.298334683" lastFinishedPulling="2025-11-28 15:19:59.351553888 +0000 UTC m=+6649.475495335" observedRunningTime="2025-11-28 15:20:00.431540066 +0000 UTC m=+6650.555481493" watchObservedRunningTime="2025-11-28 15:20:00.442720584 +0000 UTC m=+6650.566662021" Nov 28 15:20:01 crc kubenswrapper[4857]: I1128 15:20:01.229395 4857 scope.go:117] "RemoveContainer" containerID="5ef5285bd197ee35f66becdfa3ecde61afe107f66575e8853fe09967aa8011e3" Nov 28 15:20:01 crc kubenswrapper[4857]: E1128 15:20:01.229837 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:20:16 crc kubenswrapper[4857]: I1128 15:20:16.229776 4857 scope.go:117] "RemoveContainer" containerID="5ef5285bd197ee35f66becdfa3ecde61afe107f66575e8853fe09967aa8011e3" Nov 28 15:20:16 crc kubenswrapper[4857]: E1128 15:20:16.230916 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:20:28 crc kubenswrapper[4857]: I1128 15:20:28.230049 4857 scope.go:117] "RemoveContainer" containerID="5ef5285bd197ee35f66becdfa3ecde61afe107f66575e8853fe09967aa8011e3" Nov 28 15:20:28 crc kubenswrapper[4857]: E1128 15:20:28.231376 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:20:29 crc kubenswrapper[4857]: I1128 15:20:29.724874 4857 scope.go:117] "RemoveContainer" containerID="defa293fabae210787d2215a9d5d5e802136add47d10ce114244db2f32bd57b6" Nov 28 15:20:29 crc kubenswrapper[4857]: I1128 15:20:29.793462 4857 scope.go:117] "RemoveContainer" containerID="349359cd13104fa413935e63a54a4b6af67d587be8162717a95a7f05c40cd3c5" Nov 28 15:20:29 crc kubenswrapper[4857]: I1128 15:20:29.845407 4857 scope.go:117] "RemoveContainer" containerID="541ab19a9deabff835426965c0eab98aa35fd9f916f8cd696204af9b039a5b48" Nov 28 15:20:29 crc kubenswrapper[4857]: I1128 15:20:29.882762 4857 scope.go:117] "RemoveContainer" containerID="45c82728d485824b52bc0e2420b2549f930819f64e69c90e153bbce41bd281e6" Nov 28 15:20:30 crc kubenswrapper[4857]: I1128 15:20:30.058011 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-db-sync-pt8bx"] Nov 28 15:20:30 crc kubenswrapper[4857]: I1128 15:20:30.074382 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-db-sync-pt8bx"] Nov 28 15:20:30 crc kubenswrapper[4857]: I1128 15:20:30.247703 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d3222b2f-724a-4c6f-9bc4-8744c4dc3377" path="/var/lib/kubelet/pods/d3222b2f-724a-4c6f-9bc4-8744c4dc3377/volumes" Nov 28 15:20:41 crc kubenswrapper[4857]: I1128 15:20:41.229582 4857 scope.go:117] "RemoveContainer" containerID="5ef5285bd197ee35f66becdfa3ecde61afe107f66575e8853fe09967aa8011e3" Nov 28 15:20:41 crc kubenswrapper[4857]: E1128 15:20:41.231041 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:20:52 crc kubenswrapper[4857]: I1128 15:20:52.229570 4857 scope.go:117] "RemoveContainer" containerID="5ef5285bd197ee35f66becdfa3ecde61afe107f66575e8853fe09967aa8011e3" Nov 28 15:20:52 crc kubenswrapper[4857]: E1128 15:20:52.230318 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:21:05 crc kubenswrapper[4857]: I1128 15:21:05.228748 4857 scope.go:117] "RemoveContainer" 
containerID="5ef5285bd197ee35f66becdfa3ecde61afe107f66575e8853fe09967aa8011e3" Nov 28 15:21:05 crc kubenswrapper[4857]: E1128 15:21:05.229535 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:21:16 crc kubenswrapper[4857]: I1128 15:21:16.229751 4857 scope.go:117] "RemoveContainer" containerID="5ef5285bd197ee35f66becdfa3ecde61afe107f66575e8853fe09967aa8011e3" Nov 28 15:21:16 crc kubenswrapper[4857]: E1128 15:21:16.232126 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:21:30 crc kubenswrapper[4857]: I1128 15:21:30.035684 4857 scope.go:117] "RemoveContainer" containerID="e6913053466e9da85fc1ab3b0b8e5710527123e2a5616312ecbabf7e4050c3b1" Nov 28 15:21:30 crc kubenswrapper[4857]: I1128 15:21:30.115713 4857 scope.go:117] "RemoveContainer" containerID="09c50b0a90cd7928918427f63714abf48ff1895323713efcf0ddf9e7dd0db0c6" Nov 28 15:21:30 crc kubenswrapper[4857]: I1128 15:21:30.256658 4857 scope.go:117] "RemoveContainer" containerID="5ef5285bd197ee35f66becdfa3ecde61afe107f66575e8853fe09967aa8011e3" Nov 28 15:21:30 crc kubenswrapper[4857]: E1128 15:21:30.257004 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:21:41 crc kubenswrapper[4857]: I1128 15:21:41.228847 4857 scope.go:117] "RemoveContainer" containerID="5ef5285bd197ee35f66becdfa3ecde61afe107f66575e8853fe09967aa8011e3" Nov 28 15:21:41 crc kubenswrapper[4857]: E1128 15:21:41.229775 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:21:56 crc kubenswrapper[4857]: I1128 15:21:56.230516 4857 scope.go:117] "RemoveContainer" containerID="5ef5285bd197ee35f66becdfa3ecde61afe107f66575e8853fe09967aa8011e3" Nov 28 15:21:56 crc kubenswrapper[4857]: E1128 15:21:56.231314 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:21:56 crc kubenswrapper[4857]: I1128 15:21:56.404881 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-jrp86"] Nov 28 15:21:56 crc kubenswrapper[4857]: I1128 15:21:56.407231 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jrp86" Nov 28 15:21:56 crc kubenswrapper[4857]: I1128 15:21:56.431430 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jrp86"] Nov 28 15:21:56 crc kubenswrapper[4857]: I1128 15:21:56.556845 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wrc8g\" (UniqueName: \"kubernetes.io/projected/87ced74e-85b0-4cc5-9498-e90dc55f96b3-kube-api-access-wrc8g\") pod \"redhat-marketplace-jrp86\" (UID: \"87ced74e-85b0-4cc5-9498-e90dc55f96b3\") " pod="openshift-marketplace/redhat-marketplace-jrp86" Nov 28 15:21:56 crc kubenswrapper[4857]: I1128 15:21:56.556986 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87ced74e-85b0-4cc5-9498-e90dc55f96b3-utilities\") pod \"redhat-marketplace-jrp86\" (UID: \"87ced74e-85b0-4cc5-9498-e90dc55f96b3\") " pod="openshift-marketplace/redhat-marketplace-jrp86" Nov 28 15:21:56 crc kubenswrapper[4857]: I1128 15:21:56.557039 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87ced74e-85b0-4cc5-9498-e90dc55f96b3-catalog-content\") pod \"redhat-marketplace-jrp86\" (UID: \"87ced74e-85b0-4cc5-9498-e90dc55f96b3\") " pod="openshift-marketplace/redhat-marketplace-jrp86" Nov 28 15:21:56 crc kubenswrapper[4857]: I1128 15:21:56.659602 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87ced74e-85b0-4cc5-9498-e90dc55f96b3-catalog-content\") pod \"redhat-marketplace-jrp86\" (UID: \"87ced74e-85b0-4cc5-9498-e90dc55f96b3\") " pod="openshift-marketplace/redhat-marketplace-jrp86" Nov 28 15:21:56 crc kubenswrapper[4857]: I1128 15:21:56.659766 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wrc8g\" (UniqueName: \"kubernetes.io/projected/87ced74e-85b0-4cc5-9498-e90dc55f96b3-kube-api-access-wrc8g\") pod \"redhat-marketplace-jrp86\" (UID: \"87ced74e-85b0-4cc5-9498-e90dc55f96b3\") " pod="openshift-marketplace/redhat-marketplace-jrp86" Nov 28 15:21:56 crc kubenswrapper[4857]: I1128 15:21:56.659977 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87ced74e-85b0-4cc5-9498-e90dc55f96b3-utilities\") pod \"redhat-marketplace-jrp86\" (UID: \"87ced74e-85b0-4cc5-9498-e90dc55f96b3\") " pod="openshift-marketplace/redhat-marketplace-jrp86" Nov 28 15:21:56 crc kubenswrapper[4857]: I1128 15:21:56.660156 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87ced74e-85b0-4cc5-9498-e90dc55f96b3-catalog-content\") pod \"redhat-marketplace-jrp86\" (UID: \"87ced74e-85b0-4cc5-9498-e90dc55f96b3\") " pod="openshift-marketplace/redhat-marketplace-jrp86" Nov 28 15:21:56 crc kubenswrapper[4857]: I1128 15:21:56.660423 4857 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87ced74e-85b0-4cc5-9498-e90dc55f96b3-utilities\") pod \"redhat-marketplace-jrp86\" (UID: \"87ced74e-85b0-4cc5-9498-e90dc55f96b3\") " pod="openshift-marketplace/redhat-marketplace-jrp86" Nov 28 15:21:56 crc kubenswrapper[4857]: I1128 15:21:56.680666 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wrc8g\" (UniqueName: \"kubernetes.io/projected/87ced74e-85b0-4cc5-9498-e90dc55f96b3-kube-api-access-wrc8g\") pod \"redhat-marketplace-jrp86\" (UID: \"87ced74e-85b0-4cc5-9498-e90dc55f96b3\") " pod="openshift-marketplace/redhat-marketplace-jrp86" Nov 28 15:21:56 crc kubenswrapper[4857]: I1128 15:21:56.748052 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jrp86" Nov 28 15:21:57 crc kubenswrapper[4857]: I1128 15:21:57.201725 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jrp86"] Nov 28 15:21:57 crc kubenswrapper[4857]: W1128 15:21:57.207190 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod87ced74e_85b0_4cc5_9498_e90dc55f96b3.slice/crio-98b234f88f8cf868d343233e806e4bf0ee5f8e5f03f90bf0115a90cc292ec2ac WatchSource:0}: Error finding container 98b234f88f8cf868d343233e806e4bf0ee5f8e5f03f90bf0115a90cc292ec2ac: Status 404 returned error can't find the container with id 98b234f88f8cf868d343233e806e4bf0ee5f8e5f03f90bf0115a90cc292ec2ac Nov 28 15:21:57 crc kubenswrapper[4857]: I1128 15:21:57.858136 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jrp86" event={"ID":"87ced74e-85b0-4cc5-9498-e90dc55f96b3","Type":"ContainerStarted","Data":"98b234f88f8cf868d343233e806e4bf0ee5f8e5f03f90bf0115a90cc292ec2ac"} Nov 28 15:21:58 crc kubenswrapper[4857]: I1128 15:21:58.868743 4857 generic.go:334] "Generic (PLEG): container finished" podID="87ced74e-85b0-4cc5-9498-e90dc55f96b3" containerID="237472dec39b3f7b0fbc50105ee83f2f56d82b01d396001af17a3897e51982a9" exitCode=0 Nov 28 15:21:58 crc kubenswrapper[4857]: I1128 15:21:58.868850 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jrp86" event={"ID":"87ced74e-85b0-4cc5-9498-e90dc55f96b3","Type":"ContainerDied","Data":"237472dec39b3f7b0fbc50105ee83f2f56d82b01d396001af17a3897e51982a9"} Nov 28 15:21:58 crc kubenswrapper[4857]: I1128 15:21:58.871117 4857 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 15:21:59 crc kubenswrapper[4857]: I1128 15:21:59.881692 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jrp86" event={"ID":"87ced74e-85b0-4cc5-9498-e90dc55f96b3","Type":"ContainerStarted","Data":"4b72f1317e4c32caccda4ee2546748218f0ac49eeecf7081f2122765cae46700"} Nov 28 15:22:00 crc kubenswrapper[4857]: I1128 15:22:00.900293 4857 generic.go:334] "Generic (PLEG): container finished" podID="87ced74e-85b0-4cc5-9498-e90dc55f96b3" containerID="4b72f1317e4c32caccda4ee2546748218f0ac49eeecf7081f2122765cae46700" exitCode=0 Nov 28 15:22:00 crc kubenswrapper[4857]: I1128 15:22:00.900503 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jrp86" 
event={"ID":"87ced74e-85b0-4cc5-9498-e90dc55f96b3","Type":"ContainerDied","Data":"4b72f1317e4c32caccda4ee2546748218f0ac49eeecf7081f2122765cae46700"} Nov 28 15:22:01 crc kubenswrapper[4857]: I1128 15:22:01.912277 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jrp86" event={"ID":"87ced74e-85b0-4cc5-9498-e90dc55f96b3","Type":"ContainerStarted","Data":"e0481235f5c3e6007cef919b0be58db145091181fc95f9d4f4e1a72583ba58cd"} Nov 28 15:22:06 crc kubenswrapper[4857]: I1128 15:22:06.749196 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-jrp86" Nov 28 15:22:06 crc kubenswrapper[4857]: I1128 15:22:06.749856 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-jrp86" Nov 28 15:22:06 crc kubenswrapper[4857]: I1128 15:22:06.838437 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-jrp86" Nov 28 15:22:06 crc kubenswrapper[4857]: I1128 15:22:06.871358 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-jrp86" podStartSLOduration=8.120561431 podStartE2EDuration="10.871334536s" podCreationTimestamp="2025-11-28 15:21:56 +0000 UTC" firstStartedPulling="2025-11-28 15:21:58.87089203 +0000 UTC m=+6768.994833467" lastFinishedPulling="2025-11-28 15:22:01.621665145 +0000 UTC m=+6771.745606572" observedRunningTime="2025-11-28 15:22:01.938143058 +0000 UTC m=+6772.062084525" watchObservedRunningTime="2025-11-28 15:22:06.871334536 +0000 UTC m=+6776.995275983" Nov 28 15:22:07 crc kubenswrapper[4857]: I1128 15:22:07.056865 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-jrp86" Nov 28 15:22:07 crc kubenswrapper[4857]: I1128 15:22:07.129510 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jrp86"] Nov 28 15:22:08 crc kubenswrapper[4857]: I1128 15:22:08.230061 4857 scope.go:117] "RemoveContainer" containerID="5ef5285bd197ee35f66becdfa3ecde61afe107f66575e8853fe09967aa8011e3" Nov 28 15:22:08 crc kubenswrapper[4857]: E1128 15:22:08.230642 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:22:08 crc kubenswrapper[4857]: I1128 15:22:08.996230 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-jrp86" podUID="87ced74e-85b0-4cc5-9498-e90dc55f96b3" containerName="registry-server" containerID="cri-o://e0481235f5c3e6007cef919b0be58db145091181fc95f9d4f4e1a72583ba58cd" gracePeriod=2 Nov 28 15:22:10 crc kubenswrapper[4857]: I1128 15:22:10.015381 4857 generic.go:334] "Generic (PLEG): container finished" podID="87ced74e-85b0-4cc5-9498-e90dc55f96b3" containerID="e0481235f5c3e6007cef919b0be58db145091181fc95f9d4f4e1a72583ba58cd" exitCode=0 Nov 28 15:22:10 crc kubenswrapper[4857]: I1128 15:22:10.015486 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jrp86" 
event={"ID":"87ced74e-85b0-4cc5-9498-e90dc55f96b3","Type":"ContainerDied","Data":"e0481235f5c3e6007cef919b0be58db145091181fc95f9d4f4e1a72583ba58cd"} Nov 28 15:22:10 crc kubenswrapper[4857]: I1128 15:22:10.123257 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jrp86" Nov 28 15:22:10 crc kubenswrapper[4857]: I1128 15:22:10.307777 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87ced74e-85b0-4cc5-9498-e90dc55f96b3-utilities\") pod \"87ced74e-85b0-4cc5-9498-e90dc55f96b3\" (UID: \"87ced74e-85b0-4cc5-9498-e90dc55f96b3\") " Nov 28 15:22:10 crc kubenswrapper[4857]: I1128 15:22:10.307917 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wrc8g\" (UniqueName: \"kubernetes.io/projected/87ced74e-85b0-4cc5-9498-e90dc55f96b3-kube-api-access-wrc8g\") pod \"87ced74e-85b0-4cc5-9498-e90dc55f96b3\" (UID: \"87ced74e-85b0-4cc5-9498-e90dc55f96b3\") " Nov 28 15:22:10 crc kubenswrapper[4857]: I1128 15:22:10.308005 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87ced74e-85b0-4cc5-9498-e90dc55f96b3-catalog-content\") pod \"87ced74e-85b0-4cc5-9498-e90dc55f96b3\" (UID: \"87ced74e-85b0-4cc5-9498-e90dc55f96b3\") " Nov 28 15:22:10 crc kubenswrapper[4857]: I1128 15:22:10.308986 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/87ced74e-85b0-4cc5-9498-e90dc55f96b3-utilities" (OuterVolumeSpecName: "utilities") pod "87ced74e-85b0-4cc5-9498-e90dc55f96b3" (UID: "87ced74e-85b0-4cc5-9498-e90dc55f96b3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:22:10 crc kubenswrapper[4857]: I1128 15:22:10.317289 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87ced74e-85b0-4cc5-9498-e90dc55f96b3-kube-api-access-wrc8g" (OuterVolumeSpecName: "kube-api-access-wrc8g") pod "87ced74e-85b0-4cc5-9498-e90dc55f96b3" (UID: "87ced74e-85b0-4cc5-9498-e90dc55f96b3"). InnerVolumeSpecName "kube-api-access-wrc8g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:22:10 crc kubenswrapper[4857]: I1128 15:22:10.348552 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/87ced74e-85b0-4cc5-9498-e90dc55f96b3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "87ced74e-85b0-4cc5-9498-e90dc55f96b3" (UID: "87ced74e-85b0-4cc5-9498-e90dc55f96b3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:22:10 crc kubenswrapper[4857]: I1128 15:22:10.411705 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/87ced74e-85b0-4cc5-9498-e90dc55f96b3-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:22:10 crc kubenswrapper[4857]: I1128 15:22:10.411754 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wrc8g\" (UniqueName: \"kubernetes.io/projected/87ced74e-85b0-4cc5-9498-e90dc55f96b3-kube-api-access-wrc8g\") on node \"crc\" DevicePath \"\"" Nov 28 15:22:10 crc kubenswrapper[4857]: I1128 15:22:10.411767 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/87ced74e-85b0-4cc5-9498-e90dc55f96b3-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:22:11 crc kubenswrapper[4857]: I1128 15:22:11.036280 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jrp86" event={"ID":"87ced74e-85b0-4cc5-9498-e90dc55f96b3","Type":"ContainerDied","Data":"98b234f88f8cf868d343233e806e4bf0ee5f8e5f03f90bf0115a90cc292ec2ac"} Nov 28 15:22:11 crc kubenswrapper[4857]: I1128 15:22:11.036330 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jrp86" Nov 28 15:22:11 crc kubenswrapper[4857]: I1128 15:22:11.036848 4857 scope.go:117] "RemoveContainer" containerID="e0481235f5c3e6007cef919b0be58db145091181fc95f9d4f4e1a72583ba58cd" Nov 28 15:22:11 crc kubenswrapper[4857]: I1128 15:22:11.082210 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jrp86"] Nov 28 15:22:11 crc kubenswrapper[4857]: I1128 15:22:11.087977 4857 scope.go:117] "RemoveContainer" containerID="4b72f1317e4c32caccda4ee2546748218f0ac49eeecf7081f2122765cae46700" Nov 28 15:22:11 crc kubenswrapper[4857]: I1128 15:22:11.093398 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-jrp86"] Nov 28 15:22:11 crc kubenswrapper[4857]: I1128 15:22:11.116985 4857 scope.go:117] "RemoveContainer" containerID="237472dec39b3f7b0fbc50105ee83f2f56d82b01d396001af17a3897e51982a9" Nov 28 15:22:12 crc kubenswrapper[4857]: I1128 15:22:12.248290 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87ced74e-85b0-4cc5-9498-e90dc55f96b3" path="/var/lib/kubelet/pods/87ced74e-85b0-4cc5-9498-e90dc55f96b3/volumes" Nov 28 15:22:23 crc kubenswrapper[4857]: I1128 15:22:23.229338 4857 scope.go:117] "RemoveContainer" containerID="5ef5285bd197ee35f66becdfa3ecde61afe107f66575e8853fe09967aa8011e3" Nov 28 15:22:23 crc kubenswrapper[4857]: E1128 15:22:23.230165 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:22:36 crc kubenswrapper[4857]: I1128 15:22:36.230863 4857 scope.go:117] "RemoveContainer" containerID="5ef5285bd197ee35f66becdfa3ecde61afe107f66575e8853fe09967aa8011e3" Nov 28 15:22:36 crc kubenswrapper[4857]: E1128 15:22:36.232544 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:22:48 crc kubenswrapper[4857]: I1128 15:22:48.229613 4857 scope.go:117] "RemoveContainer" containerID="5ef5285bd197ee35f66becdfa3ecde61afe107f66575e8853fe09967aa8011e3" Nov 28 15:22:49 crc kubenswrapper[4857]: I1128 15:22:49.489705 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerStarted","Data":"346c6f28644616555969ca723cabdfbd7b479b34008bb6546bdb4a4acb540f5d"} Nov 28 15:23:30 crc kubenswrapper[4857]: I1128 15:23:30.245083 4857 scope.go:117] "RemoveContainer" containerID="ec795c6475e66102dac997198e0bc0d615cf8bf566be15ffa61305e036641637" Nov 28 15:23:30 crc kubenswrapper[4857]: I1128 15:23:30.291285 4857 scope.go:117] "RemoveContainer" containerID="7a1ba20e7c1e108386d0c5ab2ee6fda4ad46ebb0304a269f19ca78e5215b5aa3" Nov 28 15:23:30 crc kubenswrapper[4857]: I1128 15:23:30.316876 4857 scope.go:117] "RemoveContainer" containerID="d7e416a48a49f4b6d30d19b271875b35bf84e51159da321857e9646cf5034e6d" Nov 28 15:23:44 crc kubenswrapper[4857]: I1128 15:23:44.045096 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-ca47-account-create-update-w72m7"] Nov 28 15:23:44 crc kubenswrapper[4857]: I1128 15:23:44.059034 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-create-5drfb"] Nov 28 15:23:44 crc kubenswrapper[4857]: I1128 15:23:44.072629 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-ca47-account-create-update-w72m7"] Nov 28 15:23:44 crc kubenswrapper[4857]: I1128 15:23:44.085536 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-create-5drfb"] Nov 28 15:23:44 crc kubenswrapper[4857]: I1128 15:23:44.242814 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d8d5035-d774-4c0d-a530-62382257bc8f" path="/var/lib/kubelet/pods/6d8d5035-d774-4c0d-a530-62382257bc8f/volumes" Nov 28 15:23:44 crc kubenswrapper[4857]: I1128 15:23:44.244179 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ca679d3b-5662-4e16-a37d-991087f4857f" path="/var/lib/kubelet/pods/ca679d3b-5662-4e16-a37d-991087f4857f/volumes" Nov 28 15:24:03 crc kubenswrapper[4857]: I1128 15:24:03.045709 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-sync-95psf"] Nov 28 15:24:03 crc kubenswrapper[4857]: I1128 15:24:03.065723 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-95psf"] Nov 28 15:24:04 crc kubenswrapper[4857]: I1128 15:24:04.241530 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="63a2503c-af94-42d7-b995-c2fe4018aed0" path="/var/lib/kubelet/pods/63a2503c-af94-42d7-b995-c2fe4018aed0/volumes" Nov 28 15:24:30 crc kubenswrapper[4857]: I1128 15:24:30.406975 4857 scope.go:117] "RemoveContainer" containerID="87ebd22c82949004d2ec9780a6dcaf7bb0c930227c13fac47716b29191785ca5" Nov 28 15:24:30 crc kubenswrapper[4857]: I1128 15:24:30.433915 4857 scope.go:117] "RemoveContainer" containerID="0379c6b834498938a2603a2438b8a9d8528e8ae567af8a1d183dcc27a5cd902a" Nov 28 15:24:30 crc kubenswrapper[4857]: I1128 15:24:30.499205 4857 scope.go:117] "RemoveContainer" 
containerID="153141947c8750b84fa183f2e95ba4209acd921494e537400d06bf308be0e767" Nov 28 15:25:11 crc kubenswrapper[4857]: I1128 15:25:11.308703 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:25:11 crc kubenswrapper[4857]: I1128 15:25:11.309523 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:25:41 crc kubenswrapper[4857]: I1128 15:25:41.308509 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:25:41 crc kubenswrapper[4857]: I1128 15:25:41.309002 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:26:11 crc kubenswrapper[4857]: I1128 15:26:11.308355 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:26:11 crc kubenswrapper[4857]: I1128 15:26:11.309377 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:26:11 crc kubenswrapper[4857]: I1128 15:26:11.309432 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 15:26:11 crc kubenswrapper[4857]: I1128 15:26:11.310096 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"346c6f28644616555969ca723cabdfbd7b479b34008bb6546bdb4a4acb540f5d"} pod="openshift-machine-config-operator/machine-config-daemon-dshsf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 15:26:11 crc kubenswrapper[4857]: I1128 15:26:11.310163 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" containerID="cri-o://346c6f28644616555969ca723cabdfbd7b479b34008bb6546bdb4a4acb540f5d" gracePeriod=600 Nov 28 15:26:11 crc kubenswrapper[4857]: I1128 15:26:11.923438 4857 generic.go:334] "Generic (PLEG): container finished" podID="5d5445a4-417c-448a-a8a0-4a4f81828aff" 
containerID="346c6f28644616555969ca723cabdfbd7b479b34008bb6546bdb4a4acb540f5d" exitCode=0 Nov 28 15:26:11 crc kubenswrapper[4857]: I1128 15:26:11.923535 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerDied","Data":"346c6f28644616555969ca723cabdfbd7b479b34008bb6546bdb4a4acb540f5d"} Nov 28 15:26:11 crc kubenswrapper[4857]: I1128 15:26:11.924291 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerStarted","Data":"8ce4d8d1204f0e33eb25524e56c57a576adc7638a6767a35e3118bbb0412c0ed"} Nov 28 15:26:11 crc kubenswrapper[4857]: I1128 15:26:11.924317 4857 scope.go:117] "RemoveContainer" containerID="5ef5285bd197ee35f66becdfa3ecde61afe107f66575e8853fe09967aa8011e3" Nov 28 15:26:28 crc kubenswrapper[4857]: I1128 15:26:28.037101 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-3ed0-account-create-update-wtjjl"] Nov 28 15:26:28 crc kubenswrapper[4857]: I1128 15:26:28.046202 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-3ed0-account-create-update-wtjjl"] Nov 28 15:26:28 crc kubenswrapper[4857]: I1128 15:26:28.247846 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="46705f3c-1ca3-46fd-9cd2-dcf6bfd62b73" path="/var/lib/kubelet/pods/46705f3c-1ca3-46fd-9cd2-dcf6bfd62b73/volumes" Nov 28 15:26:29 crc kubenswrapper[4857]: I1128 15:26:29.030238 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-create-qpksf"] Nov 28 15:26:29 crc kubenswrapper[4857]: I1128 15:26:29.039920 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-create-qpksf"] Nov 28 15:26:30 crc kubenswrapper[4857]: I1128 15:26:30.243973 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f89f2fe7-8c04-4877-9ccd-7d94509fa1b5" path="/var/lib/kubelet/pods/f89f2fe7-8c04-4877-9ccd-7d94509fa1b5/volumes" Nov 28 15:26:30 crc kubenswrapper[4857]: I1128 15:26:30.641869 4857 scope.go:117] "RemoveContainer" containerID="90d9b9163807207abd4fc4d3c4e517b7d88a9e24ca7a008f5300e344f56a7515" Nov 28 15:26:30 crc kubenswrapper[4857]: I1128 15:26:30.685100 4857 scope.go:117] "RemoveContainer" containerID="c9723de64b7e6004df7af9003bb6f4faa2cccbd281b1b15903f1c13a73f1fa68" Nov 28 15:26:50 crc kubenswrapper[4857]: I1128 15:26:50.052420 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-sync-wclgf"] Nov 28 15:26:50 crc kubenswrapper[4857]: I1128 15:26:50.064048 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-sync-wclgf"] Nov 28 15:26:50 crc kubenswrapper[4857]: I1128 15:26:50.245984 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b0961f51-514c-40dd-80aa-fcb46e84f2eb" path="/var/lib/kubelet/pods/b0961f51-514c-40dd-80aa-fcb46e84f2eb/volumes" Nov 28 15:26:50 crc kubenswrapper[4857]: I1128 15:26:50.684827 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-v7rmb"] Nov 28 15:26:50 crc kubenswrapper[4857]: E1128 15:26:50.688557 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87ced74e-85b0-4cc5-9498-e90dc55f96b3" containerName="registry-server" Nov 28 15:26:50 crc kubenswrapper[4857]: I1128 15:26:50.688588 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="87ced74e-85b0-4cc5-9498-e90dc55f96b3" 
containerName="registry-server" Nov 28 15:26:50 crc kubenswrapper[4857]: E1128 15:26:50.688608 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87ced74e-85b0-4cc5-9498-e90dc55f96b3" containerName="extract-utilities" Nov 28 15:26:50 crc kubenswrapper[4857]: I1128 15:26:50.688617 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="87ced74e-85b0-4cc5-9498-e90dc55f96b3" containerName="extract-utilities" Nov 28 15:26:50 crc kubenswrapper[4857]: E1128 15:26:50.688636 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87ced74e-85b0-4cc5-9498-e90dc55f96b3" containerName="extract-content" Nov 28 15:26:50 crc kubenswrapper[4857]: I1128 15:26:50.688644 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="87ced74e-85b0-4cc5-9498-e90dc55f96b3" containerName="extract-content" Nov 28 15:26:50 crc kubenswrapper[4857]: I1128 15:26:50.688988 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="87ced74e-85b0-4cc5-9498-e90dc55f96b3" containerName="registry-server" Nov 28 15:26:50 crc kubenswrapper[4857]: I1128 15:26:50.691777 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-v7rmb" Nov 28 15:26:50 crc kubenswrapper[4857]: I1128 15:26:50.699488 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-v7rmb"] Nov 28 15:26:50 crc kubenswrapper[4857]: I1128 15:26:50.849547 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nmc6k\" (UniqueName: \"kubernetes.io/projected/77b8c4f1-9bde-453b-8a69-bce5155e2a4d-kube-api-access-nmc6k\") pod \"redhat-operators-v7rmb\" (UID: \"77b8c4f1-9bde-453b-8a69-bce5155e2a4d\") " pod="openshift-marketplace/redhat-operators-v7rmb" Nov 28 15:26:50 crc kubenswrapper[4857]: I1128 15:26:50.849635 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77b8c4f1-9bde-453b-8a69-bce5155e2a4d-utilities\") pod \"redhat-operators-v7rmb\" (UID: \"77b8c4f1-9bde-453b-8a69-bce5155e2a4d\") " pod="openshift-marketplace/redhat-operators-v7rmb" Nov 28 15:26:50 crc kubenswrapper[4857]: I1128 15:26:50.850056 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77b8c4f1-9bde-453b-8a69-bce5155e2a4d-catalog-content\") pod \"redhat-operators-v7rmb\" (UID: \"77b8c4f1-9bde-453b-8a69-bce5155e2a4d\") " pod="openshift-marketplace/redhat-operators-v7rmb" Nov 28 15:26:50 crc kubenswrapper[4857]: I1128 15:26:50.952920 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77b8c4f1-9bde-453b-8a69-bce5155e2a4d-catalog-content\") pod \"redhat-operators-v7rmb\" (UID: \"77b8c4f1-9bde-453b-8a69-bce5155e2a4d\") " pod="openshift-marketplace/redhat-operators-v7rmb" Nov 28 15:26:50 crc kubenswrapper[4857]: I1128 15:26:50.953146 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nmc6k\" (UniqueName: \"kubernetes.io/projected/77b8c4f1-9bde-453b-8a69-bce5155e2a4d-kube-api-access-nmc6k\") pod \"redhat-operators-v7rmb\" (UID: \"77b8c4f1-9bde-453b-8a69-bce5155e2a4d\") " pod="openshift-marketplace/redhat-operators-v7rmb" Nov 28 15:26:50 crc kubenswrapper[4857]: I1128 15:26:50.953173 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77b8c4f1-9bde-453b-8a69-bce5155e2a4d-utilities\") pod \"redhat-operators-v7rmb\" (UID: \"77b8c4f1-9bde-453b-8a69-bce5155e2a4d\") " pod="openshift-marketplace/redhat-operators-v7rmb" Nov 28 15:26:50 crc kubenswrapper[4857]: I1128 15:26:50.953780 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77b8c4f1-9bde-453b-8a69-bce5155e2a4d-utilities\") pod \"redhat-operators-v7rmb\" (UID: \"77b8c4f1-9bde-453b-8a69-bce5155e2a4d\") " pod="openshift-marketplace/redhat-operators-v7rmb" Nov 28 15:26:50 crc kubenswrapper[4857]: I1128 15:26:50.954486 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77b8c4f1-9bde-453b-8a69-bce5155e2a4d-catalog-content\") pod \"redhat-operators-v7rmb\" (UID: \"77b8c4f1-9bde-453b-8a69-bce5155e2a4d\") " pod="openshift-marketplace/redhat-operators-v7rmb" Nov 28 15:26:50 crc kubenswrapper[4857]: I1128 15:26:50.976078 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nmc6k\" (UniqueName: \"kubernetes.io/projected/77b8c4f1-9bde-453b-8a69-bce5155e2a4d-kube-api-access-nmc6k\") pod \"redhat-operators-v7rmb\" (UID: \"77b8c4f1-9bde-453b-8a69-bce5155e2a4d\") " pod="openshift-marketplace/redhat-operators-v7rmb" Nov 28 15:26:51 crc kubenswrapper[4857]: I1128 15:26:51.018237 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-v7rmb" Nov 28 15:26:51 crc kubenswrapper[4857]: I1128 15:26:51.553811 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-v7rmb"] Nov 28 15:26:52 crc kubenswrapper[4857]: I1128 15:26:52.422995 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v7rmb" event={"ID":"77b8c4f1-9bde-453b-8a69-bce5155e2a4d","Type":"ContainerStarted","Data":"07c200ba8cc3b710f68f33d5f812acf3d5399757d235a60abfbfc6a914966673"} Nov 28 15:26:53 crc kubenswrapper[4857]: I1128 15:26:53.431834 4857 generic.go:334] "Generic (PLEG): container finished" podID="77b8c4f1-9bde-453b-8a69-bce5155e2a4d" containerID="bbc23aa227493549200201a2941fb70c0aea833acd7f23dea29d024712017c62" exitCode=0 Nov 28 15:26:53 crc kubenswrapper[4857]: I1128 15:26:53.432064 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v7rmb" event={"ID":"77b8c4f1-9bde-453b-8a69-bce5155e2a4d","Type":"ContainerDied","Data":"bbc23aa227493549200201a2941fb70c0aea833acd7f23dea29d024712017c62"} Nov 28 15:26:55 crc kubenswrapper[4857]: I1128 15:26:55.458006 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v7rmb" event={"ID":"77b8c4f1-9bde-453b-8a69-bce5155e2a4d","Type":"ContainerStarted","Data":"462df8893567ddf59a405f58eedffd81dc8f1df4b34e3f1cdaa673d2c1212e43"} Nov 28 15:26:59 crc kubenswrapper[4857]: I1128 15:26:59.529468 4857 generic.go:334] "Generic (PLEG): container finished" podID="77b8c4f1-9bde-453b-8a69-bce5155e2a4d" containerID="462df8893567ddf59a405f58eedffd81dc8f1df4b34e3f1cdaa673d2c1212e43" exitCode=0 Nov 28 15:26:59 crc kubenswrapper[4857]: I1128 15:26:59.529546 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v7rmb" event={"ID":"77b8c4f1-9bde-453b-8a69-bce5155e2a4d","Type":"ContainerDied","Data":"462df8893567ddf59a405f58eedffd81dc8f1df4b34e3f1cdaa673d2c1212e43"} Nov 28 
15:26:59 crc kubenswrapper[4857]: I1128 15:26:59.533552 4857 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 15:27:01 crc kubenswrapper[4857]: I1128 15:27:01.552480 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v7rmb" event={"ID":"77b8c4f1-9bde-453b-8a69-bce5155e2a4d","Type":"ContainerStarted","Data":"26a236894f823d09c04438edfc56e61e6f43c6f8a077507099a35e4a393180d3"} Nov 28 15:27:01 crc kubenswrapper[4857]: I1128 15:27:01.599934 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-v7rmb" podStartSLOduration=4.647731552 podStartE2EDuration="11.59991181s" podCreationTimestamp="2025-11-28 15:26:50 +0000 UTC" firstStartedPulling="2025-11-28 15:26:53.434603449 +0000 UTC m=+7063.558544876" lastFinishedPulling="2025-11-28 15:27:00.386783677 +0000 UTC m=+7070.510725134" observedRunningTime="2025-11-28 15:27:01.567552726 +0000 UTC m=+7071.691494163" watchObservedRunningTime="2025-11-28 15:27:01.59991181 +0000 UTC m=+7071.723853247" Nov 28 15:27:11 crc kubenswrapper[4857]: I1128 15:27:11.019124 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-v7rmb" Nov 28 15:27:11 crc kubenswrapper[4857]: I1128 15:27:11.021179 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-v7rmb" Nov 28 15:27:11 crc kubenswrapper[4857]: I1128 15:27:11.084960 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-v7rmb" Nov 28 15:27:11 crc kubenswrapper[4857]: I1128 15:27:11.720161 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-v7rmb" Nov 28 15:27:11 crc kubenswrapper[4857]: I1128 15:27:11.768844 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-v7rmb"] Nov 28 15:27:13 crc kubenswrapper[4857]: I1128 15:27:13.691625 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-v7rmb" podUID="77b8c4f1-9bde-453b-8a69-bce5155e2a4d" containerName="registry-server" containerID="cri-o://26a236894f823d09c04438edfc56e61e6f43c6f8a077507099a35e4a393180d3" gracePeriod=2 Nov 28 15:27:14 crc kubenswrapper[4857]: I1128 15:27:14.261480 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-v7rmb" Nov 28 15:27:14 crc kubenswrapper[4857]: I1128 15:27:14.450689 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77b8c4f1-9bde-453b-8a69-bce5155e2a4d-catalog-content\") pod \"77b8c4f1-9bde-453b-8a69-bce5155e2a4d\" (UID: \"77b8c4f1-9bde-453b-8a69-bce5155e2a4d\") " Nov 28 15:27:14 crc kubenswrapper[4857]: I1128 15:27:14.451096 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nmc6k\" (UniqueName: \"kubernetes.io/projected/77b8c4f1-9bde-453b-8a69-bce5155e2a4d-kube-api-access-nmc6k\") pod \"77b8c4f1-9bde-453b-8a69-bce5155e2a4d\" (UID: \"77b8c4f1-9bde-453b-8a69-bce5155e2a4d\") " Nov 28 15:27:14 crc kubenswrapper[4857]: I1128 15:27:14.451277 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77b8c4f1-9bde-453b-8a69-bce5155e2a4d-utilities\") pod \"77b8c4f1-9bde-453b-8a69-bce5155e2a4d\" (UID: \"77b8c4f1-9bde-453b-8a69-bce5155e2a4d\") " Nov 28 15:27:14 crc kubenswrapper[4857]: I1128 15:27:14.452511 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/77b8c4f1-9bde-453b-8a69-bce5155e2a4d-utilities" (OuterVolumeSpecName: "utilities") pod "77b8c4f1-9bde-453b-8a69-bce5155e2a4d" (UID: "77b8c4f1-9bde-453b-8a69-bce5155e2a4d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:27:14 crc kubenswrapper[4857]: I1128 15:27:14.457309 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77b8c4f1-9bde-453b-8a69-bce5155e2a4d-kube-api-access-nmc6k" (OuterVolumeSpecName: "kube-api-access-nmc6k") pod "77b8c4f1-9bde-453b-8a69-bce5155e2a4d" (UID: "77b8c4f1-9bde-453b-8a69-bce5155e2a4d"). InnerVolumeSpecName "kube-api-access-nmc6k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:27:14 crc kubenswrapper[4857]: I1128 15:27:14.554299 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nmc6k\" (UniqueName: \"kubernetes.io/projected/77b8c4f1-9bde-453b-8a69-bce5155e2a4d-kube-api-access-nmc6k\") on node \"crc\" DevicePath \"\"" Nov 28 15:27:14 crc kubenswrapper[4857]: I1128 15:27:14.554338 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77b8c4f1-9bde-453b-8a69-bce5155e2a4d-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:27:14 crc kubenswrapper[4857]: I1128 15:27:14.565841 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/77b8c4f1-9bde-453b-8a69-bce5155e2a4d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "77b8c4f1-9bde-453b-8a69-bce5155e2a4d" (UID: "77b8c4f1-9bde-453b-8a69-bce5155e2a4d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:27:14 crc kubenswrapper[4857]: I1128 15:27:14.656935 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77b8c4f1-9bde-453b-8a69-bce5155e2a4d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:27:14 crc kubenswrapper[4857]: I1128 15:27:14.709535 4857 generic.go:334] "Generic (PLEG): container finished" podID="77b8c4f1-9bde-453b-8a69-bce5155e2a4d" containerID="26a236894f823d09c04438edfc56e61e6f43c6f8a077507099a35e4a393180d3" exitCode=0 Nov 28 15:27:14 crc kubenswrapper[4857]: I1128 15:27:14.709542 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-v7rmb" Nov 28 15:27:14 crc kubenswrapper[4857]: I1128 15:27:14.709625 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v7rmb" event={"ID":"77b8c4f1-9bde-453b-8a69-bce5155e2a4d","Type":"ContainerDied","Data":"26a236894f823d09c04438edfc56e61e6f43c6f8a077507099a35e4a393180d3"} Nov 28 15:27:14 crc kubenswrapper[4857]: I1128 15:27:14.709697 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-v7rmb" event={"ID":"77b8c4f1-9bde-453b-8a69-bce5155e2a4d","Type":"ContainerDied","Data":"07c200ba8cc3b710f68f33d5f812acf3d5399757d235a60abfbfc6a914966673"} Nov 28 15:27:14 crc kubenswrapper[4857]: I1128 15:27:14.709729 4857 scope.go:117] "RemoveContainer" containerID="26a236894f823d09c04438edfc56e61e6f43c6f8a077507099a35e4a393180d3" Nov 28 15:27:14 crc kubenswrapper[4857]: I1128 15:27:14.763207 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-v7rmb"] Nov 28 15:27:14 crc kubenswrapper[4857]: I1128 15:27:14.769230 4857 scope.go:117] "RemoveContainer" containerID="462df8893567ddf59a405f58eedffd81dc8f1df4b34e3f1cdaa673d2c1212e43" Nov 28 15:27:14 crc kubenswrapper[4857]: I1128 15:27:14.779005 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-v7rmb"] Nov 28 15:27:14 crc kubenswrapper[4857]: I1128 15:27:14.804112 4857 scope.go:117] "RemoveContainer" containerID="bbc23aa227493549200201a2941fb70c0aea833acd7f23dea29d024712017c62" Nov 28 15:27:14 crc kubenswrapper[4857]: I1128 15:27:14.862359 4857 scope.go:117] "RemoveContainer" containerID="26a236894f823d09c04438edfc56e61e6f43c6f8a077507099a35e4a393180d3" Nov 28 15:27:14 crc kubenswrapper[4857]: E1128 15:27:14.863033 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"26a236894f823d09c04438edfc56e61e6f43c6f8a077507099a35e4a393180d3\": container with ID starting with 26a236894f823d09c04438edfc56e61e6f43c6f8a077507099a35e4a393180d3 not found: ID does not exist" containerID="26a236894f823d09c04438edfc56e61e6f43c6f8a077507099a35e4a393180d3" Nov 28 15:27:14 crc kubenswrapper[4857]: I1128 15:27:14.863072 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"26a236894f823d09c04438edfc56e61e6f43c6f8a077507099a35e4a393180d3"} err="failed to get container status \"26a236894f823d09c04438edfc56e61e6f43c6f8a077507099a35e4a393180d3\": rpc error: code = NotFound desc = could not find container \"26a236894f823d09c04438edfc56e61e6f43c6f8a077507099a35e4a393180d3\": container with ID starting with 26a236894f823d09c04438edfc56e61e6f43c6f8a077507099a35e4a393180d3 not found: ID does not exist" Nov 28 15:27:14 crc 
Nov 28 15:27:14 crc kubenswrapper[4857]: I1128 15:27:14.863098 4857 scope.go:117] "RemoveContainer" containerID="462df8893567ddf59a405f58eedffd81dc8f1df4b34e3f1cdaa673d2c1212e43"
Nov 28 15:27:14 crc kubenswrapper[4857]: E1128 15:27:14.863574 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"462df8893567ddf59a405f58eedffd81dc8f1df4b34e3f1cdaa673d2c1212e43\": container with ID starting with 462df8893567ddf59a405f58eedffd81dc8f1df4b34e3f1cdaa673d2c1212e43 not found: ID does not exist" containerID="462df8893567ddf59a405f58eedffd81dc8f1df4b34e3f1cdaa673d2c1212e43"
Nov 28 15:27:14 crc kubenswrapper[4857]: I1128 15:27:14.863604 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"462df8893567ddf59a405f58eedffd81dc8f1df4b34e3f1cdaa673d2c1212e43"} err="failed to get container status \"462df8893567ddf59a405f58eedffd81dc8f1df4b34e3f1cdaa673d2c1212e43\": rpc error: code = NotFound desc = could not find container \"462df8893567ddf59a405f58eedffd81dc8f1df4b34e3f1cdaa673d2c1212e43\": container with ID starting with 462df8893567ddf59a405f58eedffd81dc8f1df4b34e3f1cdaa673d2c1212e43 not found: ID does not exist"
Nov 28 15:27:14 crc kubenswrapper[4857]: I1128 15:27:14.863625 4857 scope.go:117] "RemoveContainer" containerID="bbc23aa227493549200201a2941fb70c0aea833acd7f23dea29d024712017c62"
Nov 28 15:27:14 crc kubenswrapper[4857]: E1128 15:27:14.864178 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bbc23aa227493549200201a2941fb70c0aea833acd7f23dea29d024712017c62\": container with ID starting with bbc23aa227493549200201a2941fb70c0aea833acd7f23dea29d024712017c62 not found: ID does not exist" containerID="bbc23aa227493549200201a2941fb70c0aea833acd7f23dea29d024712017c62"
Nov 28 15:27:14 crc kubenswrapper[4857]: I1128 15:27:14.864406 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bbc23aa227493549200201a2941fb70c0aea833acd7f23dea29d024712017c62"} err="failed to get container status \"bbc23aa227493549200201a2941fb70c0aea833acd7f23dea29d024712017c62\": rpc error: code = NotFound desc = could not find container \"bbc23aa227493549200201a2941fb70c0aea833acd7f23dea29d024712017c62\": container with ID starting with bbc23aa227493549200201a2941fb70c0aea833acd7f23dea29d024712017c62 not found: ID does not exist"
Nov 28 15:27:15 crc kubenswrapper[4857]: I1128 15:27:15.045015 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-db-create-kpv9k"]
Nov 28 15:27:15 crc kubenswrapper[4857]: I1128 15:27:15.055812 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-7585-account-create-update-5ttmq"]
Nov 28 15:27:15 crc kubenswrapper[4857]: I1128 15:27:15.065885 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-db-create-kpv9k"]
Nov 28 15:27:15 crc kubenswrapper[4857]: I1128 15:27:15.075696 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-7585-account-create-update-5ttmq"]
Nov 28 15:27:16 crc kubenswrapper[4857]: I1128 15:27:16.241118 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6720d85b-09ef-423d-86e6-5eff98a5cfce" path="/var/lib/kubelet/pods/6720d85b-09ef-423d-86e6-5eff98a5cfce/volumes"
Nov 28 15:27:16 crc kubenswrapper[4857]: I1128 15:27:16.242213 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="77b8c4f1-9bde-453b-8a69-bce5155e2a4d"
path="/var/lib/kubelet/pods/77b8c4f1-9bde-453b-8a69-bce5155e2a4d/volumes" Nov 28 15:27:16 crc kubenswrapper[4857]: I1128 15:27:16.243017 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9231dac1-f922-44f8-a7e6-9a5372fc6e8b" path="/var/lib/kubelet/pods/9231dac1-f922-44f8-a7e6-9a5372fc6e8b/volumes" Nov 28 15:27:30 crc kubenswrapper[4857]: I1128 15:27:30.784330 4857 scope.go:117] "RemoveContainer" containerID="275a2b305e0584ed4385a53c78e73bc58e35bb06ff41728cce7052d24d5cd7b1" Nov 28 15:27:30 crc kubenswrapper[4857]: I1128 15:27:30.829483 4857 scope.go:117] "RemoveContainer" containerID="66346b8f98e982e563dec02c14677968c523817998277684428e91e7942d88a8" Nov 28 15:27:30 crc kubenswrapper[4857]: I1128 15:27:30.920025 4857 scope.go:117] "RemoveContainer" containerID="7caac08d8e0ca10cad1fd84103169092d288584dd161eee66bb7f6260e96a6d2" Nov 28 15:27:31 crc kubenswrapper[4857]: I1128 15:27:31.037646 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/manila-db-sync-kchp7"] Nov 28 15:27:31 crc kubenswrapper[4857]: I1128 15:27:31.048431 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/manila-db-sync-kchp7"] Nov 28 15:27:32 crc kubenswrapper[4857]: I1128 15:27:32.241991 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0603deb1-0077-4f3c-ae56-9d370998e593" path="/var/lib/kubelet/pods/0603deb1-0077-4f3c-ae56-9d370998e593/volumes" Nov 28 15:28:04 crc kubenswrapper[4857]: I1128 15:28:04.415581 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-2fv2f"] Nov 28 15:28:04 crc kubenswrapper[4857]: E1128 15:28:04.417223 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77b8c4f1-9bde-453b-8a69-bce5155e2a4d" containerName="extract-utilities" Nov 28 15:28:04 crc kubenswrapper[4857]: I1128 15:28:04.417252 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="77b8c4f1-9bde-453b-8a69-bce5155e2a4d" containerName="extract-utilities" Nov 28 15:28:04 crc kubenswrapper[4857]: E1128 15:28:04.417277 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77b8c4f1-9bde-453b-8a69-bce5155e2a4d" containerName="extract-content" Nov 28 15:28:04 crc kubenswrapper[4857]: I1128 15:28:04.417290 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="77b8c4f1-9bde-453b-8a69-bce5155e2a4d" containerName="extract-content" Nov 28 15:28:04 crc kubenswrapper[4857]: E1128 15:28:04.417317 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77b8c4f1-9bde-453b-8a69-bce5155e2a4d" containerName="registry-server" Nov 28 15:28:04 crc kubenswrapper[4857]: I1128 15:28:04.417330 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="77b8c4f1-9bde-453b-8a69-bce5155e2a4d" containerName="registry-server" Nov 28 15:28:04 crc kubenswrapper[4857]: I1128 15:28:04.417756 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="77b8c4f1-9bde-453b-8a69-bce5155e2a4d" containerName="registry-server" Nov 28 15:28:04 crc kubenswrapper[4857]: I1128 15:28:04.420964 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-2fv2f" Nov 28 15:28:04 crc kubenswrapper[4857]: I1128 15:28:04.428211 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2fv2f"] Nov 28 15:28:04 crc kubenswrapper[4857]: I1128 15:28:04.459457 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rztcs\" (UniqueName: \"kubernetes.io/projected/68bccb7e-dab9-4869-9b08-e3f2cd20c571-kube-api-access-rztcs\") pod \"community-operators-2fv2f\" (UID: \"68bccb7e-dab9-4869-9b08-e3f2cd20c571\") " pod="openshift-marketplace/community-operators-2fv2f" Nov 28 15:28:04 crc kubenswrapper[4857]: I1128 15:28:04.459548 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68bccb7e-dab9-4869-9b08-e3f2cd20c571-utilities\") pod \"community-operators-2fv2f\" (UID: \"68bccb7e-dab9-4869-9b08-e3f2cd20c571\") " pod="openshift-marketplace/community-operators-2fv2f" Nov 28 15:28:04 crc kubenswrapper[4857]: I1128 15:28:04.459591 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68bccb7e-dab9-4869-9b08-e3f2cd20c571-catalog-content\") pod \"community-operators-2fv2f\" (UID: \"68bccb7e-dab9-4869-9b08-e3f2cd20c571\") " pod="openshift-marketplace/community-operators-2fv2f" Nov 28 15:28:04 crc kubenswrapper[4857]: I1128 15:28:04.562021 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rztcs\" (UniqueName: \"kubernetes.io/projected/68bccb7e-dab9-4869-9b08-e3f2cd20c571-kube-api-access-rztcs\") pod \"community-operators-2fv2f\" (UID: \"68bccb7e-dab9-4869-9b08-e3f2cd20c571\") " pod="openshift-marketplace/community-operators-2fv2f" Nov 28 15:28:04 crc kubenswrapper[4857]: I1128 15:28:04.562502 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68bccb7e-dab9-4869-9b08-e3f2cd20c571-utilities\") pod \"community-operators-2fv2f\" (UID: \"68bccb7e-dab9-4869-9b08-e3f2cd20c571\") " pod="openshift-marketplace/community-operators-2fv2f" Nov 28 15:28:04 crc kubenswrapper[4857]: I1128 15:28:04.562544 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68bccb7e-dab9-4869-9b08-e3f2cd20c571-catalog-content\") pod \"community-operators-2fv2f\" (UID: \"68bccb7e-dab9-4869-9b08-e3f2cd20c571\") " pod="openshift-marketplace/community-operators-2fv2f" Nov 28 15:28:04 crc kubenswrapper[4857]: I1128 15:28:04.563791 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68bccb7e-dab9-4869-9b08-e3f2cd20c571-catalog-content\") pod \"community-operators-2fv2f\" (UID: \"68bccb7e-dab9-4869-9b08-e3f2cd20c571\") " pod="openshift-marketplace/community-operators-2fv2f" Nov 28 15:28:04 crc kubenswrapper[4857]: I1128 15:28:04.563876 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68bccb7e-dab9-4869-9b08-e3f2cd20c571-utilities\") pod \"community-operators-2fv2f\" (UID: \"68bccb7e-dab9-4869-9b08-e3f2cd20c571\") " pod="openshift-marketplace/community-operators-2fv2f" Nov 28 15:28:04 crc kubenswrapper[4857]: I1128 15:28:04.601865 4857 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-rztcs\" (UniqueName: \"kubernetes.io/projected/68bccb7e-dab9-4869-9b08-e3f2cd20c571-kube-api-access-rztcs\") pod \"community-operators-2fv2f\" (UID: \"68bccb7e-dab9-4869-9b08-e3f2cd20c571\") " pod="openshift-marketplace/community-operators-2fv2f" Nov 28 15:28:04 crc kubenswrapper[4857]: I1128 15:28:04.764219 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2fv2f" Nov 28 15:28:05 crc kubenswrapper[4857]: I1128 15:28:05.309728 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2fv2f"] Nov 28 15:28:05 crc kubenswrapper[4857]: I1128 15:28:05.429437 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2fv2f" event={"ID":"68bccb7e-dab9-4869-9b08-e3f2cd20c571","Type":"ContainerStarted","Data":"e8d91e6725dada11010ab6f2e9af5970424e6b0cd8ef8e6d9b375b150d5375f2"} Nov 28 15:28:06 crc kubenswrapper[4857]: I1128 15:28:06.451239 4857 generic.go:334] "Generic (PLEG): container finished" podID="68bccb7e-dab9-4869-9b08-e3f2cd20c571" containerID="fccba3a55b1e7dfc76dd95a3d3e0600d7582e63897c550a2198c19e142c17654" exitCode=0 Nov 28 15:28:06 crc kubenswrapper[4857]: I1128 15:28:06.451461 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2fv2f" event={"ID":"68bccb7e-dab9-4869-9b08-e3f2cd20c571","Type":"ContainerDied","Data":"fccba3a55b1e7dfc76dd95a3d3e0600d7582e63897c550a2198c19e142c17654"} Nov 28 15:28:10 crc kubenswrapper[4857]: I1128 15:28:10.496698 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2fv2f" event={"ID":"68bccb7e-dab9-4869-9b08-e3f2cd20c571","Type":"ContainerStarted","Data":"b829d084dcf29f137cdc749fcac14de3ef8b4a70f200cac6c8fa23b5b161b073"} Nov 28 15:28:11 crc kubenswrapper[4857]: I1128 15:28:11.308663 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:28:11 crc kubenswrapper[4857]: I1128 15:28:11.308732 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:28:11 crc kubenswrapper[4857]: I1128 15:28:11.508203 4857 generic.go:334] "Generic (PLEG): container finished" podID="68bccb7e-dab9-4869-9b08-e3f2cd20c571" containerID="b829d084dcf29f137cdc749fcac14de3ef8b4a70f200cac6c8fa23b5b161b073" exitCode=0 Nov 28 15:28:11 crc kubenswrapper[4857]: I1128 15:28:11.508248 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2fv2f" event={"ID":"68bccb7e-dab9-4869-9b08-e3f2cd20c571","Type":"ContainerDied","Data":"b829d084dcf29f137cdc749fcac14de3ef8b4a70f200cac6c8fa23b5b161b073"} Nov 28 15:28:13 crc kubenswrapper[4857]: I1128 15:28:13.535848 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2fv2f" event={"ID":"68bccb7e-dab9-4869-9b08-e3f2cd20c571","Type":"ContainerStarted","Data":"e1f092878b025e6d67e396d0496d5d099db3da9f13cd063dcf6d4084dbc633e3"} Nov 28 
Nov 28 15:28:13 crc kubenswrapper[4857]: I1128 15:28:13.585829 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-2fv2f" podStartSLOduration=3.445073588 podStartE2EDuration="9.585803641s" podCreationTimestamp="2025-11-28 15:28:04 +0000 UTC" firstStartedPulling="2025-11-28 15:28:06.454494489 +0000 UTC m=+7136.578435946" lastFinishedPulling="2025-11-28 15:28:12.595224532 +0000 UTC m=+7142.719165999" observedRunningTime="2025-11-28 15:28:13.581765063 +0000 UTC m=+7143.705706500" watchObservedRunningTime="2025-11-28 15:28:13.585803641 +0000 UTC m=+7143.709745098"
Nov 28 15:28:14 crc kubenswrapper[4857]: I1128 15:28:14.765342 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-2fv2f"
Nov 28 15:28:14 crc kubenswrapper[4857]: I1128 15:28:14.766036 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-2fv2f"
Nov 28 15:28:15 crc kubenswrapper[4857]: I1128 15:28:15.816427 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-2fv2f" podUID="68bccb7e-dab9-4869-9b08-e3f2cd20c571" containerName="registry-server" probeResult="failure" output=<
Nov 28 15:28:15 crc kubenswrapper[4857]: timeout: failed to connect service ":50051" within 1s
Nov 28 15:28:15 crc kubenswrapper[4857]: >
Nov 28 15:28:24 crc kubenswrapper[4857]: I1128 15:28:24.825321 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-2fv2f"
Nov 28 15:28:24 crc kubenswrapper[4857]: I1128 15:28:24.886457 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-2fv2f"
Nov 28 15:28:25 crc kubenswrapper[4857]: I1128 15:28:25.062668 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2fv2f"]
Nov 28 15:28:26 crc kubenswrapper[4857]: I1128 15:28:26.691998 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-2fv2f" podUID="68bccb7e-dab9-4869-9b08-e3f2cd20c571" containerName="registry-server" containerID="cri-o://e1f092878b025e6d67e396d0496d5d099db3da9f13cd063dcf6d4084dbc633e3" gracePeriod=2
Nov 28 15:28:27 crc kubenswrapper[4857]: I1128 15:28:27.236231 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2fv2f"
Nov 28 15:28:27 crc kubenswrapper[4857]: I1128 15:28:27.386145 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68bccb7e-dab9-4869-9b08-e3f2cd20c571-utilities\") pod \"68bccb7e-dab9-4869-9b08-e3f2cd20c571\" (UID: \"68bccb7e-dab9-4869-9b08-e3f2cd20c571\") "
Nov 28 15:28:27 crc kubenswrapper[4857]: I1128 15:28:27.388623 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/68bccb7e-dab9-4869-9b08-e3f2cd20c571-utilities" (OuterVolumeSpecName: "utilities") pod "68bccb7e-dab9-4869-9b08-e3f2cd20c571" (UID: "68bccb7e-dab9-4869-9b08-e3f2cd20c571"). InnerVolumeSpecName "utilities".
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:28:27 crc kubenswrapper[4857]: I1128 15:28:27.390098 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rztcs\" (UniqueName: \"kubernetes.io/projected/68bccb7e-dab9-4869-9b08-e3f2cd20c571-kube-api-access-rztcs\") pod \"68bccb7e-dab9-4869-9b08-e3f2cd20c571\" (UID: \"68bccb7e-dab9-4869-9b08-e3f2cd20c571\") " Nov 28 15:28:27 crc kubenswrapper[4857]: I1128 15:28:27.390297 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68bccb7e-dab9-4869-9b08-e3f2cd20c571-catalog-content\") pod \"68bccb7e-dab9-4869-9b08-e3f2cd20c571\" (UID: \"68bccb7e-dab9-4869-9b08-e3f2cd20c571\") " Nov 28 15:28:27 crc kubenswrapper[4857]: I1128 15:28:27.391582 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/68bccb7e-dab9-4869-9b08-e3f2cd20c571-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:28:27 crc kubenswrapper[4857]: I1128 15:28:27.404259 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/68bccb7e-dab9-4869-9b08-e3f2cd20c571-kube-api-access-rztcs" (OuterVolumeSpecName: "kube-api-access-rztcs") pod "68bccb7e-dab9-4869-9b08-e3f2cd20c571" (UID: "68bccb7e-dab9-4869-9b08-e3f2cd20c571"). InnerVolumeSpecName "kube-api-access-rztcs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:28:27 crc kubenswrapper[4857]: I1128 15:28:27.469831 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/68bccb7e-dab9-4869-9b08-e3f2cd20c571-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "68bccb7e-dab9-4869-9b08-e3f2cd20c571" (UID: "68bccb7e-dab9-4869-9b08-e3f2cd20c571"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:28:27 crc kubenswrapper[4857]: I1128 15:28:27.493504 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rztcs\" (UniqueName: \"kubernetes.io/projected/68bccb7e-dab9-4869-9b08-e3f2cd20c571-kube-api-access-rztcs\") on node \"crc\" DevicePath \"\"" Nov 28 15:28:27 crc kubenswrapper[4857]: I1128 15:28:27.493536 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/68bccb7e-dab9-4869-9b08-e3f2cd20c571-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:28:27 crc kubenswrapper[4857]: I1128 15:28:27.702589 4857 generic.go:334] "Generic (PLEG): container finished" podID="68bccb7e-dab9-4869-9b08-e3f2cd20c571" containerID="e1f092878b025e6d67e396d0496d5d099db3da9f13cd063dcf6d4084dbc633e3" exitCode=0 Nov 28 15:28:27 crc kubenswrapper[4857]: I1128 15:28:27.702645 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2fv2f" event={"ID":"68bccb7e-dab9-4869-9b08-e3f2cd20c571","Type":"ContainerDied","Data":"e1f092878b025e6d67e396d0496d5d099db3da9f13cd063dcf6d4084dbc633e3"} Nov 28 15:28:27 crc kubenswrapper[4857]: I1128 15:28:27.702676 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2fv2f" event={"ID":"68bccb7e-dab9-4869-9b08-e3f2cd20c571","Type":"ContainerDied","Data":"e8d91e6725dada11010ab6f2e9af5970424e6b0cd8ef8e6d9b375b150d5375f2"} Nov 28 15:28:27 crc kubenswrapper[4857]: I1128 15:28:27.702697 4857 scope.go:117] "RemoveContainer" containerID="e1f092878b025e6d67e396d0496d5d099db3da9f13cd063dcf6d4084dbc633e3" Nov 28 15:28:27 crc kubenswrapper[4857]: I1128 15:28:27.702874 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-2fv2f" Nov 28 15:28:27 crc kubenswrapper[4857]: I1128 15:28:27.733781 4857 scope.go:117] "RemoveContainer" containerID="b829d084dcf29f137cdc749fcac14de3ef8b4a70f200cac6c8fa23b5b161b073" Nov 28 15:28:27 crc kubenswrapper[4857]: I1128 15:28:27.742968 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2fv2f"] Nov 28 15:28:27 crc kubenswrapper[4857]: I1128 15:28:27.752289 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-2fv2f"] Nov 28 15:28:27 crc kubenswrapper[4857]: I1128 15:28:27.766442 4857 scope.go:117] "RemoveContainer" containerID="fccba3a55b1e7dfc76dd95a3d3e0600d7582e63897c550a2198c19e142c17654" Nov 28 15:28:27 crc kubenswrapper[4857]: I1128 15:28:27.809271 4857 scope.go:117] "RemoveContainer" containerID="e1f092878b025e6d67e396d0496d5d099db3da9f13cd063dcf6d4084dbc633e3" Nov 28 15:28:27 crc kubenswrapper[4857]: E1128 15:28:27.809872 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e1f092878b025e6d67e396d0496d5d099db3da9f13cd063dcf6d4084dbc633e3\": container with ID starting with e1f092878b025e6d67e396d0496d5d099db3da9f13cd063dcf6d4084dbc633e3 not found: ID does not exist" containerID="e1f092878b025e6d67e396d0496d5d099db3da9f13cd063dcf6d4084dbc633e3" Nov 28 15:28:27 crc kubenswrapper[4857]: I1128 15:28:27.809915 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1f092878b025e6d67e396d0496d5d099db3da9f13cd063dcf6d4084dbc633e3"} err="failed to get container status \"e1f092878b025e6d67e396d0496d5d099db3da9f13cd063dcf6d4084dbc633e3\": rpc error: code = NotFound desc = could not find container \"e1f092878b025e6d67e396d0496d5d099db3da9f13cd063dcf6d4084dbc633e3\": container with ID starting with e1f092878b025e6d67e396d0496d5d099db3da9f13cd063dcf6d4084dbc633e3 not found: ID does not exist" Nov 28 15:28:27 crc kubenswrapper[4857]: I1128 15:28:27.809971 4857 scope.go:117] "RemoveContainer" containerID="b829d084dcf29f137cdc749fcac14de3ef8b4a70f200cac6c8fa23b5b161b073" Nov 28 15:28:27 crc kubenswrapper[4857]: E1128 15:28:27.810385 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b829d084dcf29f137cdc749fcac14de3ef8b4a70f200cac6c8fa23b5b161b073\": container with ID starting with b829d084dcf29f137cdc749fcac14de3ef8b4a70f200cac6c8fa23b5b161b073 not found: ID does not exist" containerID="b829d084dcf29f137cdc749fcac14de3ef8b4a70f200cac6c8fa23b5b161b073" Nov 28 15:28:27 crc kubenswrapper[4857]: I1128 15:28:27.810421 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b829d084dcf29f137cdc749fcac14de3ef8b4a70f200cac6c8fa23b5b161b073"} err="failed to get container status \"b829d084dcf29f137cdc749fcac14de3ef8b4a70f200cac6c8fa23b5b161b073\": rpc error: code = NotFound desc = could not find container \"b829d084dcf29f137cdc749fcac14de3ef8b4a70f200cac6c8fa23b5b161b073\": container with ID starting with b829d084dcf29f137cdc749fcac14de3ef8b4a70f200cac6c8fa23b5b161b073 not found: ID does not exist" Nov 28 15:28:27 crc kubenswrapper[4857]: I1128 15:28:27.810445 4857 scope.go:117] "RemoveContainer" containerID="fccba3a55b1e7dfc76dd95a3d3e0600d7582e63897c550a2198c19e142c17654" Nov 28 15:28:27 crc kubenswrapper[4857]: E1128 15:28:27.810896 4857 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"fccba3a55b1e7dfc76dd95a3d3e0600d7582e63897c550a2198c19e142c17654\": container with ID starting with fccba3a55b1e7dfc76dd95a3d3e0600d7582e63897c550a2198c19e142c17654 not found: ID does not exist" containerID="fccba3a55b1e7dfc76dd95a3d3e0600d7582e63897c550a2198c19e142c17654" Nov 28 15:28:27 crc kubenswrapper[4857]: I1128 15:28:27.810925 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fccba3a55b1e7dfc76dd95a3d3e0600d7582e63897c550a2198c19e142c17654"} err="failed to get container status \"fccba3a55b1e7dfc76dd95a3d3e0600d7582e63897c550a2198c19e142c17654\": rpc error: code = NotFound desc = could not find container \"fccba3a55b1e7dfc76dd95a3d3e0600d7582e63897c550a2198c19e142c17654\": container with ID starting with fccba3a55b1e7dfc76dd95a3d3e0600d7582e63897c550a2198c19e142c17654 not found: ID does not exist" Nov 28 15:28:28 crc kubenswrapper[4857]: I1128 15:28:28.244394 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="68bccb7e-dab9-4869-9b08-e3f2cd20c571" path="/var/lib/kubelet/pods/68bccb7e-dab9-4869-9b08-e3f2cd20c571/volumes" Nov 28 15:28:31 crc kubenswrapper[4857]: I1128 15:28:31.038304 4857 scope.go:117] "RemoveContainer" containerID="a5810a45c09b37d5b1c88e866c86398072cd5ba8081096650cc7e76b95b36db9" Nov 28 15:28:41 crc kubenswrapper[4857]: I1128 15:28:41.308490 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:28:41 crc kubenswrapper[4857]: I1128 15:28:41.309250 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:29:11 crc kubenswrapper[4857]: I1128 15:29:11.313092 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:29:11 crc kubenswrapper[4857]: I1128 15:29:11.314383 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:29:11 crc kubenswrapper[4857]: I1128 15:29:11.314521 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 15:29:11 crc kubenswrapper[4857]: I1128 15:29:11.315819 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8ce4d8d1204f0e33eb25524e56c57a576adc7638a6767a35e3118bbb0412c0ed"} pod="openshift-machine-config-operator/machine-config-daemon-dshsf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 15:29:11 crc kubenswrapper[4857]: I1128 
15:29:11.315918 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" containerID="cri-o://8ce4d8d1204f0e33eb25524e56c57a576adc7638a6767a35e3118bbb0412c0ed" gracePeriod=600
Nov 28 15:29:11 crc kubenswrapper[4857]: E1128 15:29:11.441863 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 15:29:12 crc kubenswrapper[4857]: I1128 15:29:12.215159 4857 generic.go:334] "Generic (PLEG): container finished" podID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerID="8ce4d8d1204f0e33eb25524e56c57a576adc7638a6767a35e3118bbb0412c0ed" exitCode=0
Nov 28 15:29:12 crc kubenswrapper[4857]: I1128 15:29:12.215227 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerDied","Data":"8ce4d8d1204f0e33eb25524e56c57a576adc7638a6767a35e3118bbb0412c0ed"}
Nov 28 15:29:12 crc kubenswrapper[4857]: I1128 15:29:12.215785 4857 scope.go:117] "RemoveContainer" containerID="346c6f28644616555969ca723cabdfbd7b479b34008bb6546bdb4a4acb540f5d"
Nov 28 15:29:12 crc kubenswrapper[4857]: I1128 15:29:12.216769 4857 scope.go:117] "RemoveContainer" containerID="8ce4d8d1204f0e33eb25524e56c57a576adc7638a6767a35e3118bbb0412c0ed"
Nov 28 15:29:12 crc kubenswrapper[4857]: E1128 15:29:12.217365 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 15:29:27 crc kubenswrapper[4857]: I1128 15:29:27.230049 4857 scope.go:117] "RemoveContainer" containerID="8ce4d8d1204f0e33eb25524e56c57a576adc7638a6767a35e3118bbb0412c0ed"
Nov 28 15:29:27 crc kubenswrapper[4857]: E1128 15:29:27.231173 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 15:29:41 crc kubenswrapper[4857]: I1128 15:29:41.228536 4857 scope.go:117] "RemoveContainer" containerID="8ce4d8d1204f0e33eb25524e56c57a576adc7638a6767a35e3118bbb0412c0ed"
Nov 28 15:29:41 crc kubenswrapper[4857]: E1128 15:29:41.229491 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
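[editor's note] The run of "Error syncing pod, skipping ... CrashLoopBackOff: back-off 5m0s" entries above and below shows the restart back-off already saturated: each sync attempt is refused until the current back-off window expires. Kubelet's default container restart back-off starts at 10s and doubles up to a 5m cap; a self-contained Go sketch of that doubling-with-cap pattern (illustrative only, not kubelet's actual implementation):

package main

import (
	"fmt"
	"time"
)

// nextBackoff doubles the previous restart delay and saturates at limit,
// mirroring the behaviour behind the repeated "back-off 5m0s" messages.
func nextBackoff(prev, limit time.Duration) time.Duration {
	if prev == 0 {
		return 10 * time.Second // assumed initial delay (kubelet default)
	}
	next := prev * 2
	if next > limit {
		return limit
	}
	return next
}

func main() {
	const maxDelay = 5 * time.Minute
	delay := time.Duration(0)
	for failure := 1; failure <= 8; failure++ {
		delay = nextBackoff(delay, maxDelay)
		fmt.Printf("failure %d: wait %v before next restart\n", failure, delay)
	}
	// From the 6th failure onward this prints 5m0s: the saturated state
	// the log shows while sync attempts keep being skipped.
}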
pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:29:53 crc kubenswrapper[4857]: I1128 15:29:53.229404 4857 scope.go:117] "RemoveContainer" containerID="8ce4d8d1204f0e33eb25524e56c57a576adc7638a6767a35e3118bbb0412c0ed" Nov 28 15:29:53 crc kubenswrapper[4857]: E1128 15:29:53.230727 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:30:00 crc kubenswrapper[4857]: I1128 15:30:00.148513 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405730-7kdpn"] Nov 28 15:30:00 crc kubenswrapper[4857]: E1128 15:30:00.149578 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68bccb7e-dab9-4869-9b08-e3f2cd20c571" containerName="registry-server" Nov 28 15:30:00 crc kubenswrapper[4857]: I1128 15:30:00.149592 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="68bccb7e-dab9-4869-9b08-e3f2cd20c571" containerName="registry-server" Nov 28 15:30:00 crc kubenswrapper[4857]: E1128 15:30:00.149607 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68bccb7e-dab9-4869-9b08-e3f2cd20c571" containerName="extract-content" Nov 28 15:30:00 crc kubenswrapper[4857]: I1128 15:30:00.149613 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="68bccb7e-dab9-4869-9b08-e3f2cd20c571" containerName="extract-content" Nov 28 15:30:00 crc kubenswrapper[4857]: E1128 15:30:00.149622 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68bccb7e-dab9-4869-9b08-e3f2cd20c571" containerName="extract-utilities" Nov 28 15:30:00 crc kubenswrapper[4857]: I1128 15:30:00.149632 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="68bccb7e-dab9-4869-9b08-e3f2cd20c571" containerName="extract-utilities" Nov 28 15:30:00 crc kubenswrapper[4857]: I1128 15:30:00.149828 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="68bccb7e-dab9-4869-9b08-e3f2cd20c571" containerName="registry-server" Nov 28 15:30:00 crc kubenswrapper[4857]: I1128 15:30:00.150665 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-7kdpn" Nov 28 15:30:00 crc kubenswrapper[4857]: I1128 15:30:00.158025 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 15:30:00 crc kubenswrapper[4857]: I1128 15:30:00.161541 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405730-7kdpn"] Nov 28 15:30:00 crc kubenswrapper[4857]: I1128 15:30:00.162976 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 15:30:00 crc kubenswrapper[4857]: I1128 15:30:00.283484 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dz9v8\" (UniqueName: \"kubernetes.io/projected/4e8306c3-562b-40d1-a682-a6894fc7bc51-kube-api-access-dz9v8\") pod \"collect-profiles-29405730-7kdpn\" (UID: \"4e8306c3-562b-40d1-a682-a6894fc7bc51\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-7kdpn" Nov 28 15:30:00 crc kubenswrapper[4857]: I1128 15:30:00.283649 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4e8306c3-562b-40d1-a682-a6894fc7bc51-config-volume\") pod \"collect-profiles-29405730-7kdpn\" (UID: \"4e8306c3-562b-40d1-a682-a6894fc7bc51\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-7kdpn" Nov 28 15:30:00 crc kubenswrapper[4857]: I1128 15:30:00.283702 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4e8306c3-562b-40d1-a682-a6894fc7bc51-secret-volume\") pod \"collect-profiles-29405730-7kdpn\" (UID: \"4e8306c3-562b-40d1-a682-a6894fc7bc51\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-7kdpn" Nov 28 15:30:00 crc kubenswrapper[4857]: I1128 15:30:00.387252 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4e8306c3-562b-40d1-a682-a6894fc7bc51-config-volume\") pod \"collect-profiles-29405730-7kdpn\" (UID: \"4e8306c3-562b-40d1-a682-a6894fc7bc51\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-7kdpn" Nov 28 15:30:00 crc kubenswrapper[4857]: I1128 15:30:00.387645 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4e8306c3-562b-40d1-a682-a6894fc7bc51-secret-volume\") pod \"collect-profiles-29405730-7kdpn\" (UID: \"4e8306c3-562b-40d1-a682-a6894fc7bc51\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-7kdpn" Nov 28 15:30:00 crc kubenswrapper[4857]: I1128 15:30:00.387765 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dz9v8\" (UniqueName: \"kubernetes.io/projected/4e8306c3-562b-40d1-a682-a6894fc7bc51-kube-api-access-dz9v8\") pod \"collect-profiles-29405730-7kdpn\" (UID: \"4e8306c3-562b-40d1-a682-a6894fc7bc51\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-7kdpn" Nov 28 15:30:00 crc kubenswrapper[4857]: I1128 15:30:00.388963 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4e8306c3-562b-40d1-a682-a6894fc7bc51-config-volume\") pod 
\"collect-profiles-29405730-7kdpn\" (UID: \"4e8306c3-562b-40d1-a682-a6894fc7bc51\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-7kdpn" Nov 28 15:30:00 crc kubenswrapper[4857]: I1128 15:30:00.403552 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4e8306c3-562b-40d1-a682-a6894fc7bc51-secret-volume\") pod \"collect-profiles-29405730-7kdpn\" (UID: \"4e8306c3-562b-40d1-a682-a6894fc7bc51\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-7kdpn" Nov 28 15:30:00 crc kubenswrapper[4857]: I1128 15:30:00.414524 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dz9v8\" (UniqueName: \"kubernetes.io/projected/4e8306c3-562b-40d1-a682-a6894fc7bc51-kube-api-access-dz9v8\") pod \"collect-profiles-29405730-7kdpn\" (UID: \"4e8306c3-562b-40d1-a682-a6894fc7bc51\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-7kdpn" Nov 28 15:30:00 crc kubenswrapper[4857]: I1128 15:30:00.479105 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-7kdpn" Nov 28 15:30:00 crc kubenswrapper[4857]: I1128 15:30:00.951792 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405730-7kdpn"] Nov 28 15:30:01 crc kubenswrapper[4857]: I1128 15:30:01.837200 4857 generic.go:334] "Generic (PLEG): container finished" podID="4e8306c3-562b-40d1-a682-a6894fc7bc51" containerID="6c694e2a8a305947d717ce15e15503557041bca4224fb28d45b4e13f2718426a" exitCode=0 Nov 28 15:30:01 crc kubenswrapper[4857]: I1128 15:30:01.837339 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-7kdpn" event={"ID":"4e8306c3-562b-40d1-a682-a6894fc7bc51","Type":"ContainerDied","Data":"6c694e2a8a305947d717ce15e15503557041bca4224fb28d45b4e13f2718426a"} Nov 28 15:30:01 crc kubenswrapper[4857]: I1128 15:30:01.837566 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-7kdpn" event={"ID":"4e8306c3-562b-40d1-a682-a6894fc7bc51","Type":"ContainerStarted","Data":"a6a72635ca9dfc333cdfe80d8ac67f49bf39c00a16be3d8d551ea26f26b399bd"} Nov 28 15:30:03 crc kubenswrapper[4857]: I1128 15:30:03.236301 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-7kdpn" Nov 28 15:30:03 crc kubenswrapper[4857]: I1128 15:30:03.359541 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4e8306c3-562b-40d1-a682-a6894fc7bc51-secret-volume\") pod \"4e8306c3-562b-40d1-a682-a6894fc7bc51\" (UID: \"4e8306c3-562b-40d1-a682-a6894fc7bc51\") " Nov 28 15:30:03 crc kubenswrapper[4857]: I1128 15:30:03.359727 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4e8306c3-562b-40d1-a682-a6894fc7bc51-config-volume\") pod \"4e8306c3-562b-40d1-a682-a6894fc7bc51\" (UID: \"4e8306c3-562b-40d1-a682-a6894fc7bc51\") " Nov 28 15:30:03 crc kubenswrapper[4857]: I1128 15:30:03.359992 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dz9v8\" (UniqueName: \"kubernetes.io/projected/4e8306c3-562b-40d1-a682-a6894fc7bc51-kube-api-access-dz9v8\") pod \"4e8306c3-562b-40d1-a682-a6894fc7bc51\" (UID: \"4e8306c3-562b-40d1-a682-a6894fc7bc51\") " Nov 28 15:30:03 crc kubenswrapper[4857]: I1128 15:30:03.360607 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4e8306c3-562b-40d1-a682-a6894fc7bc51-config-volume" (OuterVolumeSpecName: "config-volume") pod "4e8306c3-562b-40d1-a682-a6894fc7bc51" (UID: "4e8306c3-562b-40d1-a682-a6894fc7bc51"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:30:03 crc kubenswrapper[4857]: I1128 15:30:03.362668 4857 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/4e8306c3-562b-40d1-a682-a6894fc7bc51-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 15:30:03 crc kubenswrapper[4857]: I1128 15:30:03.367102 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e8306c3-562b-40d1-a682-a6894fc7bc51-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "4e8306c3-562b-40d1-a682-a6894fc7bc51" (UID: "4e8306c3-562b-40d1-a682-a6894fc7bc51"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:30:03 crc kubenswrapper[4857]: I1128 15:30:03.369718 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e8306c3-562b-40d1-a682-a6894fc7bc51-kube-api-access-dz9v8" (OuterVolumeSpecName: "kube-api-access-dz9v8") pod "4e8306c3-562b-40d1-a682-a6894fc7bc51" (UID: "4e8306c3-562b-40d1-a682-a6894fc7bc51"). InnerVolumeSpecName "kube-api-access-dz9v8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:30:03 crc kubenswrapper[4857]: I1128 15:30:03.465970 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dz9v8\" (UniqueName: \"kubernetes.io/projected/4e8306c3-562b-40d1-a682-a6894fc7bc51-kube-api-access-dz9v8\") on node \"crc\" DevicePath \"\"" Nov 28 15:30:03 crc kubenswrapper[4857]: I1128 15:30:03.466035 4857 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/4e8306c3-562b-40d1-a682-a6894fc7bc51-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 15:30:03 crc kubenswrapper[4857]: I1128 15:30:03.859077 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-7kdpn" event={"ID":"4e8306c3-562b-40d1-a682-a6894fc7bc51","Type":"ContainerDied","Data":"a6a72635ca9dfc333cdfe80d8ac67f49bf39c00a16be3d8d551ea26f26b399bd"} Nov 28 15:30:03 crc kubenswrapper[4857]: I1128 15:30:03.859125 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a6a72635ca9dfc333cdfe80d8ac67f49bf39c00a16be3d8d551ea26f26b399bd" Nov 28 15:30:03 crc kubenswrapper[4857]: I1128 15:30:03.859142 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405730-7kdpn" Nov 28 15:30:04 crc kubenswrapper[4857]: I1128 15:30:04.324906 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405685-wvm5t"] Nov 28 15:30:04 crc kubenswrapper[4857]: I1128 15:30:04.337187 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405685-wvm5t"] Nov 28 15:30:06 crc kubenswrapper[4857]: I1128 15:30:06.228669 4857 scope.go:117] "RemoveContainer" containerID="8ce4d8d1204f0e33eb25524e56c57a576adc7638a6767a35e3118bbb0412c0ed" Nov 28 15:30:06 crc kubenswrapper[4857]: E1128 15:30:06.229171 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:30:06 crc kubenswrapper[4857]: I1128 15:30:06.241487 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49637a1d-30df-4680-9bca-b28a28be48cb" path="/var/lib/kubelet/pods/49637a1d-30df-4680-9bca-b28a28be48cb/volumes" Nov 28 15:30:17 crc kubenswrapper[4857]: I1128 15:30:17.229796 4857 scope.go:117] "RemoveContainer" containerID="8ce4d8d1204f0e33eb25524e56c57a576adc7638a6767a35e3118bbb0412c0ed" Nov 28 15:30:17 crc kubenswrapper[4857]: E1128 15:30:17.230844 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:30:31 crc kubenswrapper[4857]: I1128 15:30:31.158193 4857 scope.go:117] "RemoveContainer" containerID="8b2c782b20a0ea4fde578586a42254b1ccdd5f92f08b4c1fc8f114c5aa693d30" Nov 28 
15:30:32 crc kubenswrapper[4857]: I1128 15:30:32.229101 4857 scope.go:117] "RemoveContainer" containerID="8ce4d8d1204f0e33eb25524e56c57a576adc7638a6767a35e3118bbb0412c0ed" Nov 28 15:30:32 crc kubenswrapper[4857]: E1128 15:30:32.230022 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:30:45 crc kubenswrapper[4857]: I1128 15:30:45.303107 4857 generic.go:334] "Generic (PLEG): container finished" podID="32fb95d9-2633-4efe-bcc4-e74e6fb9bd12" containerID="706da768641909ae1160efb5dbc3213de87db6584dd4aa35ed1381f0a95e8bc3" exitCode=0 Nov 28 15:30:45 crc kubenswrapper[4857]: I1128 15:30:45.303186 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f" event={"ID":"32fb95d9-2633-4efe-bcc4-e74e6fb9bd12","Type":"ContainerDied","Data":"706da768641909ae1160efb5dbc3213de87db6584dd4aa35ed1381f0a95e8bc3"} Nov 28 15:30:46 crc kubenswrapper[4857]: I1128 15:30:46.779966 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f" Nov 28 15:30:46 crc kubenswrapper[4857]: I1128 15:30:46.837289 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32fb95d9-2633-4efe-bcc4-e74e6fb9bd12-tripleo-cleanup-combined-ca-bundle\") pod \"32fb95d9-2633-4efe-bcc4-e74e6fb9bd12\" (UID: \"32fb95d9-2633-4efe-bcc4-e74e6fb9bd12\") " Nov 28 15:30:46 crc kubenswrapper[4857]: I1128 15:30:46.837529 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f8wlc\" (UniqueName: \"kubernetes.io/projected/32fb95d9-2633-4efe-bcc4-e74e6fb9bd12-kube-api-access-f8wlc\") pod \"32fb95d9-2633-4efe-bcc4-e74e6fb9bd12\" (UID: \"32fb95d9-2633-4efe-bcc4-e74e6fb9bd12\") " Nov 28 15:30:46 crc kubenswrapper[4857]: I1128 15:30:46.837590 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/32fb95d9-2633-4efe-bcc4-e74e6fb9bd12-ceph\") pod \"32fb95d9-2633-4efe-bcc4-e74e6fb9bd12\" (UID: \"32fb95d9-2633-4efe-bcc4-e74e6fb9bd12\") " Nov 28 15:30:46 crc kubenswrapper[4857]: I1128 15:30:46.838771 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/32fb95d9-2633-4efe-bcc4-e74e6fb9bd12-ssh-key\") pod \"32fb95d9-2633-4efe-bcc4-e74e6fb9bd12\" (UID: \"32fb95d9-2633-4efe-bcc4-e74e6fb9bd12\") " Nov 28 15:30:46 crc kubenswrapper[4857]: I1128 15:30:46.839118 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/32fb95d9-2633-4efe-bcc4-e74e6fb9bd12-inventory\") pod \"32fb95d9-2633-4efe-bcc4-e74e6fb9bd12\" (UID: \"32fb95d9-2633-4efe-bcc4-e74e6fb9bd12\") " Nov 28 15:30:46 crc kubenswrapper[4857]: I1128 15:30:46.843655 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/32fb95d9-2633-4efe-bcc4-e74e6fb9bd12-kube-api-access-f8wlc" (OuterVolumeSpecName: "kube-api-access-f8wlc") pod 
"32fb95d9-2633-4efe-bcc4-e74e6fb9bd12" (UID: "32fb95d9-2633-4efe-bcc4-e74e6fb9bd12"). InnerVolumeSpecName "kube-api-access-f8wlc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:30:46 crc kubenswrapper[4857]: I1128 15:30:46.843824 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32fb95d9-2633-4efe-bcc4-e74e6fb9bd12-ceph" (OuterVolumeSpecName: "ceph") pod "32fb95d9-2633-4efe-bcc4-e74e6fb9bd12" (UID: "32fb95d9-2633-4efe-bcc4-e74e6fb9bd12"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:30:46 crc kubenswrapper[4857]: I1128 15:30:46.844375 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32fb95d9-2633-4efe-bcc4-e74e6fb9bd12-tripleo-cleanup-combined-ca-bundle" (OuterVolumeSpecName: "tripleo-cleanup-combined-ca-bundle") pod "32fb95d9-2633-4efe-bcc4-e74e6fb9bd12" (UID: "32fb95d9-2633-4efe-bcc4-e74e6fb9bd12"). InnerVolumeSpecName "tripleo-cleanup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:30:46 crc kubenswrapper[4857]: I1128 15:30:46.873861 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32fb95d9-2633-4efe-bcc4-e74e6fb9bd12-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "32fb95d9-2633-4efe-bcc4-e74e6fb9bd12" (UID: "32fb95d9-2633-4efe-bcc4-e74e6fb9bd12"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:30:46 crc kubenswrapper[4857]: I1128 15:30:46.892714 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32fb95d9-2633-4efe-bcc4-e74e6fb9bd12-inventory" (OuterVolumeSpecName: "inventory") pod "32fb95d9-2633-4efe-bcc4-e74e6fb9bd12" (UID: "32fb95d9-2633-4efe-bcc4-e74e6fb9bd12"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:30:46 crc kubenswrapper[4857]: I1128 15:30:46.942095 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f8wlc\" (UniqueName: \"kubernetes.io/projected/32fb95d9-2633-4efe-bcc4-e74e6fb9bd12-kube-api-access-f8wlc\") on node \"crc\" DevicePath \"\"" Nov 28 15:30:46 crc kubenswrapper[4857]: I1128 15:30:46.942132 4857 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/32fb95d9-2633-4efe-bcc4-e74e6fb9bd12-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 15:30:46 crc kubenswrapper[4857]: I1128 15:30:46.942142 4857 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/32fb95d9-2633-4efe-bcc4-e74e6fb9bd12-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 15:30:46 crc kubenswrapper[4857]: I1128 15:30:46.942150 4857 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/32fb95d9-2633-4efe-bcc4-e74e6fb9bd12-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 15:30:46 crc kubenswrapper[4857]: I1128 15:30:46.942160 4857 reconciler_common.go:293] "Volume detached for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32fb95d9-2633-4efe-bcc4-e74e6fb9bd12-tripleo-cleanup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:30:47 crc kubenswrapper[4857]: I1128 15:30:47.229542 4857 scope.go:117] "RemoveContainer" containerID="8ce4d8d1204f0e33eb25524e56c57a576adc7638a6767a35e3118bbb0412c0ed" Nov 28 15:30:47 crc kubenswrapper[4857]: E1128 15:30:47.229762 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:30:47 crc kubenswrapper[4857]: I1128 15:30:47.322906 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f" event={"ID":"32fb95d9-2633-4efe-bcc4-e74e6fb9bd12","Type":"ContainerDied","Data":"bc937da3f7f295a9a1e9201f9ac642e8d8872d1bf68de7ca64ccc85543360781"} Nov 28 15:30:47 crc kubenswrapper[4857]: I1128 15:30:47.322964 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bc937da3f7f295a9a1e9201f9ac642e8d8872d1bf68de7ca64ccc85543360781" Nov 28 15:30:47 crc kubenswrapper[4857]: I1128 15:30:47.322998 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f" Nov 28 15:30:48 crc kubenswrapper[4857]: I1128 15:30:48.947858 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-openstack-openstack-cell1-9gtn8"] Nov 28 15:30:48 crc kubenswrapper[4857]: E1128 15:30:48.948686 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e8306c3-562b-40d1-a682-a6894fc7bc51" containerName="collect-profiles" Nov 28 15:30:48 crc kubenswrapper[4857]: I1128 15:30:48.948698 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e8306c3-562b-40d1-a682-a6894fc7bc51" containerName="collect-profiles" Nov 28 15:30:48 crc kubenswrapper[4857]: E1128 15:30:48.948741 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32fb95d9-2633-4efe-bcc4-e74e6fb9bd12" containerName="tripleo-cleanup-tripleo-cleanup-openstack-cell1" Nov 28 15:30:48 crc kubenswrapper[4857]: I1128 15:30:48.948748 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="32fb95d9-2633-4efe-bcc4-e74e6fb9bd12" containerName="tripleo-cleanup-tripleo-cleanup-openstack-cell1" Nov 28 15:30:48 crc kubenswrapper[4857]: I1128 15:30:48.948978 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="32fb95d9-2633-4efe-bcc4-e74e6fb9bd12" containerName="tripleo-cleanup-tripleo-cleanup-openstack-cell1" Nov 28 15:30:48 crc kubenswrapper[4857]: I1128 15:30:48.948991 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e8306c3-562b-40d1-a682-a6894fc7bc51" containerName="collect-profiles" Nov 28 15:30:48 crc kubenswrapper[4857]: I1128 15:30:48.949754 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-9gtn8" Nov 28 15:30:48 crc kubenswrapper[4857]: I1128 15:30:48.953489 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-qbvn9" Nov 28 15:30:48 crc kubenswrapper[4857]: I1128 15:30:48.953864 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 15:30:48 crc kubenswrapper[4857]: I1128 15:30:48.953925 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 15:30:48 crc kubenswrapper[4857]: I1128 15:30:48.962060 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 15:30:48 crc kubenswrapper[4857]: I1128 15:30:48.966302 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-openstack-openstack-cell1-9gtn8"] Nov 28 15:30:49 crc kubenswrapper[4857]: I1128 15:30:49.095189 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb-bootstrap-combined-ca-bundle\") pod \"bootstrap-openstack-openstack-cell1-9gtn8\" (UID: \"d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb\") " pod="openstack/bootstrap-openstack-openstack-cell1-9gtn8" Nov 28 15:30:49 crc kubenswrapper[4857]: I1128 15:30:49.095574 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb-inventory\") pod \"bootstrap-openstack-openstack-cell1-9gtn8\" (UID: \"d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb\") " pod="openstack/bootstrap-openstack-openstack-cell1-9gtn8" Nov 28 15:30:49 crc 
Nov 28 15:30:49 crc kubenswrapper[4857]: I1128 15:30:49.095635 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb-ssh-key\") pod \"bootstrap-openstack-openstack-cell1-9gtn8\" (UID: \"d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb\") " pod="openstack/bootstrap-openstack-openstack-cell1-9gtn8"
Nov 28 15:30:49 crc kubenswrapper[4857]: I1128 15:30:49.095689 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-52vh5\" (UniqueName: \"kubernetes.io/projected/d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb-kube-api-access-52vh5\") pod \"bootstrap-openstack-openstack-cell1-9gtn8\" (UID: \"d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb\") " pod="openstack/bootstrap-openstack-openstack-cell1-9gtn8"
Nov 28 15:30:49 crc kubenswrapper[4857]: I1128 15:30:49.095761 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb-ceph\") pod \"bootstrap-openstack-openstack-cell1-9gtn8\" (UID: \"d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb\") " pod="openstack/bootstrap-openstack-openstack-cell1-9gtn8"
Nov 28 15:30:49 crc kubenswrapper[4857]: I1128 15:30:49.198155 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb-bootstrap-combined-ca-bundle\") pod \"bootstrap-openstack-openstack-cell1-9gtn8\" (UID: \"d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb\") " pod="openstack/bootstrap-openstack-openstack-cell1-9gtn8"
Nov 28 15:30:49 crc kubenswrapper[4857]: I1128 15:30:49.198278 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb-inventory\") pod \"bootstrap-openstack-openstack-cell1-9gtn8\" (UID: \"d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb\") " pod="openstack/bootstrap-openstack-openstack-cell1-9gtn8"
Nov 28 15:30:49 crc kubenswrapper[4857]: I1128 15:30:49.198366 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb-ssh-key\") pod \"bootstrap-openstack-openstack-cell1-9gtn8\" (UID: \"d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb\") " pod="openstack/bootstrap-openstack-openstack-cell1-9gtn8"
Nov 28 15:30:49 crc kubenswrapper[4857]: I1128 15:30:49.198443 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-52vh5\" (UniqueName: \"kubernetes.io/projected/d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb-kube-api-access-52vh5\") pod \"bootstrap-openstack-openstack-cell1-9gtn8\" (UID: \"d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb\") " pod="openstack/bootstrap-openstack-openstack-cell1-9gtn8"
Nov 28 15:30:49 crc kubenswrapper[4857]: I1128 15:30:49.198540 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb-ceph\") pod \"bootstrap-openstack-openstack-cell1-9gtn8\" (UID: \"d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb\") " pod="openstack/bootstrap-openstack-openstack-cell1-9gtn8"
Nov 28 15:30:49 crc kubenswrapper[4857]: I1128 15:30:49.207411 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb-bootstrap-combined-ca-bundle\") pod \"bootstrap-openstack-openstack-cell1-9gtn8\" (UID: \"d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb\") " pod="openstack/bootstrap-openstack-openstack-cell1-9gtn8"
Nov 28 15:30:49 crc kubenswrapper[4857]: I1128 15:30:49.207538 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb-ceph\") pod \"bootstrap-openstack-openstack-cell1-9gtn8\" (UID: \"d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb\") " pod="openstack/bootstrap-openstack-openstack-cell1-9gtn8"
Nov 28 15:30:49 crc kubenswrapper[4857]: I1128 15:30:49.208100 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb-inventory\") pod \"bootstrap-openstack-openstack-cell1-9gtn8\" (UID: \"d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb\") " pod="openstack/bootstrap-openstack-openstack-cell1-9gtn8"
Nov 28 15:30:49 crc kubenswrapper[4857]: I1128 15:30:49.209388 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb-ssh-key\") pod \"bootstrap-openstack-openstack-cell1-9gtn8\" (UID: \"d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb\") " pod="openstack/bootstrap-openstack-openstack-cell1-9gtn8"
Nov 28 15:30:49 crc kubenswrapper[4857]: I1128 15:30:49.225000 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-52vh5\" (UniqueName: \"kubernetes.io/projected/d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb-kube-api-access-52vh5\") pod \"bootstrap-openstack-openstack-cell1-9gtn8\" (UID: \"d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb\") " pod="openstack/bootstrap-openstack-openstack-cell1-9gtn8"
Nov 28 15:30:49 crc kubenswrapper[4857]: I1128 15:30:49.284725 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-9gtn8"
Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-9gtn8" Nov 28 15:30:49 crc kubenswrapper[4857]: I1128 15:30:49.919280 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-openstack-openstack-cell1-9gtn8"] Nov 28 15:30:50 crc kubenswrapper[4857]: I1128 15:30:50.363057 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-9gtn8" event={"ID":"d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb","Type":"ContainerStarted","Data":"6856907f15e42f981d5ee653794a094a8039839d2552fc33dcd629b5f67a9e4d"} Nov 28 15:30:51 crc kubenswrapper[4857]: I1128 15:30:51.375536 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-9gtn8" event={"ID":"d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb","Type":"ContainerStarted","Data":"62a25325708d761ed0141cddbfa816ec84610a74b54e94f967c2f17dc160d1e9"} Nov 28 15:30:51 crc kubenswrapper[4857]: I1128 15:30:51.400444 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-openstack-openstack-cell1-9gtn8" podStartSLOduration=2.7432437480000003 podStartE2EDuration="3.40042349s" podCreationTimestamp="2025-11-28 15:30:48 +0000 UTC" firstStartedPulling="2025-11-28 15:30:49.922377484 +0000 UTC m=+7300.046318931" lastFinishedPulling="2025-11-28 15:30:50.579557226 +0000 UTC m=+7300.703498673" observedRunningTime="2025-11-28 15:30:51.395366815 +0000 UTC m=+7301.519308262" watchObservedRunningTime="2025-11-28 15:30:51.40042349 +0000 UTC m=+7301.524364927" Nov 28 15:31:01 crc kubenswrapper[4857]: I1128 15:31:01.229653 4857 scope.go:117] "RemoveContainer" containerID="8ce4d8d1204f0e33eb25524e56c57a576adc7638a6767a35e3118bbb0412c0ed" Nov 28 15:31:01 crc kubenswrapper[4857]: E1128 15:31:01.230625 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:31:14 crc kubenswrapper[4857]: I1128 15:31:14.231425 4857 scope.go:117] "RemoveContainer" containerID="8ce4d8d1204f0e33eb25524e56c57a576adc7638a6767a35e3118bbb0412c0ed" Nov 28 15:31:14 crc kubenswrapper[4857]: E1128 15:31:14.232883 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:31:26 crc kubenswrapper[4857]: I1128 15:31:26.230062 4857 scope.go:117] "RemoveContainer" containerID="8ce4d8d1204f0e33eb25524e56c57a576adc7638a6767a35e3118bbb0412c0ed" Nov 28 15:31:26 crc kubenswrapper[4857]: E1128 15:31:26.231162 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" 
podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:31:37 crc kubenswrapper[4857]: I1128 15:31:37.229133 4857 scope.go:117] "RemoveContainer" containerID="8ce4d8d1204f0e33eb25524e56c57a576adc7638a6767a35e3118bbb0412c0ed" Nov 28 15:31:37 crc kubenswrapper[4857]: E1128 15:31:37.230175 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:31:48 crc kubenswrapper[4857]: I1128 15:31:48.229507 4857 scope.go:117] "RemoveContainer" containerID="8ce4d8d1204f0e33eb25524e56c57a576adc7638a6767a35e3118bbb0412c0ed" Nov 28 15:31:48 crc kubenswrapper[4857]: E1128 15:31:48.230600 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:32:00 crc kubenswrapper[4857]: I1128 15:32:00.240674 4857 scope.go:117] "RemoveContainer" containerID="8ce4d8d1204f0e33eb25524e56c57a576adc7638a6767a35e3118bbb0412c0ed" Nov 28 15:32:00 crc kubenswrapper[4857]: E1128 15:32:00.242656 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:32:01 crc kubenswrapper[4857]: I1128 15:32:01.995868 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-rfpwr"] Nov 28 15:32:02 crc kubenswrapper[4857]: I1128 15:32:02.002582 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rfpwr" Nov 28 15:32:02 crc kubenswrapper[4857]: I1128 15:32:02.041538 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rfpwr"] Nov 28 15:32:02 crc kubenswrapper[4857]: I1128 15:32:02.118807 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f9ceabb-4609-4522-9070-fe7bd9905600-catalog-content\") pod \"redhat-marketplace-rfpwr\" (UID: \"1f9ceabb-4609-4522-9070-fe7bd9905600\") " pod="openshift-marketplace/redhat-marketplace-rfpwr" Nov 28 15:32:02 crc kubenswrapper[4857]: I1128 15:32:02.118923 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f9ceabb-4609-4522-9070-fe7bd9905600-utilities\") pod \"redhat-marketplace-rfpwr\" (UID: \"1f9ceabb-4609-4522-9070-fe7bd9905600\") " pod="openshift-marketplace/redhat-marketplace-rfpwr" Nov 28 15:32:02 crc kubenswrapper[4857]: I1128 15:32:02.118981 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gtm9l\" (UniqueName: \"kubernetes.io/projected/1f9ceabb-4609-4522-9070-fe7bd9905600-kube-api-access-gtm9l\") pod \"redhat-marketplace-rfpwr\" (UID: \"1f9ceabb-4609-4522-9070-fe7bd9905600\") " pod="openshift-marketplace/redhat-marketplace-rfpwr" Nov 28 15:32:02 crc kubenswrapper[4857]: I1128 15:32:02.222302 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f9ceabb-4609-4522-9070-fe7bd9905600-utilities\") pod \"redhat-marketplace-rfpwr\" (UID: \"1f9ceabb-4609-4522-9070-fe7bd9905600\") " pod="openshift-marketplace/redhat-marketplace-rfpwr" Nov 28 15:32:02 crc kubenswrapper[4857]: I1128 15:32:02.222393 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gtm9l\" (UniqueName: \"kubernetes.io/projected/1f9ceabb-4609-4522-9070-fe7bd9905600-kube-api-access-gtm9l\") pod \"redhat-marketplace-rfpwr\" (UID: \"1f9ceabb-4609-4522-9070-fe7bd9905600\") " pod="openshift-marketplace/redhat-marketplace-rfpwr" Nov 28 15:32:02 crc kubenswrapper[4857]: I1128 15:32:02.222644 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f9ceabb-4609-4522-9070-fe7bd9905600-catalog-content\") pod \"redhat-marketplace-rfpwr\" (UID: \"1f9ceabb-4609-4522-9070-fe7bd9905600\") " pod="openshift-marketplace/redhat-marketplace-rfpwr" Nov 28 15:32:02 crc kubenswrapper[4857]: I1128 15:32:02.223484 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f9ceabb-4609-4522-9070-fe7bd9905600-utilities\") pod \"redhat-marketplace-rfpwr\" (UID: \"1f9ceabb-4609-4522-9070-fe7bd9905600\") " pod="openshift-marketplace/redhat-marketplace-rfpwr" Nov 28 15:32:02 crc kubenswrapper[4857]: I1128 15:32:02.223575 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f9ceabb-4609-4522-9070-fe7bd9905600-catalog-content\") pod \"redhat-marketplace-rfpwr\" (UID: \"1f9ceabb-4609-4522-9070-fe7bd9905600\") " pod="openshift-marketplace/redhat-marketplace-rfpwr" Nov 28 15:32:02 crc kubenswrapper[4857]: I1128 15:32:02.249077 4857 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-gtm9l\" (UniqueName: \"kubernetes.io/projected/1f9ceabb-4609-4522-9070-fe7bd9905600-kube-api-access-gtm9l\") pod \"redhat-marketplace-rfpwr\" (UID: \"1f9ceabb-4609-4522-9070-fe7bd9905600\") " pod="openshift-marketplace/redhat-marketplace-rfpwr" Nov 28 15:32:02 crc kubenswrapper[4857]: I1128 15:32:02.335574 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rfpwr" Nov 28 15:32:02 crc kubenswrapper[4857]: I1128 15:32:02.876490 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rfpwr"] Nov 28 15:32:03 crc kubenswrapper[4857]: I1128 15:32:03.177488 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rfpwr" event={"ID":"1f9ceabb-4609-4522-9070-fe7bd9905600","Type":"ContainerStarted","Data":"8ee75a43d2c7689a476e240485abb9c1f703fe5e186fce0d497e8bd3b940976d"} Nov 28 15:32:05 crc kubenswrapper[4857]: I1128 15:32:05.196007 4857 generic.go:334] "Generic (PLEG): container finished" podID="1f9ceabb-4609-4522-9070-fe7bd9905600" containerID="4b51dcc51e8a7a4cd77223c3e1d1c3c3bc569e48395c084e53e0d2d94d4762aa" exitCode=0 Nov 28 15:32:05 crc kubenswrapper[4857]: I1128 15:32:05.196119 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rfpwr" event={"ID":"1f9ceabb-4609-4522-9070-fe7bd9905600","Type":"ContainerDied","Data":"4b51dcc51e8a7a4cd77223c3e1d1c3c3bc569e48395c084e53e0d2d94d4762aa"} Nov 28 15:32:05 crc kubenswrapper[4857]: I1128 15:32:05.198258 4857 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 15:32:07 crc kubenswrapper[4857]: I1128 15:32:07.226029 4857 generic.go:334] "Generic (PLEG): container finished" podID="1f9ceabb-4609-4522-9070-fe7bd9905600" containerID="8c94184572624d9df662fe363f5895fa4a399a30f5ec482853edff03edc8f5ad" exitCode=0 Nov 28 15:32:07 crc kubenswrapper[4857]: I1128 15:32:07.226096 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rfpwr" event={"ID":"1f9ceabb-4609-4522-9070-fe7bd9905600","Type":"ContainerDied","Data":"8c94184572624d9df662fe363f5895fa4a399a30f5ec482853edff03edc8f5ad"} Nov 28 15:32:09 crc kubenswrapper[4857]: I1128 15:32:09.262927 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rfpwr" event={"ID":"1f9ceabb-4609-4522-9070-fe7bd9905600","Type":"ContainerStarted","Data":"6fb6889802ac3f0ab16742fb574ef98afd636146902d2f3d95b94a76a85ccdf9"} Nov 28 15:32:09 crc kubenswrapper[4857]: I1128 15:32:09.301986 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-rfpwr" podStartSLOduration=5.526400288 podStartE2EDuration="8.301962557s" podCreationTimestamp="2025-11-28 15:32:01 +0000 UTC" firstStartedPulling="2025-11-28 15:32:05.198025031 +0000 UTC m=+7375.321966468" lastFinishedPulling="2025-11-28 15:32:07.97358727 +0000 UTC m=+7378.097528737" observedRunningTime="2025-11-28 15:32:09.288284753 +0000 UTC m=+7379.412226180" watchObservedRunningTime="2025-11-28 15:32:09.301962557 +0000 UTC m=+7379.425904004" Nov 28 15:32:12 crc kubenswrapper[4857]: I1128 15:32:12.336642 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-rfpwr" Nov 28 15:32:12 crc kubenswrapper[4857]: I1128 15:32:12.338122 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/redhat-marketplace-rfpwr" Nov 28 15:32:12 crc kubenswrapper[4857]: I1128 15:32:12.383417 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-rfpwr" Nov 28 15:32:13 crc kubenswrapper[4857]: I1128 15:32:13.380075 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-rfpwr" Nov 28 15:32:13 crc kubenswrapper[4857]: I1128 15:32:13.451415 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rfpwr"] Nov 28 15:32:15 crc kubenswrapper[4857]: I1128 15:32:15.229783 4857 scope.go:117] "RemoveContainer" containerID="8ce4d8d1204f0e33eb25524e56c57a576adc7638a6767a35e3118bbb0412c0ed" Nov 28 15:32:15 crc kubenswrapper[4857]: E1128 15:32:15.230723 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:32:15 crc kubenswrapper[4857]: I1128 15:32:15.342387 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-rfpwr" podUID="1f9ceabb-4609-4522-9070-fe7bd9905600" containerName="registry-server" containerID="cri-o://6fb6889802ac3f0ab16742fb574ef98afd636146902d2f3d95b94a76a85ccdf9" gracePeriod=2 Nov 28 15:32:15 crc kubenswrapper[4857]: I1128 15:32:15.960023 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rfpwr" Nov 28 15:32:16 crc kubenswrapper[4857]: I1128 15:32:16.084555 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f9ceabb-4609-4522-9070-fe7bd9905600-utilities\") pod \"1f9ceabb-4609-4522-9070-fe7bd9905600\" (UID: \"1f9ceabb-4609-4522-9070-fe7bd9905600\") " Nov 28 15:32:16 crc kubenswrapper[4857]: I1128 15:32:16.084639 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f9ceabb-4609-4522-9070-fe7bd9905600-catalog-content\") pod \"1f9ceabb-4609-4522-9070-fe7bd9905600\" (UID: \"1f9ceabb-4609-4522-9070-fe7bd9905600\") " Nov 28 15:32:16 crc kubenswrapper[4857]: I1128 15:32:16.084738 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gtm9l\" (UniqueName: \"kubernetes.io/projected/1f9ceabb-4609-4522-9070-fe7bd9905600-kube-api-access-gtm9l\") pod \"1f9ceabb-4609-4522-9070-fe7bd9905600\" (UID: \"1f9ceabb-4609-4522-9070-fe7bd9905600\") " Nov 28 15:32:16 crc kubenswrapper[4857]: I1128 15:32:16.087116 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1f9ceabb-4609-4522-9070-fe7bd9905600-utilities" (OuterVolumeSpecName: "utilities") pod "1f9ceabb-4609-4522-9070-fe7bd9905600" (UID: "1f9ceabb-4609-4522-9070-fe7bd9905600"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:32:16 crc kubenswrapper[4857]: I1128 15:32:16.092263 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f9ceabb-4609-4522-9070-fe7bd9905600-kube-api-access-gtm9l" (OuterVolumeSpecName: "kube-api-access-gtm9l") pod "1f9ceabb-4609-4522-9070-fe7bd9905600" (UID: "1f9ceabb-4609-4522-9070-fe7bd9905600"). InnerVolumeSpecName "kube-api-access-gtm9l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:32:16 crc kubenswrapper[4857]: I1128 15:32:16.103373 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1f9ceabb-4609-4522-9070-fe7bd9905600-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1f9ceabb-4609-4522-9070-fe7bd9905600" (UID: "1f9ceabb-4609-4522-9070-fe7bd9905600"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:32:16 crc kubenswrapper[4857]: I1128 15:32:16.186919 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f9ceabb-4609-4522-9070-fe7bd9905600-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:32:16 crc kubenswrapper[4857]: I1128 15:32:16.186963 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f9ceabb-4609-4522-9070-fe7bd9905600-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:32:16 crc kubenswrapper[4857]: I1128 15:32:16.186977 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gtm9l\" (UniqueName: \"kubernetes.io/projected/1f9ceabb-4609-4522-9070-fe7bd9905600-kube-api-access-gtm9l\") on node \"crc\" DevicePath \"\"" Nov 28 15:32:16 crc kubenswrapper[4857]: I1128 15:32:16.357249 4857 generic.go:334] "Generic (PLEG): container finished" podID="1f9ceabb-4609-4522-9070-fe7bd9905600" containerID="6fb6889802ac3f0ab16742fb574ef98afd636146902d2f3d95b94a76a85ccdf9" exitCode=0 Nov 28 15:32:16 crc kubenswrapper[4857]: I1128 15:32:16.357325 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rfpwr" Nov 28 15:32:16 crc kubenswrapper[4857]: I1128 15:32:16.357343 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rfpwr" event={"ID":"1f9ceabb-4609-4522-9070-fe7bd9905600","Type":"ContainerDied","Data":"6fb6889802ac3f0ab16742fb574ef98afd636146902d2f3d95b94a76a85ccdf9"} Nov 28 15:32:16 crc kubenswrapper[4857]: I1128 15:32:16.357874 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rfpwr" event={"ID":"1f9ceabb-4609-4522-9070-fe7bd9905600","Type":"ContainerDied","Data":"8ee75a43d2c7689a476e240485abb9c1f703fe5e186fce0d497e8bd3b940976d"} Nov 28 15:32:16 crc kubenswrapper[4857]: I1128 15:32:16.357896 4857 scope.go:117] "RemoveContainer" containerID="6fb6889802ac3f0ab16742fb574ef98afd636146902d2f3d95b94a76a85ccdf9" Nov 28 15:32:16 crc kubenswrapper[4857]: I1128 15:32:16.389742 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rfpwr"] Nov 28 15:32:16 crc kubenswrapper[4857]: I1128 15:32:16.390899 4857 scope.go:117] "RemoveContainer" containerID="8c94184572624d9df662fe363f5895fa4a399a30f5ec482853edff03edc8f5ad" Nov 28 15:32:16 crc kubenswrapper[4857]: I1128 15:32:16.405918 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-rfpwr"] Nov 28 15:32:16 crc kubenswrapper[4857]: I1128 15:32:16.430492 4857 scope.go:117] "RemoveContainer" containerID="4b51dcc51e8a7a4cd77223c3e1d1c3c3bc569e48395c084e53e0d2d94d4762aa" Nov 28 15:32:16 crc kubenswrapper[4857]: I1128 15:32:16.480253 4857 scope.go:117] "RemoveContainer" containerID="6fb6889802ac3f0ab16742fb574ef98afd636146902d2f3d95b94a76a85ccdf9" Nov 28 15:32:16 crc kubenswrapper[4857]: E1128 15:32:16.480799 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6fb6889802ac3f0ab16742fb574ef98afd636146902d2f3d95b94a76a85ccdf9\": container with ID starting with 6fb6889802ac3f0ab16742fb574ef98afd636146902d2f3d95b94a76a85ccdf9 not found: ID does not exist" containerID="6fb6889802ac3f0ab16742fb574ef98afd636146902d2f3d95b94a76a85ccdf9" Nov 28 15:32:16 crc kubenswrapper[4857]: I1128 15:32:16.480877 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6fb6889802ac3f0ab16742fb574ef98afd636146902d2f3d95b94a76a85ccdf9"} err="failed to get container status \"6fb6889802ac3f0ab16742fb574ef98afd636146902d2f3d95b94a76a85ccdf9\": rpc error: code = NotFound desc = could not find container \"6fb6889802ac3f0ab16742fb574ef98afd636146902d2f3d95b94a76a85ccdf9\": container with ID starting with 6fb6889802ac3f0ab16742fb574ef98afd636146902d2f3d95b94a76a85ccdf9 not found: ID does not exist" Nov 28 15:32:16 crc kubenswrapper[4857]: I1128 15:32:16.480926 4857 scope.go:117] "RemoveContainer" containerID="8c94184572624d9df662fe363f5895fa4a399a30f5ec482853edff03edc8f5ad" Nov 28 15:32:16 crc kubenswrapper[4857]: E1128 15:32:16.481372 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8c94184572624d9df662fe363f5895fa4a399a30f5ec482853edff03edc8f5ad\": container with ID starting with 8c94184572624d9df662fe363f5895fa4a399a30f5ec482853edff03edc8f5ad not found: ID does not exist" containerID="8c94184572624d9df662fe363f5895fa4a399a30f5ec482853edff03edc8f5ad" Nov 28 15:32:16 crc kubenswrapper[4857]: I1128 15:32:16.481403 4857 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c94184572624d9df662fe363f5895fa4a399a30f5ec482853edff03edc8f5ad"} err="failed to get container status \"8c94184572624d9df662fe363f5895fa4a399a30f5ec482853edff03edc8f5ad\": rpc error: code = NotFound desc = could not find container \"8c94184572624d9df662fe363f5895fa4a399a30f5ec482853edff03edc8f5ad\": container with ID starting with 8c94184572624d9df662fe363f5895fa4a399a30f5ec482853edff03edc8f5ad not found: ID does not exist" Nov 28 15:32:16 crc kubenswrapper[4857]: I1128 15:32:16.481423 4857 scope.go:117] "RemoveContainer" containerID="4b51dcc51e8a7a4cd77223c3e1d1c3c3bc569e48395c084e53e0d2d94d4762aa" Nov 28 15:32:16 crc kubenswrapper[4857]: E1128 15:32:16.481860 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4b51dcc51e8a7a4cd77223c3e1d1c3c3bc569e48395c084e53e0d2d94d4762aa\": container with ID starting with 4b51dcc51e8a7a4cd77223c3e1d1c3c3bc569e48395c084e53e0d2d94d4762aa not found: ID does not exist" containerID="4b51dcc51e8a7a4cd77223c3e1d1c3c3bc569e48395c084e53e0d2d94d4762aa" Nov 28 15:32:16 crc kubenswrapper[4857]: I1128 15:32:16.481896 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b51dcc51e8a7a4cd77223c3e1d1c3c3bc569e48395c084e53e0d2d94d4762aa"} err="failed to get container status \"4b51dcc51e8a7a4cd77223c3e1d1c3c3bc569e48395c084e53e0d2d94d4762aa\": rpc error: code = NotFound desc = could not find container \"4b51dcc51e8a7a4cd77223c3e1d1c3c3bc569e48395c084e53e0d2d94d4762aa\": container with ID starting with 4b51dcc51e8a7a4cd77223c3e1d1c3c3bc569e48395c084e53e0d2d94d4762aa not found: ID does not exist" Nov 28 15:32:18 crc kubenswrapper[4857]: I1128 15:32:18.243205 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1f9ceabb-4609-4522-9070-fe7bd9905600" path="/var/lib/kubelet/pods/1f9ceabb-4609-4522-9070-fe7bd9905600/volumes" Nov 28 15:32:28 crc kubenswrapper[4857]: I1128 15:32:28.229238 4857 scope.go:117] "RemoveContainer" containerID="8ce4d8d1204f0e33eb25524e56c57a576adc7638a6767a35e3118bbb0412c0ed" Nov 28 15:32:28 crc kubenswrapper[4857]: E1128 15:32:28.230276 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:32:42 crc kubenswrapper[4857]: I1128 15:32:42.229810 4857 scope.go:117] "RemoveContainer" containerID="8ce4d8d1204f0e33eb25524e56c57a576adc7638a6767a35e3118bbb0412c0ed" Nov 28 15:32:42 crc kubenswrapper[4857]: E1128 15:32:42.231242 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:32:46 crc kubenswrapper[4857]: I1128 15:32:46.981535 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-c94nt"] Nov 28 15:32:46 crc 
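The "ContainerStatus from runtime service failed ... NotFound" errors right after the RemoveContainer calls are a benign ordering artifact: the containers were already deleted from CRI-O, so the follow-up status query can only fail with NotFound, which the deletor logs and moves past. In gRPC-based CRI clients the idiomatic guard looks like this sketch (generic gRPC status handling requiring the google.golang.org/grpc module, not the kubelet's exact code):

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// containerStatus stands in for a CRI ContainerStatus call against a
// container that has already been removed from the runtime.
func containerStatus(id string) error {
	return status.Errorf(codes.NotFound, "could not find container %q", id)
}

func main() {
	err := containerStatus("6fb6889802ac")
	if status.Code(err) == codes.NotFound {
		// Already gone: log it and treat the deletion as complete,
		// which is what pod_container_deletor.go does above.
		fmt.Println("container already removed:", err)
	}
}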
Nov 28 15:32:46 crc kubenswrapper[4857]: E1128 15:32:46.983033 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f9ceabb-4609-4522-9070-fe7bd9905600" containerName="registry-server"
Nov 28 15:32:46 crc kubenswrapper[4857]: I1128 15:32:46.983110 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f9ceabb-4609-4522-9070-fe7bd9905600" containerName="registry-server"
Nov 28 15:32:46 crc kubenswrapper[4857]: E1128 15:32:46.983194 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f9ceabb-4609-4522-9070-fe7bd9905600" containerName="extract-utilities"
Nov 28 15:32:46 crc kubenswrapper[4857]: I1128 15:32:46.983252 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f9ceabb-4609-4522-9070-fe7bd9905600" containerName="extract-utilities"
Nov 28 15:32:46 crc kubenswrapper[4857]: E1128 15:32:46.983314 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f9ceabb-4609-4522-9070-fe7bd9905600" containerName="extract-content"
Nov 28 15:32:46 crc kubenswrapper[4857]: I1128 15:32:46.983372 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f9ceabb-4609-4522-9070-fe7bd9905600" containerName="extract-content"
Nov 28 15:32:46 crc kubenswrapper[4857]: I1128 15:32:46.983821 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f9ceabb-4609-4522-9070-fe7bd9905600" containerName="registry-server"
Nov 28 15:32:46 crc kubenswrapper[4857]: I1128 15:32:46.985548 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c94nt"
Nov 28 15:32:46 crc kubenswrapper[4857]: I1128 15:32:46.997796 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c94nt"]
Nov 28 15:32:47 crc kubenswrapper[4857]: I1128 15:32:47.109852 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/869bd67a-b271-4bc4-bf0e-63d5b1650279-catalog-content\") pod \"certified-operators-c94nt\" (UID: \"869bd67a-b271-4bc4-bf0e-63d5b1650279\") " pod="openshift-marketplace/certified-operators-c94nt"
Nov 28 15:32:47 crc kubenswrapper[4857]: I1128 15:32:47.110263 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/869bd67a-b271-4bc4-bf0e-63d5b1650279-utilities\") pod \"certified-operators-c94nt\" (UID: \"869bd67a-b271-4bc4-bf0e-63d5b1650279\") " pod="openshift-marketplace/certified-operators-c94nt"
Nov 28 15:32:47 crc kubenswrapper[4857]: I1128 15:32:47.110793 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2nmrp\" (UniqueName: \"kubernetes.io/projected/869bd67a-b271-4bc4-bf0e-63d5b1650279-kube-api-access-2nmrp\") pod \"certified-operators-c94nt\" (UID: \"869bd67a-b271-4bc4-bf0e-63d5b1650279\") " pod="openshift-marketplace/certified-operators-c94nt"
Nov 28 15:32:47 crc kubenswrapper[4857]: I1128 15:32:47.212754 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/869bd67a-b271-4bc4-bf0e-63d5b1650279-catalog-content\") pod \"certified-operators-c94nt\" (UID: \"869bd67a-b271-4bc4-bf0e-63d5b1650279\") " pod="openshift-marketplace/certified-operators-c94nt"
Nov 28 15:32:47 crc kubenswrapper[4857]: I1128 15:32:47.212832 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/869bd67a-b271-4bc4-bf0e-63d5b1650279-utilities\") pod \"certified-operators-c94nt\" (UID: \"869bd67a-b271-4bc4-bf0e-63d5b1650279\") " pod="openshift-marketplace/certified-operators-c94nt"
Nov 28 15:32:47 crc kubenswrapper[4857]: I1128 15:32:47.213009 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2nmrp\" (UniqueName: \"kubernetes.io/projected/869bd67a-b271-4bc4-bf0e-63d5b1650279-kube-api-access-2nmrp\") pod \"certified-operators-c94nt\" (UID: \"869bd67a-b271-4bc4-bf0e-63d5b1650279\") " pod="openshift-marketplace/certified-operators-c94nt"
Nov 28 15:32:47 crc kubenswrapper[4857]: I1128 15:32:47.213690 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/869bd67a-b271-4bc4-bf0e-63d5b1650279-catalog-content\") pod \"certified-operators-c94nt\" (UID: \"869bd67a-b271-4bc4-bf0e-63d5b1650279\") " pod="openshift-marketplace/certified-operators-c94nt"
Nov 28 15:32:47 crc kubenswrapper[4857]: I1128 15:32:47.213889 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/869bd67a-b271-4bc4-bf0e-63d5b1650279-utilities\") pod \"certified-operators-c94nt\" (UID: \"869bd67a-b271-4bc4-bf0e-63d5b1650279\") " pod="openshift-marketplace/certified-operators-c94nt"
Nov 28 15:32:47 crc kubenswrapper[4857]: I1128 15:32:47.234174 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2nmrp\" (UniqueName: \"kubernetes.io/projected/869bd67a-b271-4bc4-bf0e-63d5b1650279-kube-api-access-2nmrp\") pod \"certified-operators-c94nt\" (UID: \"869bd67a-b271-4bc4-bf0e-63d5b1650279\") " pod="openshift-marketplace/certified-operators-c94nt"
Nov 28 15:32:47 crc kubenswrapper[4857]: I1128 15:32:47.347916 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c94nt"
Need to start a new one" pod="openshift-marketplace/certified-operators-c94nt" Nov 28 15:32:47 crc kubenswrapper[4857]: I1128 15:32:47.880502 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c94nt"] Nov 28 15:32:47 crc kubenswrapper[4857]: W1128 15:32:47.883680 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod869bd67a_b271_4bc4_bf0e_63d5b1650279.slice/crio-110464baebba16df804b484b458221d2c3c9f59b470c49984eca0eac5d54ae24 WatchSource:0}: Error finding container 110464baebba16df804b484b458221d2c3c9f59b470c49984eca0eac5d54ae24: Status 404 returned error can't find the container with id 110464baebba16df804b484b458221d2c3c9f59b470c49984eca0eac5d54ae24 Nov 28 15:32:48 crc kubenswrapper[4857]: I1128 15:32:48.769657 4857 generic.go:334] "Generic (PLEG): container finished" podID="869bd67a-b271-4bc4-bf0e-63d5b1650279" containerID="ac52586b53cb27e651dc8e66453237bb82a7719b87d4e16ab6d0f63aefc6fa4b" exitCode=0 Nov 28 15:32:48 crc kubenswrapper[4857]: I1128 15:32:48.769784 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c94nt" event={"ID":"869bd67a-b271-4bc4-bf0e-63d5b1650279","Type":"ContainerDied","Data":"ac52586b53cb27e651dc8e66453237bb82a7719b87d4e16ab6d0f63aefc6fa4b"} Nov 28 15:32:48 crc kubenswrapper[4857]: I1128 15:32:48.770159 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c94nt" event={"ID":"869bd67a-b271-4bc4-bf0e-63d5b1650279","Type":"ContainerStarted","Data":"110464baebba16df804b484b458221d2c3c9f59b470c49984eca0eac5d54ae24"} Nov 28 15:32:50 crc kubenswrapper[4857]: I1128 15:32:50.795882 4857 generic.go:334] "Generic (PLEG): container finished" podID="869bd67a-b271-4bc4-bf0e-63d5b1650279" containerID="761b7ab46be62eb4984f426d46f0ad0da3b77a3ce342e963e0f7872be893cc91" exitCode=0 Nov 28 15:32:50 crc kubenswrapper[4857]: I1128 15:32:50.796093 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c94nt" event={"ID":"869bd67a-b271-4bc4-bf0e-63d5b1650279","Type":"ContainerDied","Data":"761b7ab46be62eb4984f426d46f0ad0da3b77a3ce342e963e0f7872be893cc91"} Nov 28 15:32:51 crc kubenswrapper[4857]: I1128 15:32:51.810184 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c94nt" event={"ID":"869bd67a-b271-4bc4-bf0e-63d5b1650279","Type":"ContainerStarted","Data":"88a487d44d52b46d1be0bfcdb4a2b149e69517ea31a738921015d57ba076abdc"} Nov 28 15:32:51 crc kubenswrapper[4857]: I1128 15:32:51.843451 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-c94nt" podStartSLOduration=3.172764098 podStartE2EDuration="5.843420693s" podCreationTimestamp="2025-11-28 15:32:46 +0000 UTC" firstStartedPulling="2025-11-28 15:32:48.772601315 +0000 UTC m=+7418.896542772" lastFinishedPulling="2025-11-28 15:32:51.44325793 +0000 UTC m=+7421.567199367" observedRunningTime="2025-11-28 15:32:51.835353988 +0000 UTC m=+7421.959295445" watchObservedRunningTime="2025-11-28 15:32:51.843420693 +0000 UTC m=+7421.967362150" Nov 28 15:32:55 crc kubenswrapper[4857]: I1128 15:32:55.230416 4857 scope.go:117] "RemoveContainer" containerID="8ce4d8d1204f0e33eb25524e56c57a576adc7638a6767a35e3118bbb0412c0ed" Nov 28 15:32:55 crc kubenswrapper[4857]: E1128 15:32:55.231378 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:32:57 crc kubenswrapper[4857]: I1128 15:32:57.348505 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-c94nt" Nov 28 15:32:57 crc kubenswrapper[4857]: I1128 15:32:57.348578 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-c94nt" Nov 28 15:32:57 crc kubenswrapper[4857]: I1128 15:32:57.405294 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-c94nt" Nov 28 15:32:57 crc kubenswrapper[4857]: I1128 15:32:57.977841 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-c94nt" Nov 28 15:33:00 crc kubenswrapper[4857]: I1128 15:33:00.208097 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-c94nt"] Nov 28 15:33:00 crc kubenswrapper[4857]: I1128 15:33:00.209057 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-c94nt" podUID="869bd67a-b271-4bc4-bf0e-63d5b1650279" containerName="registry-server" containerID="cri-o://88a487d44d52b46d1be0bfcdb4a2b149e69517ea31a738921015d57ba076abdc" gracePeriod=2 Nov 28 15:33:00 crc kubenswrapper[4857]: I1128 15:33:00.943372 4857 generic.go:334] "Generic (PLEG): container finished" podID="869bd67a-b271-4bc4-bf0e-63d5b1650279" containerID="88a487d44d52b46d1be0bfcdb4a2b149e69517ea31a738921015d57ba076abdc" exitCode=0 Nov 28 15:33:00 crc kubenswrapper[4857]: I1128 15:33:00.943490 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c94nt" event={"ID":"869bd67a-b271-4bc4-bf0e-63d5b1650279","Type":"ContainerDied","Data":"88a487d44d52b46d1be0bfcdb4a2b149e69517ea31a738921015d57ba076abdc"} Nov 28 15:33:01 crc kubenswrapper[4857]: I1128 15:33:01.296463 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-c94nt" Nov 28 15:33:01 crc kubenswrapper[4857]: I1128 15:33:01.317320 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2nmrp\" (UniqueName: \"kubernetes.io/projected/869bd67a-b271-4bc4-bf0e-63d5b1650279-kube-api-access-2nmrp\") pod \"869bd67a-b271-4bc4-bf0e-63d5b1650279\" (UID: \"869bd67a-b271-4bc4-bf0e-63d5b1650279\") " Nov 28 15:33:01 crc kubenswrapper[4857]: I1128 15:33:01.317641 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/869bd67a-b271-4bc4-bf0e-63d5b1650279-utilities\") pod \"869bd67a-b271-4bc4-bf0e-63d5b1650279\" (UID: \"869bd67a-b271-4bc4-bf0e-63d5b1650279\") " Nov 28 15:33:01 crc kubenswrapper[4857]: I1128 15:33:01.317924 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/869bd67a-b271-4bc4-bf0e-63d5b1650279-catalog-content\") pod \"869bd67a-b271-4bc4-bf0e-63d5b1650279\" (UID: \"869bd67a-b271-4bc4-bf0e-63d5b1650279\") " Nov 28 15:33:01 crc kubenswrapper[4857]: I1128 15:33:01.318545 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/869bd67a-b271-4bc4-bf0e-63d5b1650279-utilities" (OuterVolumeSpecName: "utilities") pod "869bd67a-b271-4bc4-bf0e-63d5b1650279" (UID: "869bd67a-b271-4bc4-bf0e-63d5b1650279"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:33:01 crc kubenswrapper[4857]: I1128 15:33:01.325681 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/869bd67a-b271-4bc4-bf0e-63d5b1650279-kube-api-access-2nmrp" (OuterVolumeSpecName: "kube-api-access-2nmrp") pod "869bd67a-b271-4bc4-bf0e-63d5b1650279" (UID: "869bd67a-b271-4bc4-bf0e-63d5b1650279"). InnerVolumeSpecName "kube-api-access-2nmrp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:33:01 crc kubenswrapper[4857]: I1128 15:33:01.389161 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/869bd67a-b271-4bc4-bf0e-63d5b1650279-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "869bd67a-b271-4bc4-bf0e-63d5b1650279" (UID: "869bd67a-b271-4bc4-bf0e-63d5b1650279"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:33:01 crc kubenswrapper[4857]: I1128 15:33:01.422224 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/869bd67a-b271-4bc4-bf0e-63d5b1650279-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:33:01 crc kubenswrapper[4857]: I1128 15:33:01.422269 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/869bd67a-b271-4bc4-bf0e-63d5b1650279-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:33:01 crc kubenswrapper[4857]: I1128 15:33:01.422283 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2nmrp\" (UniqueName: \"kubernetes.io/projected/869bd67a-b271-4bc4-bf0e-63d5b1650279-kube-api-access-2nmrp\") on node \"crc\" DevicePath \"\"" Nov 28 15:33:01 crc kubenswrapper[4857]: I1128 15:33:01.961591 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c94nt" event={"ID":"869bd67a-b271-4bc4-bf0e-63d5b1650279","Type":"ContainerDied","Data":"110464baebba16df804b484b458221d2c3c9f59b470c49984eca0eac5d54ae24"} Nov 28 15:33:01 crc kubenswrapper[4857]: I1128 15:33:01.961707 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c94nt" Nov 28 15:33:01 crc kubenswrapper[4857]: I1128 15:33:01.962190 4857 scope.go:117] "RemoveContainer" containerID="88a487d44d52b46d1be0bfcdb4a2b149e69517ea31a738921015d57ba076abdc" Nov 28 15:33:01 crc kubenswrapper[4857]: I1128 15:33:01.999616 4857 scope.go:117] "RemoveContainer" containerID="761b7ab46be62eb4984f426d46f0ad0da3b77a3ce342e963e0f7872be893cc91" Nov 28 15:33:02 crc kubenswrapper[4857]: I1128 15:33:02.033415 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-c94nt"] Nov 28 15:33:02 crc kubenswrapper[4857]: I1128 15:33:02.046727 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-c94nt"] Nov 28 15:33:02 crc kubenswrapper[4857]: I1128 15:33:02.047811 4857 scope.go:117] "RemoveContainer" containerID="ac52586b53cb27e651dc8e66453237bb82a7719b87d4e16ab6d0f63aefc6fa4b" Nov 28 15:33:02 crc kubenswrapper[4857]: I1128 15:33:02.258537 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="869bd67a-b271-4bc4-bf0e-63d5b1650279" path="/var/lib/kubelet/pods/869bd67a-b271-4bc4-bf0e-63d5b1650279/volumes" Nov 28 15:33:09 crc kubenswrapper[4857]: I1128 15:33:09.229307 4857 scope.go:117] "RemoveContainer" containerID="8ce4d8d1204f0e33eb25524e56c57a576adc7638a6767a35e3118bbb0412c0ed" Nov 28 15:33:09 crc kubenswrapper[4857]: E1128 15:33:09.230713 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:33:20 crc kubenswrapper[4857]: I1128 15:33:20.239217 4857 scope.go:117] "RemoveContainer" containerID="8ce4d8d1204f0e33eb25524e56c57a576adc7638a6767a35e3118bbb0412c0ed" Nov 28 15:33:20 crc kubenswrapper[4857]: E1128 15:33:20.241066 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" 
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:33:31 crc kubenswrapper[4857]: I1128 15:33:31.230155 4857 scope.go:117] "RemoveContainer" containerID="8ce4d8d1204f0e33eb25524e56c57a576adc7638a6767a35e3118bbb0412c0ed" Nov 28 15:33:31 crc kubenswrapper[4857]: E1128 15:33:31.232234 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:33:46 crc kubenswrapper[4857]: I1128 15:33:46.229390 4857 scope.go:117] "RemoveContainer" containerID="8ce4d8d1204f0e33eb25524e56c57a576adc7638a6767a35e3118bbb0412c0ed" Nov 28 15:33:46 crc kubenswrapper[4857]: E1128 15:33:46.230399 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:33:54 crc kubenswrapper[4857]: I1128 15:33:54.615865 4857 generic.go:334] "Generic (PLEG): container finished" podID="d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb" containerID="62a25325708d761ed0141cddbfa816ec84610a74b54e94f967c2f17dc160d1e9" exitCode=0 Nov 28 15:33:54 crc kubenswrapper[4857]: I1128 15:33:54.616417 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-9gtn8" event={"ID":"d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb","Type":"ContainerDied","Data":"62a25325708d761ed0141cddbfa816ec84610a74b54e94f967c2f17dc160d1e9"} Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.146856 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-9gtn8" Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.196661 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb-ceph\") pod \"d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb\" (UID: \"d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb\") " Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.197125 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb-bootstrap-combined-ca-bundle\") pod \"d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb\" (UID: \"d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb\") " Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.197502 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-52vh5\" (UniqueName: \"kubernetes.io/projected/d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb-kube-api-access-52vh5\") pod \"d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb\" (UID: \"d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb\") " Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.197562 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb-ssh-key\") pod \"d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb\" (UID: \"d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb\") " Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.197633 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb-inventory\") pod \"d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb\" (UID: \"d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb\") " Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.210815 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb" (UID: "d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.221698 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb-ceph" (OuterVolumeSpecName: "ceph") pod "d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb" (UID: "d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.222577 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb-kube-api-access-52vh5" (OuterVolumeSpecName: "kube-api-access-52vh5") pod "d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb" (UID: "d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb"). InnerVolumeSpecName "kube-api-access-52vh5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.248570 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb" (UID: "d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.269071 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb-inventory" (OuterVolumeSpecName: "inventory") pod "d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb" (UID: "d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.301785 4857 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.301831 4857 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.301847 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-52vh5\" (UniqueName: \"kubernetes.io/projected/d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb-kube-api-access-52vh5\") on node \"crc\" DevicePath \"\"" Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.301858 4857 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.301871 4857 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.638914 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-9gtn8" event={"ID":"d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb","Type":"ContainerDied","Data":"6856907f15e42f981d5ee653794a094a8039839d2552fc33dcd629b5f67a9e4d"} Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.638995 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6856907f15e42f981d5ee653794a094a8039839d2552fc33dcd629b5f67a9e4d" Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.639006 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-9gtn8" Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.793399 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-openstack-openstack-cell1-zsc8l"] Nov 28 15:33:56 crc kubenswrapper[4857]: E1128 15:33:56.793916 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb" containerName="bootstrap-openstack-openstack-cell1" Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.793934 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb" containerName="bootstrap-openstack-openstack-cell1" Nov 28 15:33:56 crc kubenswrapper[4857]: E1128 15:33:56.793973 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="869bd67a-b271-4bc4-bf0e-63d5b1650279" containerName="extract-utilities" Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.793982 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="869bd67a-b271-4bc4-bf0e-63d5b1650279" containerName="extract-utilities" Nov 28 15:33:56 crc kubenswrapper[4857]: E1128 15:33:56.794001 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="869bd67a-b271-4bc4-bf0e-63d5b1650279" containerName="extract-content" Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.794007 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="869bd67a-b271-4bc4-bf0e-63d5b1650279" containerName="extract-content" Nov 28 15:33:56 crc kubenswrapper[4857]: E1128 15:33:56.794025 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="869bd67a-b271-4bc4-bf0e-63d5b1650279" containerName="registry-server" Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.794032 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="869bd67a-b271-4bc4-bf0e-63d5b1650279" containerName="registry-server" Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.794254 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb" containerName="bootstrap-openstack-openstack-cell1" Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.794278 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="869bd67a-b271-4bc4-bf0e-63d5b1650279" containerName="registry-server" Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.798808 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-zsc8l" Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.802750 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.802907 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.803292 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-qbvn9" Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.804838 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.805669 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-openstack-openstack-cell1-zsc8l"] Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.814074 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m8n2t\" (UniqueName: \"kubernetes.io/projected/0eb2007a-4a59-43fa-980c-e76277c303c1-kube-api-access-m8n2t\") pod \"download-cache-openstack-openstack-cell1-zsc8l\" (UID: \"0eb2007a-4a59-43fa-980c-e76277c303c1\") " pod="openstack/download-cache-openstack-openstack-cell1-zsc8l" Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.814121 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0eb2007a-4a59-43fa-980c-e76277c303c1-inventory\") pod \"download-cache-openstack-openstack-cell1-zsc8l\" (UID: \"0eb2007a-4a59-43fa-980c-e76277c303c1\") " pod="openstack/download-cache-openstack-openstack-cell1-zsc8l" Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.814270 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0eb2007a-4a59-43fa-980c-e76277c303c1-ceph\") pod \"download-cache-openstack-openstack-cell1-zsc8l\" (UID: \"0eb2007a-4a59-43fa-980c-e76277c303c1\") " pod="openstack/download-cache-openstack-openstack-cell1-zsc8l" Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.814376 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0eb2007a-4a59-43fa-980c-e76277c303c1-ssh-key\") pod \"download-cache-openstack-openstack-cell1-zsc8l\" (UID: \"0eb2007a-4a59-43fa-980c-e76277c303c1\") " pod="openstack/download-cache-openstack-openstack-cell1-zsc8l" Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.917331 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0eb2007a-4a59-43fa-980c-e76277c303c1-inventory\") pod \"download-cache-openstack-openstack-cell1-zsc8l\" (UID: \"0eb2007a-4a59-43fa-980c-e76277c303c1\") " pod="openstack/download-cache-openstack-openstack-cell1-zsc8l" Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.917450 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0eb2007a-4a59-43fa-980c-e76277c303c1-ceph\") pod \"download-cache-openstack-openstack-cell1-zsc8l\" (UID: \"0eb2007a-4a59-43fa-980c-e76277c303c1\") " pod="openstack/download-cache-openstack-openstack-cell1-zsc8l" Nov 28 15:33:56 crc 
kubenswrapper[4857]: I1128 15:33:56.917541 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0eb2007a-4a59-43fa-980c-e76277c303c1-ssh-key\") pod \"download-cache-openstack-openstack-cell1-zsc8l\" (UID: \"0eb2007a-4a59-43fa-980c-e76277c303c1\") " pod="openstack/download-cache-openstack-openstack-cell1-zsc8l" Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.917698 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m8n2t\" (UniqueName: \"kubernetes.io/projected/0eb2007a-4a59-43fa-980c-e76277c303c1-kube-api-access-m8n2t\") pod \"download-cache-openstack-openstack-cell1-zsc8l\" (UID: \"0eb2007a-4a59-43fa-980c-e76277c303c1\") " pod="openstack/download-cache-openstack-openstack-cell1-zsc8l" Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.933266 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0eb2007a-4a59-43fa-980c-e76277c303c1-ssh-key\") pod \"download-cache-openstack-openstack-cell1-zsc8l\" (UID: \"0eb2007a-4a59-43fa-980c-e76277c303c1\") " pod="openstack/download-cache-openstack-openstack-cell1-zsc8l" Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.934802 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0eb2007a-4a59-43fa-980c-e76277c303c1-inventory\") pod \"download-cache-openstack-openstack-cell1-zsc8l\" (UID: \"0eb2007a-4a59-43fa-980c-e76277c303c1\") " pod="openstack/download-cache-openstack-openstack-cell1-zsc8l" Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.945183 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0eb2007a-4a59-43fa-980c-e76277c303c1-ceph\") pod \"download-cache-openstack-openstack-cell1-zsc8l\" (UID: \"0eb2007a-4a59-43fa-980c-e76277c303c1\") " pod="openstack/download-cache-openstack-openstack-cell1-zsc8l" Nov 28 15:33:56 crc kubenswrapper[4857]: I1128 15:33:56.951074 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m8n2t\" (UniqueName: \"kubernetes.io/projected/0eb2007a-4a59-43fa-980c-e76277c303c1-kube-api-access-m8n2t\") pod \"download-cache-openstack-openstack-cell1-zsc8l\" (UID: \"0eb2007a-4a59-43fa-980c-e76277c303c1\") " pod="openstack/download-cache-openstack-openstack-cell1-zsc8l" Nov 28 15:33:57 crc kubenswrapper[4857]: I1128 15:33:57.123998 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-zsc8l" Nov 28 15:33:57 crc kubenswrapper[4857]: I1128 15:33:57.771701 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-openstack-openstack-cell1-zsc8l"] Nov 28 15:33:58 crc kubenswrapper[4857]: I1128 15:33:58.670565 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-zsc8l" event={"ID":"0eb2007a-4a59-43fa-980c-e76277c303c1","Type":"ContainerStarted","Data":"59d4af019f23bd6b38b17aab28084f74e578c74d448ad3fa229f4a3a7424aeab"} Nov 28 15:33:58 crc kubenswrapper[4857]: I1128 15:33:58.671098 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-zsc8l" event={"ID":"0eb2007a-4a59-43fa-980c-e76277c303c1","Type":"ContainerStarted","Data":"1be3f2e6bdda0ffa12bea2b76f000b37a6f5f2acaba791f470634942aa6631b0"} Nov 28 15:33:58 crc kubenswrapper[4857]: I1128 15:33:58.700329 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-openstack-openstack-cell1-zsc8l" podStartSLOduration=2.482715161 podStartE2EDuration="2.700217747s" podCreationTimestamp="2025-11-28 15:33:56 +0000 UTC" firstStartedPulling="2025-11-28 15:33:57.777580962 +0000 UTC m=+7487.901522399" lastFinishedPulling="2025-11-28 15:33:57.995083508 +0000 UTC m=+7488.119024985" observedRunningTime="2025-11-28 15:33:58.688263848 +0000 UTC m=+7488.812205325" watchObservedRunningTime="2025-11-28 15:33:58.700217747 +0000 UTC m=+7488.824159224" Nov 28 15:34:00 crc kubenswrapper[4857]: I1128 15:34:00.230162 4857 scope.go:117] "RemoveContainer" containerID="8ce4d8d1204f0e33eb25524e56c57a576adc7638a6767a35e3118bbb0412c0ed" Nov 28 15:34:00 crc kubenswrapper[4857]: E1128 15:34:00.231499 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:34:11 crc kubenswrapper[4857]: I1128 15:34:11.229661 4857 scope.go:117] "RemoveContainer" containerID="8ce4d8d1204f0e33eb25524e56c57a576adc7638a6767a35e3118bbb0412c0ed" Nov 28 15:34:11 crc kubenswrapper[4857]: E1128 15:34:11.231234 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:34:22 crc kubenswrapper[4857]: I1128 15:34:22.230102 4857 scope.go:117] "RemoveContainer" containerID="8ce4d8d1204f0e33eb25524e56c57a576adc7638a6767a35e3118bbb0412c0ed" Nov 28 15:34:23 crc kubenswrapper[4857]: I1128 15:34:23.994297 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerStarted","Data":"a38ec213781fad7de97f923228698d06b20cca26201275d6ad39d000d8a37327"} Nov 28 15:35:34 crc kubenswrapper[4857]: I1128 15:35:34.918049 4857 generic.go:334] "Generic (PLEG): container 
finished" podID="0eb2007a-4a59-43fa-980c-e76277c303c1" containerID="59d4af019f23bd6b38b17aab28084f74e578c74d448ad3fa229f4a3a7424aeab" exitCode=0 Nov 28 15:35:34 crc kubenswrapper[4857]: I1128 15:35:34.918748 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-zsc8l" event={"ID":"0eb2007a-4a59-43fa-980c-e76277c303c1","Type":"ContainerDied","Data":"59d4af019f23bd6b38b17aab28084f74e578c74d448ad3fa229f4a3a7424aeab"} Nov 28 15:35:36 crc kubenswrapper[4857]: I1128 15:35:36.527047 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-zsc8l" Nov 28 15:35:36 crc kubenswrapper[4857]: I1128 15:35:36.712494 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0eb2007a-4a59-43fa-980c-e76277c303c1-ssh-key\") pod \"0eb2007a-4a59-43fa-980c-e76277c303c1\" (UID: \"0eb2007a-4a59-43fa-980c-e76277c303c1\") " Nov 28 15:35:36 crc kubenswrapper[4857]: I1128 15:35:36.712546 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0eb2007a-4a59-43fa-980c-e76277c303c1-ceph\") pod \"0eb2007a-4a59-43fa-980c-e76277c303c1\" (UID: \"0eb2007a-4a59-43fa-980c-e76277c303c1\") " Nov 28 15:35:36 crc kubenswrapper[4857]: I1128 15:35:36.712569 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m8n2t\" (UniqueName: \"kubernetes.io/projected/0eb2007a-4a59-43fa-980c-e76277c303c1-kube-api-access-m8n2t\") pod \"0eb2007a-4a59-43fa-980c-e76277c303c1\" (UID: \"0eb2007a-4a59-43fa-980c-e76277c303c1\") " Nov 28 15:35:36 crc kubenswrapper[4857]: I1128 15:35:36.712742 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0eb2007a-4a59-43fa-980c-e76277c303c1-inventory\") pod \"0eb2007a-4a59-43fa-980c-e76277c303c1\" (UID: \"0eb2007a-4a59-43fa-980c-e76277c303c1\") " Nov 28 15:35:36 crc kubenswrapper[4857]: I1128 15:35:36.719361 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0eb2007a-4a59-43fa-980c-e76277c303c1-ceph" (OuterVolumeSpecName: "ceph") pod "0eb2007a-4a59-43fa-980c-e76277c303c1" (UID: "0eb2007a-4a59-43fa-980c-e76277c303c1"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:35:36 crc kubenswrapper[4857]: I1128 15:35:36.725129 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0eb2007a-4a59-43fa-980c-e76277c303c1-kube-api-access-m8n2t" (OuterVolumeSpecName: "kube-api-access-m8n2t") pod "0eb2007a-4a59-43fa-980c-e76277c303c1" (UID: "0eb2007a-4a59-43fa-980c-e76277c303c1"). InnerVolumeSpecName "kube-api-access-m8n2t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:35:36 crc kubenswrapper[4857]: E1128 15:35:36.747525 4857 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0eb2007a-4a59-43fa-980c-e76277c303c1-ssh-key podName:0eb2007a-4a59-43fa-980c-e76277c303c1 nodeName:}" failed. No retries permitted until 2025-11-28 15:35:37.247497908 +0000 UTC m=+7587.371439345 (durationBeforeRetry 500ms). 
Error: error cleaning subPath mounts for volume "ssh-key" (UniqueName: "kubernetes.io/secret/0eb2007a-4a59-43fa-980c-e76277c303c1-ssh-key") pod "0eb2007a-4a59-43fa-980c-e76277c303c1" (UID: "0eb2007a-4a59-43fa-980c-e76277c303c1") : error deleting /var/lib/kubelet/pods/0eb2007a-4a59-43fa-980c-e76277c303c1/volume-subpaths: remove /var/lib/kubelet/pods/0eb2007a-4a59-43fa-980c-e76277c303c1/volume-subpaths: no such file or directory Nov 28 15:35:36 crc kubenswrapper[4857]: I1128 15:35:36.750262 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0eb2007a-4a59-43fa-980c-e76277c303c1-inventory" (OuterVolumeSpecName: "inventory") pod "0eb2007a-4a59-43fa-980c-e76277c303c1" (UID: "0eb2007a-4a59-43fa-980c-e76277c303c1"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:35:36 crc kubenswrapper[4857]: I1128 15:35:36.815106 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m8n2t\" (UniqueName: \"kubernetes.io/projected/0eb2007a-4a59-43fa-980c-e76277c303c1-kube-api-access-m8n2t\") on node \"crc\" DevicePath \"\"" Nov 28 15:35:36 crc kubenswrapper[4857]: I1128 15:35:36.815144 4857 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0eb2007a-4a59-43fa-980c-e76277c303c1-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 15:35:36 crc kubenswrapper[4857]: I1128 15:35:36.815162 4857 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/0eb2007a-4a59-43fa-980c-e76277c303c1-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 15:35:36 crc kubenswrapper[4857]: I1128 15:35:36.941321 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-zsc8l" event={"ID":"0eb2007a-4a59-43fa-980c-e76277c303c1","Type":"ContainerDied","Data":"1be3f2e6bdda0ffa12bea2b76f000b37a6f5f2acaba791f470634942aa6631b0"} Nov 28 15:35:36 crc kubenswrapper[4857]: I1128 15:35:36.941377 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1be3f2e6bdda0ffa12bea2b76f000b37a6f5f2acaba791f470634942aa6631b0" Nov 28 15:35:36 crc kubenswrapper[4857]: I1128 15:35:36.941446 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-zsc8l" Nov 28 15:35:37 crc kubenswrapper[4857]: I1128 15:35:37.032202 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-openstack-openstack-cell1-tw87j"] Nov 28 15:35:37 crc kubenswrapper[4857]: E1128 15:35:37.032670 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0eb2007a-4a59-43fa-980c-e76277c303c1" containerName="download-cache-openstack-openstack-cell1" Nov 28 15:35:37 crc kubenswrapper[4857]: I1128 15:35:37.032687 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="0eb2007a-4a59-43fa-980c-e76277c303c1" containerName="download-cache-openstack-openstack-cell1" Nov 28 15:35:37 crc kubenswrapper[4857]: I1128 15:35:37.032872 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="0eb2007a-4a59-43fa-980c-e76277c303c1" containerName="download-cache-openstack-openstack-cell1" Nov 28 15:35:37 crc kubenswrapper[4857]: I1128 15:35:37.033710 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-tw87j" Nov 28 15:35:37 crc kubenswrapper[4857]: I1128 15:35:37.046531 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-openstack-openstack-cell1-tw87j"] Nov 28 15:35:37 crc kubenswrapper[4857]: I1128 15:35:37.122129 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/957ab666-234a-4cd4-827f-746823e02d5a-ssh-key\") pod \"configure-network-openstack-openstack-cell1-tw87j\" (UID: \"957ab666-234a-4cd4-827f-746823e02d5a\") " pod="openstack/configure-network-openstack-openstack-cell1-tw87j" Nov 28 15:35:37 crc kubenswrapper[4857]: I1128 15:35:37.122223 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/957ab666-234a-4cd4-827f-746823e02d5a-ceph\") pod \"configure-network-openstack-openstack-cell1-tw87j\" (UID: \"957ab666-234a-4cd4-827f-746823e02d5a\") " pod="openstack/configure-network-openstack-openstack-cell1-tw87j" Nov 28 15:35:37 crc kubenswrapper[4857]: I1128 15:35:37.122247 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bxc5f\" (UniqueName: \"kubernetes.io/projected/957ab666-234a-4cd4-827f-746823e02d5a-kube-api-access-bxc5f\") pod \"configure-network-openstack-openstack-cell1-tw87j\" (UID: \"957ab666-234a-4cd4-827f-746823e02d5a\") " pod="openstack/configure-network-openstack-openstack-cell1-tw87j" Nov 28 15:35:37 crc kubenswrapper[4857]: I1128 15:35:37.122384 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/957ab666-234a-4cd4-827f-746823e02d5a-inventory\") pod \"configure-network-openstack-openstack-cell1-tw87j\" (UID: \"957ab666-234a-4cd4-827f-746823e02d5a\") " pod="openstack/configure-network-openstack-openstack-cell1-tw87j" Nov 28 15:35:37 crc kubenswrapper[4857]: I1128 15:35:37.224410 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/957ab666-234a-4cd4-827f-746823e02d5a-ssh-key\") pod \"configure-network-openstack-openstack-cell1-tw87j\" (UID: \"957ab666-234a-4cd4-827f-746823e02d5a\") " pod="openstack/configure-network-openstack-openstack-cell1-tw87j" Nov 28 15:35:37 crc kubenswrapper[4857]: I1128 15:35:37.224920 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/957ab666-234a-4cd4-827f-746823e02d5a-ceph\") pod \"configure-network-openstack-openstack-cell1-tw87j\" (UID: \"957ab666-234a-4cd4-827f-746823e02d5a\") " pod="openstack/configure-network-openstack-openstack-cell1-tw87j" Nov 28 15:35:37 crc kubenswrapper[4857]: I1128 15:35:37.225085 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bxc5f\" (UniqueName: \"kubernetes.io/projected/957ab666-234a-4cd4-827f-746823e02d5a-kube-api-access-bxc5f\") pod \"configure-network-openstack-openstack-cell1-tw87j\" (UID: \"957ab666-234a-4cd4-827f-746823e02d5a\") " pod="openstack/configure-network-openstack-openstack-cell1-tw87j" Nov 28 15:35:37 crc kubenswrapper[4857]: I1128 15:35:37.225308 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/957ab666-234a-4cd4-827f-746823e02d5a-inventory\") pod 
\"configure-network-openstack-openstack-cell1-tw87j\" (UID: \"957ab666-234a-4cd4-827f-746823e02d5a\") " pod="openstack/configure-network-openstack-openstack-cell1-tw87j" Nov 28 15:35:37 crc kubenswrapper[4857]: I1128 15:35:37.230333 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/957ab666-234a-4cd4-827f-746823e02d5a-ceph\") pod \"configure-network-openstack-openstack-cell1-tw87j\" (UID: \"957ab666-234a-4cd4-827f-746823e02d5a\") " pod="openstack/configure-network-openstack-openstack-cell1-tw87j" Nov 28 15:35:37 crc kubenswrapper[4857]: I1128 15:35:37.231189 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/957ab666-234a-4cd4-827f-746823e02d5a-ssh-key\") pod \"configure-network-openstack-openstack-cell1-tw87j\" (UID: \"957ab666-234a-4cd4-827f-746823e02d5a\") " pod="openstack/configure-network-openstack-openstack-cell1-tw87j" Nov 28 15:35:37 crc kubenswrapper[4857]: I1128 15:35:37.231244 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/957ab666-234a-4cd4-827f-746823e02d5a-inventory\") pod \"configure-network-openstack-openstack-cell1-tw87j\" (UID: \"957ab666-234a-4cd4-827f-746823e02d5a\") " pod="openstack/configure-network-openstack-openstack-cell1-tw87j" Nov 28 15:35:37 crc kubenswrapper[4857]: I1128 15:35:37.247117 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bxc5f\" (UniqueName: \"kubernetes.io/projected/957ab666-234a-4cd4-827f-746823e02d5a-kube-api-access-bxc5f\") pod \"configure-network-openstack-openstack-cell1-tw87j\" (UID: \"957ab666-234a-4cd4-827f-746823e02d5a\") " pod="openstack/configure-network-openstack-openstack-cell1-tw87j" Nov 28 15:35:37 crc kubenswrapper[4857]: I1128 15:35:37.326877 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0eb2007a-4a59-43fa-980c-e76277c303c1-ssh-key\") pod \"0eb2007a-4a59-43fa-980c-e76277c303c1\" (UID: \"0eb2007a-4a59-43fa-980c-e76277c303c1\") " Nov 28 15:35:37 crc kubenswrapper[4857]: I1128 15:35:37.331925 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0eb2007a-4a59-43fa-980c-e76277c303c1-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "0eb2007a-4a59-43fa-980c-e76277c303c1" (UID: "0eb2007a-4a59-43fa-980c-e76277c303c1"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:35:37 crc kubenswrapper[4857]: I1128 15:35:37.351930 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-tw87j" Nov 28 15:35:37 crc kubenswrapper[4857]: I1128 15:35:37.429516 4857 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0eb2007a-4a59-43fa-980c-e76277c303c1-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 15:35:38 crc kubenswrapper[4857]: I1128 15:35:38.000225 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-openstack-openstack-cell1-tw87j"] Nov 28 15:35:38 crc kubenswrapper[4857]: I1128 15:35:38.976260 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-tw87j" event={"ID":"957ab666-234a-4cd4-827f-746823e02d5a","Type":"ContainerStarted","Data":"97f80cf9c78a05d78d013494ed54eb3b13212ed013a66b4642d9bb1faca4c65f"} Nov 28 15:35:38 crc kubenswrapper[4857]: I1128 15:35:38.976974 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-tw87j" event={"ID":"957ab666-234a-4cd4-827f-746823e02d5a","Type":"ContainerStarted","Data":"c28c43c8466995f04fa280a749e96a3b01d8e4027f1335b510e49991980d6e0b"} Nov 28 15:35:39 crc kubenswrapper[4857]: I1128 15:35:39.015551 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-openstack-openstack-cell1-tw87j" podStartSLOduration=1.8323157920000002 podStartE2EDuration="2.015523594s" podCreationTimestamp="2025-11-28 15:35:37 +0000 UTC" firstStartedPulling="2025-11-28 15:35:38.010088763 +0000 UTC m=+7588.134030200" lastFinishedPulling="2025-11-28 15:35:38.193296565 +0000 UTC m=+7588.317238002" observedRunningTime="2025-11-28 15:35:39.000622687 +0000 UTC m=+7589.124564144" watchObservedRunningTime="2025-11-28 15:35:39.015523594 +0000 UTC m=+7589.139465031" Nov 28 15:36:41 crc kubenswrapper[4857]: I1128 15:36:41.309238 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:36:41 crc kubenswrapper[4857]: I1128 15:36:41.309920 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:36:57 crc kubenswrapper[4857]: I1128 15:36:57.929092 4857 generic.go:334] "Generic (PLEG): container finished" podID="957ab666-234a-4cd4-827f-746823e02d5a" containerID="97f80cf9c78a05d78d013494ed54eb3b13212ed013a66b4642d9bb1faca4c65f" exitCode=0 Nov 28 15:36:57 crc kubenswrapper[4857]: I1128 15:36:57.929168 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-tw87j" event={"ID":"957ab666-234a-4cd4-827f-746823e02d5a","Type":"ContainerDied","Data":"97f80cf9c78a05d78d013494ed54eb3b13212ed013a66b4642d9bb1faca4c65f"} Nov 28 15:36:59 crc kubenswrapper[4857]: I1128 15:36:59.454717 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-tw87j" Nov 28 15:36:59 crc kubenswrapper[4857]: I1128 15:36:59.588502 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/957ab666-234a-4cd4-827f-746823e02d5a-inventory\") pod \"957ab666-234a-4cd4-827f-746823e02d5a\" (UID: \"957ab666-234a-4cd4-827f-746823e02d5a\") " Nov 28 15:36:59 crc kubenswrapper[4857]: I1128 15:36:59.589000 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bxc5f\" (UniqueName: \"kubernetes.io/projected/957ab666-234a-4cd4-827f-746823e02d5a-kube-api-access-bxc5f\") pod \"957ab666-234a-4cd4-827f-746823e02d5a\" (UID: \"957ab666-234a-4cd4-827f-746823e02d5a\") " Nov 28 15:36:59 crc kubenswrapper[4857]: I1128 15:36:59.589156 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/957ab666-234a-4cd4-827f-746823e02d5a-ceph\") pod \"957ab666-234a-4cd4-827f-746823e02d5a\" (UID: \"957ab666-234a-4cd4-827f-746823e02d5a\") " Nov 28 15:36:59 crc kubenswrapper[4857]: I1128 15:36:59.589209 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/957ab666-234a-4cd4-827f-746823e02d5a-ssh-key\") pod \"957ab666-234a-4cd4-827f-746823e02d5a\" (UID: \"957ab666-234a-4cd4-827f-746823e02d5a\") " Nov 28 15:36:59 crc kubenswrapper[4857]: I1128 15:36:59.595337 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/957ab666-234a-4cd4-827f-746823e02d5a-kube-api-access-bxc5f" (OuterVolumeSpecName: "kube-api-access-bxc5f") pod "957ab666-234a-4cd4-827f-746823e02d5a" (UID: "957ab666-234a-4cd4-827f-746823e02d5a"). InnerVolumeSpecName "kube-api-access-bxc5f". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:36:59 crc kubenswrapper[4857]: I1128 15:36:59.604828 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/957ab666-234a-4cd4-827f-746823e02d5a-ceph" (OuterVolumeSpecName: "ceph") pod "957ab666-234a-4cd4-827f-746823e02d5a" (UID: "957ab666-234a-4cd4-827f-746823e02d5a"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:36:59 crc kubenswrapper[4857]: I1128 15:36:59.619743 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/957ab666-234a-4cd4-827f-746823e02d5a-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "957ab666-234a-4cd4-827f-746823e02d5a" (UID: "957ab666-234a-4cd4-827f-746823e02d5a"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:36:59 crc kubenswrapper[4857]: I1128 15:36:59.631928 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/957ab666-234a-4cd4-827f-746823e02d5a-inventory" (OuterVolumeSpecName: "inventory") pod "957ab666-234a-4cd4-827f-746823e02d5a" (UID: "957ab666-234a-4cd4-827f-746823e02d5a"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:36:59 crc kubenswrapper[4857]: I1128 15:36:59.691451 4857 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/957ab666-234a-4cd4-827f-746823e02d5a-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 15:36:59 crc kubenswrapper[4857]: I1128 15:36:59.691491 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bxc5f\" (UniqueName: \"kubernetes.io/projected/957ab666-234a-4cd4-827f-746823e02d5a-kube-api-access-bxc5f\") on node \"crc\" DevicePath \"\"" Nov 28 15:36:59 crc kubenswrapper[4857]: I1128 15:36:59.691504 4857 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/957ab666-234a-4cd4-827f-746823e02d5a-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 15:36:59 crc kubenswrapper[4857]: I1128 15:36:59.691513 4857 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/957ab666-234a-4cd4-827f-746823e02d5a-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 15:36:59 crc kubenswrapper[4857]: I1128 15:36:59.954011 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-tw87j" event={"ID":"957ab666-234a-4cd4-827f-746823e02d5a","Type":"ContainerDied","Data":"c28c43c8466995f04fa280a749e96a3b01d8e4027f1335b510e49991980d6e0b"} Nov 28 15:36:59 crc kubenswrapper[4857]: I1128 15:36:59.954064 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c28c43c8466995f04fa280a749e96a3b01d8e4027f1335b510e49991980d6e0b" Nov 28 15:36:59 crc kubenswrapper[4857]: I1128 15:36:59.954526 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-tw87j" Nov 28 15:37:00 crc kubenswrapper[4857]: I1128 15:37:00.071227 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-openstack-openstack-cell1-xt5v2"] Nov 28 15:37:00 crc kubenswrapper[4857]: E1128 15:37:00.071838 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="957ab666-234a-4cd4-827f-746823e02d5a" containerName="configure-network-openstack-openstack-cell1" Nov 28 15:37:00 crc kubenswrapper[4857]: I1128 15:37:00.071854 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="957ab666-234a-4cd4-827f-746823e02d5a" containerName="configure-network-openstack-openstack-cell1" Nov 28 15:37:00 crc kubenswrapper[4857]: I1128 15:37:00.072081 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="957ab666-234a-4cd4-827f-746823e02d5a" containerName="configure-network-openstack-openstack-cell1" Nov 28 15:37:00 crc kubenswrapper[4857]: I1128 15:37:00.072898 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-xt5v2" Nov 28 15:37:00 crc kubenswrapper[4857]: I1128 15:37:00.077058 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-qbvn9" Nov 28 15:37:00 crc kubenswrapper[4857]: I1128 15:37:00.077206 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 15:37:00 crc kubenswrapper[4857]: I1128 15:37:00.077332 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 15:37:00 crc kubenswrapper[4857]: I1128 15:37:00.077439 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 15:37:00 crc kubenswrapper[4857]: I1128 15:37:00.097922 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-openstack-openstack-cell1-xt5v2"] Nov 28 15:37:00 crc kubenswrapper[4857]: I1128 15:37:00.204145 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/64695c5c-02dd-4c82-bbc7-aa7262c16397-ssh-key\") pod \"validate-network-openstack-openstack-cell1-xt5v2\" (UID: \"64695c5c-02dd-4c82-bbc7-aa7262c16397\") " pod="openstack/validate-network-openstack-openstack-cell1-xt5v2" Nov 28 15:37:00 crc kubenswrapper[4857]: I1128 15:37:00.204195 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/64695c5c-02dd-4c82-bbc7-aa7262c16397-inventory\") pod \"validate-network-openstack-openstack-cell1-xt5v2\" (UID: \"64695c5c-02dd-4c82-bbc7-aa7262c16397\") " pod="openstack/validate-network-openstack-openstack-cell1-xt5v2" Nov 28 15:37:00 crc kubenswrapper[4857]: I1128 15:37:00.204227 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4nd79\" (UniqueName: \"kubernetes.io/projected/64695c5c-02dd-4c82-bbc7-aa7262c16397-kube-api-access-4nd79\") pod \"validate-network-openstack-openstack-cell1-xt5v2\" (UID: \"64695c5c-02dd-4c82-bbc7-aa7262c16397\") " pod="openstack/validate-network-openstack-openstack-cell1-xt5v2" Nov 28 15:37:00 crc kubenswrapper[4857]: I1128 15:37:00.204322 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/64695c5c-02dd-4c82-bbc7-aa7262c16397-ceph\") pod \"validate-network-openstack-openstack-cell1-xt5v2\" (UID: \"64695c5c-02dd-4c82-bbc7-aa7262c16397\") " pod="openstack/validate-network-openstack-openstack-cell1-xt5v2" Nov 28 15:37:00 crc kubenswrapper[4857]: I1128 15:37:00.307289 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/64695c5c-02dd-4c82-bbc7-aa7262c16397-ceph\") pod \"validate-network-openstack-openstack-cell1-xt5v2\" (UID: \"64695c5c-02dd-4c82-bbc7-aa7262c16397\") " pod="openstack/validate-network-openstack-openstack-cell1-xt5v2" Nov 28 15:37:00 crc kubenswrapper[4857]: I1128 15:37:00.309624 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/64695c5c-02dd-4c82-bbc7-aa7262c16397-ssh-key\") pod \"validate-network-openstack-openstack-cell1-xt5v2\" (UID: \"64695c5c-02dd-4c82-bbc7-aa7262c16397\") " pod="openstack/validate-network-openstack-openstack-cell1-xt5v2" Nov 28 
15:37:00 crc kubenswrapper[4857]: I1128 15:37:00.310154 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/64695c5c-02dd-4c82-bbc7-aa7262c16397-inventory\") pod \"validate-network-openstack-openstack-cell1-xt5v2\" (UID: \"64695c5c-02dd-4c82-bbc7-aa7262c16397\") " pod="openstack/validate-network-openstack-openstack-cell1-xt5v2" Nov 28 15:37:00 crc kubenswrapper[4857]: I1128 15:37:00.310343 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4nd79\" (UniqueName: \"kubernetes.io/projected/64695c5c-02dd-4c82-bbc7-aa7262c16397-kube-api-access-4nd79\") pod \"validate-network-openstack-openstack-cell1-xt5v2\" (UID: \"64695c5c-02dd-4c82-bbc7-aa7262c16397\") " pod="openstack/validate-network-openstack-openstack-cell1-xt5v2" Nov 28 15:37:00 crc kubenswrapper[4857]: I1128 15:37:00.311684 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/64695c5c-02dd-4c82-bbc7-aa7262c16397-ceph\") pod \"validate-network-openstack-openstack-cell1-xt5v2\" (UID: \"64695c5c-02dd-4c82-bbc7-aa7262c16397\") " pod="openstack/validate-network-openstack-openstack-cell1-xt5v2" Nov 28 15:37:00 crc kubenswrapper[4857]: I1128 15:37:00.313057 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/64695c5c-02dd-4c82-bbc7-aa7262c16397-ssh-key\") pod \"validate-network-openstack-openstack-cell1-xt5v2\" (UID: \"64695c5c-02dd-4c82-bbc7-aa7262c16397\") " pod="openstack/validate-network-openstack-openstack-cell1-xt5v2" Nov 28 15:37:00 crc kubenswrapper[4857]: I1128 15:37:00.323511 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/64695c5c-02dd-4c82-bbc7-aa7262c16397-inventory\") pod \"validate-network-openstack-openstack-cell1-xt5v2\" (UID: \"64695c5c-02dd-4c82-bbc7-aa7262c16397\") " pod="openstack/validate-network-openstack-openstack-cell1-xt5v2" Nov 28 15:37:00 crc kubenswrapper[4857]: I1128 15:37:00.329818 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4nd79\" (UniqueName: \"kubernetes.io/projected/64695c5c-02dd-4c82-bbc7-aa7262c16397-kube-api-access-4nd79\") pod \"validate-network-openstack-openstack-cell1-xt5v2\" (UID: \"64695c5c-02dd-4c82-bbc7-aa7262c16397\") " pod="openstack/validate-network-openstack-openstack-cell1-xt5v2" Nov 28 15:37:00 crc kubenswrapper[4857]: I1128 15:37:00.403033 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-xt5v2" Nov 28 15:37:01 crc kubenswrapper[4857]: I1128 15:37:01.004438 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-openstack-openstack-cell1-xt5v2"] Nov 28 15:37:01 crc kubenswrapper[4857]: I1128 15:37:01.978599 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-xt5v2" event={"ID":"64695c5c-02dd-4c82-bbc7-aa7262c16397","Type":"ContainerStarted","Data":"fa26f8122b711645e6671184a8e36c755d0c6784c23168afb21d67d424dbeebb"} Nov 28 15:37:01 crc kubenswrapper[4857]: I1128 15:37:01.979262 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-xt5v2" event={"ID":"64695c5c-02dd-4c82-bbc7-aa7262c16397","Type":"ContainerStarted","Data":"de415032b937ef641b61e1828aacb8848a2788d5753daa23dad2afd28e007faf"} Nov 28 15:37:02 crc kubenswrapper[4857]: I1128 15:37:02.013291 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-openstack-openstack-cell1-xt5v2" podStartSLOduration=1.8375197559999998 podStartE2EDuration="2.013258888s" podCreationTimestamp="2025-11-28 15:37:00 +0000 UTC" firstStartedPulling="2025-11-28 15:37:01.014097725 +0000 UTC m=+7671.138039172" lastFinishedPulling="2025-11-28 15:37:01.189836857 +0000 UTC m=+7671.313778304" observedRunningTime="2025-11-28 15:37:01.999504142 +0000 UTC m=+7672.123445589" watchObservedRunningTime="2025-11-28 15:37:02.013258888 +0000 UTC m=+7672.137200365" Nov 28 15:37:07 crc kubenswrapper[4857]: I1128 15:37:07.056913 4857 generic.go:334] "Generic (PLEG): container finished" podID="64695c5c-02dd-4c82-bbc7-aa7262c16397" containerID="fa26f8122b711645e6671184a8e36c755d0c6784c23168afb21d67d424dbeebb" exitCode=0 Nov 28 15:37:07 crc kubenswrapper[4857]: I1128 15:37:07.056992 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-xt5v2" event={"ID":"64695c5c-02dd-4c82-bbc7-aa7262c16397","Type":"ContainerDied","Data":"fa26f8122b711645e6671184a8e36c755d0c6784c23168afb21d67d424dbeebb"} Nov 28 15:37:08 crc kubenswrapper[4857]: I1128 15:37:08.611362 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-xt5v2" Nov 28 15:37:08 crc kubenswrapper[4857]: I1128 15:37:08.729748 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/64695c5c-02dd-4c82-bbc7-aa7262c16397-ceph\") pod \"64695c5c-02dd-4c82-bbc7-aa7262c16397\" (UID: \"64695c5c-02dd-4c82-bbc7-aa7262c16397\") " Nov 28 15:37:08 crc kubenswrapper[4857]: I1128 15:37:08.730076 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4nd79\" (UniqueName: \"kubernetes.io/projected/64695c5c-02dd-4c82-bbc7-aa7262c16397-kube-api-access-4nd79\") pod \"64695c5c-02dd-4c82-bbc7-aa7262c16397\" (UID: \"64695c5c-02dd-4c82-bbc7-aa7262c16397\") " Nov 28 15:37:08 crc kubenswrapper[4857]: I1128 15:37:08.730111 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/64695c5c-02dd-4c82-bbc7-aa7262c16397-inventory\") pod \"64695c5c-02dd-4c82-bbc7-aa7262c16397\" (UID: \"64695c5c-02dd-4c82-bbc7-aa7262c16397\") " Nov 28 15:37:08 crc kubenswrapper[4857]: I1128 15:37:08.730134 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/64695c5c-02dd-4c82-bbc7-aa7262c16397-ssh-key\") pod \"64695c5c-02dd-4c82-bbc7-aa7262c16397\" (UID: \"64695c5c-02dd-4c82-bbc7-aa7262c16397\") " Nov 28 15:37:08 crc kubenswrapper[4857]: I1128 15:37:08.736208 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64695c5c-02dd-4c82-bbc7-aa7262c16397-ceph" (OuterVolumeSpecName: "ceph") pod "64695c5c-02dd-4c82-bbc7-aa7262c16397" (UID: "64695c5c-02dd-4c82-bbc7-aa7262c16397"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:37:08 crc kubenswrapper[4857]: I1128 15:37:08.736841 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64695c5c-02dd-4c82-bbc7-aa7262c16397-kube-api-access-4nd79" (OuterVolumeSpecName: "kube-api-access-4nd79") pod "64695c5c-02dd-4c82-bbc7-aa7262c16397" (UID: "64695c5c-02dd-4c82-bbc7-aa7262c16397"). InnerVolumeSpecName "kube-api-access-4nd79". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:37:08 crc kubenswrapper[4857]: I1128 15:37:08.761906 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64695c5c-02dd-4c82-bbc7-aa7262c16397-inventory" (OuterVolumeSpecName: "inventory") pod "64695c5c-02dd-4c82-bbc7-aa7262c16397" (UID: "64695c5c-02dd-4c82-bbc7-aa7262c16397"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:37:08 crc kubenswrapper[4857]: I1128 15:37:08.765107 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/64695c5c-02dd-4c82-bbc7-aa7262c16397-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "64695c5c-02dd-4c82-bbc7-aa7262c16397" (UID: "64695c5c-02dd-4c82-bbc7-aa7262c16397"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:37:08 crc kubenswrapper[4857]: I1128 15:37:08.833195 4857 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/64695c5c-02dd-4c82-bbc7-aa7262c16397-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 15:37:08 crc kubenswrapper[4857]: I1128 15:37:08.833235 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4nd79\" (UniqueName: \"kubernetes.io/projected/64695c5c-02dd-4c82-bbc7-aa7262c16397-kube-api-access-4nd79\") on node \"crc\" DevicePath \"\"" Nov 28 15:37:08 crc kubenswrapper[4857]: I1128 15:37:08.833251 4857 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/64695c5c-02dd-4c82-bbc7-aa7262c16397-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 15:37:08 crc kubenswrapper[4857]: I1128 15:37:08.833262 4857 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/64695c5c-02dd-4c82-bbc7-aa7262c16397-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 15:37:09 crc kubenswrapper[4857]: I1128 15:37:09.081133 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-xt5v2" event={"ID":"64695c5c-02dd-4c82-bbc7-aa7262c16397","Type":"ContainerDied","Data":"de415032b937ef641b61e1828aacb8848a2788d5753daa23dad2afd28e007faf"} Nov 28 15:37:09 crc kubenswrapper[4857]: I1128 15:37:09.081447 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="de415032b937ef641b61e1828aacb8848a2788d5753daa23dad2afd28e007faf" Nov 28 15:37:09 crc kubenswrapper[4857]: I1128 15:37:09.081151 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-xt5v2" Nov 28 15:37:09 crc kubenswrapper[4857]: I1128 15:37:09.160184 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-openstack-openstack-cell1-csvdf"] Nov 28 15:37:09 crc kubenswrapper[4857]: E1128 15:37:09.160633 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64695c5c-02dd-4c82-bbc7-aa7262c16397" containerName="validate-network-openstack-openstack-cell1" Nov 28 15:37:09 crc kubenswrapper[4857]: I1128 15:37:09.160653 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="64695c5c-02dd-4c82-bbc7-aa7262c16397" containerName="validate-network-openstack-openstack-cell1" Nov 28 15:37:09 crc kubenswrapper[4857]: I1128 15:37:09.160890 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="64695c5c-02dd-4c82-bbc7-aa7262c16397" containerName="validate-network-openstack-openstack-cell1" Nov 28 15:37:09 crc kubenswrapper[4857]: I1128 15:37:09.161777 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-csvdf" Nov 28 15:37:09 crc kubenswrapper[4857]: I1128 15:37:09.170449 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-qbvn9" Nov 28 15:37:09 crc kubenswrapper[4857]: I1128 15:37:09.170730 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 15:37:09 crc kubenswrapper[4857]: I1128 15:37:09.170823 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 15:37:09 crc kubenswrapper[4857]: I1128 15:37:09.170918 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 15:37:09 crc kubenswrapper[4857]: I1128 15:37:09.176223 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-openstack-openstack-cell1-csvdf"] Nov 28 15:37:09 crc kubenswrapper[4857]: I1128 15:37:09.342398 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-49ld6\" (UniqueName: \"kubernetes.io/projected/f6435193-0fdc-46d5-a005-72ac32d221b4-kube-api-access-49ld6\") pod \"install-os-openstack-openstack-cell1-csvdf\" (UID: \"f6435193-0fdc-46d5-a005-72ac32d221b4\") " pod="openstack/install-os-openstack-openstack-cell1-csvdf" Nov 28 15:37:09 crc kubenswrapper[4857]: I1128 15:37:09.342447 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/f6435193-0fdc-46d5-a005-72ac32d221b4-ceph\") pod \"install-os-openstack-openstack-cell1-csvdf\" (UID: \"f6435193-0fdc-46d5-a005-72ac32d221b4\") " pod="openstack/install-os-openstack-openstack-cell1-csvdf" Nov 28 15:37:09 crc kubenswrapper[4857]: I1128 15:37:09.342720 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f6435193-0fdc-46d5-a005-72ac32d221b4-ssh-key\") pod \"install-os-openstack-openstack-cell1-csvdf\" (UID: \"f6435193-0fdc-46d5-a005-72ac32d221b4\") " pod="openstack/install-os-openstack-openstack-cell1-csvdf" Nov 28 15:37:09 crc kubenswrapper[4857]: I1128 15:37:09.342832 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f6435193-0fdc-46d5-a005-72ac32d221b4-inventory\") pod \"install-os-openstack-openstack-cell1-csvdf\" (UID: \"f6435193-0fdc-46d5-a005-72ac32d221b4\") " pod="openstack/install-os-openstack-openstack-cell1-csvdf" Nov 28 15:37:09 crc kubenswrapper[4857]: I1128 15:37:09.444994 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-49ld6\" (UniqueName: \"kubernetes.io/projected/f6435193-0fdc-46d5-a005-72ac32d221b4-kube-api-access-49ld6\") pod \"install-os-openstack-openstack-cell1-csvdf\" (UID: \"f6435193-0fdc-46d5-a005-72ac32d221b4\") " pod="openstack/install-os-openstack-openstack-cell1-csvdf" Nov 28 15:37:09 crc kubenswrapper[4857]: I1128 15:37:09.445067 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/f6435193-0fdc-46d5-a005-72ac32d221b4-ceph\") pod \"install-os-openstack-openstack-cell1-csvdf\" (UID: \"f6435193-0fdc-46d5-a005-72ac32d221b4\") " pod="openstack/install-os-openstack-openstack-cell1-csvdf" Nov 28 15:37:09 crc kubenswrapper[4857]: I1128 15:37:09.445185 
4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f6435193-0fdc-46d5-a005-72ac32d221b4-ssh-key\") pod \"install-os-openstack-openstack-cell1-csvdf\" (UID: \"f6435193-0fdc-46d5-a005-72ac32d221b4\") " pod="openstack/install-os-openstack-openstack-cell1-csvdf" Nov 28 15:37:09 crc kubenswrapper[4857]: I1128 15:37:09.445241 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f6435193-0fdc-46d5-a005-72ac32d221b4-inventory\") pod \"install-os-openstack-openstack-cell1-csvdf\" (UID: \"f6435193-0fdc-46d5-a005-72ac32d221b4\") " pod="openstack/install-os-openstack-openstack-cell1-csvdf" Nov 28 15:37:09 crc kubenswrapper[4857]: I1128 15:37:09.449982 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f6435193-0fdc-46d5-a005-72ac32d221b4-ssh-key\") pod \"install-os-openstack-openstack-cell1-csvdf\" (UID: \"f6435193-0fdc-46d5-a005-72ac32d221b4\") " pod="openstack/install-os-openstack-openstack-cell1-csvdf" Nov 28 15:37:09 crc kubenswrapper[4857]: I1128 15:37:09.450928 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f6435193-0fdc-46d5-a005-72ac32d221b4-inventory\") pod \"install-os-openstack-openstack-cell1-csvdf\" (UID: \"f6435193-0fdc-46d5-a005-72ac32d221b4\") " pod="openstack/install-os-openstack-openstack-cell1-csvdf" Nov 28 15:37:09 crc kubenswrapper[4857]: I1128 15:37:09.459341 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/f6435193-0fdc-46d5-a005-72ac32d221b4-ceph\") pod \"install-os-openstack-openstack-cell1-csvdf\" (UID: \"f6435193-0fdc-46d5-a005-72ac32d221b4\") " pod="openstack/install-os-openstack-openstack-cell1-csvdf" Nov 28 15:37:09 crc kubenswrapper[4857]: I1128 15:37:09.463678 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-49ld6\" (UniqueName: \"kubernetes.io/projected/f6435193-0fdc-46d5-a005-72ac32d221b4-kube-api-access-49ld6\") pod \"install-os-openstack-openstack-cell1-csvdf\" (UID: \"f6435193-0fdc-46d5-a005-72ac32d221b4\") " pod="openstack/install-os-openstack-openstack-cell1-csvdf" Nov 28 15:37:09 crc kubenswrapper[4857]: I1128 15:37:09.489061 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-csvdf" Nov 28 15:37:10 crc kubenswrapper[4857]: I1128 15:37:10.021731 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-openstack-openstack-cell1-csvdf"] Nov 28 15:37:10 crc kubenswrapper[4857]: I1128 15:37:10.032799 4857 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 15:37:10 crc kubenswrapper[4857]: I1128 15:37:10.094342 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-csvdf" event={"ID":"f6435193-0fdc-46d5-a005-72ac32d221b4","Type":"ContainerStarted","Data":"03971c0774d3893dcef051051794f27819b1a6c7252cb483564368e7ca77462c"} Nov 28 15:37:10 crc kubenswrapper[4857]: I1128 15:37:10.574451 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 15:37:11 crc kubenswrapper[4857]: I1128 15:37:11.117112 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-csvdf" event={"ID":"f6435193-0fdc-46d5-a005-72ac32d221b4","Type":"ContainerStarted","Data":"5cbcb1f631117d4c088efbf5a6c41e8b34c5ab41839d8f1882f906d0c9c631b8"} Nov 28 15:37:11 crc kubenswrapper[4857]: I1128 15:37:11.145417 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-openstack-openstack-cell1-csvdf" podStartSLOduration=1.606731089 podStartE2EDuration="2.145214628s" podCreationTimestamp="2025-11-28 15:37:09 +0000 UTC" firstStartedPulling="2025-11-28 15:37:10.032555939 +0000 UTC m=+7680.156497376" lastFinishedPulling="2025-11-28 15:37:10.571039438 +0000 UTC m=+7680.694980915" observedRunningTime="2025-11-28 15:37:11.133507816 +0000 UTC m=+7681.257449253" watchObservedRunningTime="2025-11-28 15:37:11.145214628 +0000 UTC m=+7681.269156065" Nov 28 15:37:11 crc kubenswrapper[4857]: I1128 15:37:11.308620 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:37:11 crc kubenswrapper[4857]: I1128 15:37:11.308689 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:37:25 crc kubenswrapper[4857]: I1128 15:37:25.553673 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-pr22z"] Nov 28 15:37:25 crc kubenswrapper[4857]: I1128 15:37:25.577676 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pr22z"] Nov 28 15:37:25 crc kubenswrapper[4857]: I1128 15:37:25.577812 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-pr22z" Nov 28 15:37:25 crc kubenswrapper[4857]: I1128 15:37:25.621317 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rs9k8\" (UniqueName: \"kubernetes.io/projected/d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9-kube-api-access-rs9k8\") pod \"redhat-operators-pr22z\" (UID: \"d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9\") " pod="openshift-marketplace/redhat-operators-pr22z" Nov 28 15:37:25 crc kubenswrapper[4857]: I1128 15:37:25.621451 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9-catalog-content\") pod \"redhat-operators-pr22z\" (UID: \"d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9\") " pod="openshift-marketplace/redhat-operators-pr22z" Nov 28 15:37:25 crc kubenswrapper[4857]: I1128 15:37:25.621522 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9-utilities\") pod \"redhat-operators-pr22z\" (UID: \"d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9\") " pod="openshift-marketplace/redhat-operators-pr22z" Nov 28 15:37:25 crc kubenswrapper[4857]: I1128 15:37:25.723364 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rs9k8\" (UniqueName: \"kubernetes.io/projected/d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9-kube-api-access-rs9k8\") pod \"redhat-operators-pr22z\" (UID: \"d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9\") " pod="openshift-marketplace/redhat-operators-pr22z" Nov 28 15:37:25 crc kubenswrapper[4857]: I1128 15:37:25.723428 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9-catalog-content\") pod \"redhat-operators-pr22z\" (UID: \"d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9\") " pod="openshift-marketplace/redhat-operators-pr22z" Nov 28 15:37:25 crc kubenswrapper[4857]: I1128 15:37:25.723478 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9-utilities\") pod \"redhat-operators-pr22z\" (UID: \"d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9\") " pod="openshift-marketplace/redhat-operators-pr22z" Nov 28 15:37:25 crc kubenswrapper[4857]: I1128 15:37:25.724130 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9-utilities\") pod \"redhat-operators-pr22z\" (UID: \"d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9\") " pod="openshift-marketplace/redhat-operators-pr22z" Nov 28 15:37:25 crc kubenswrapper[4857]: I1128 15:37:25.724379 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9-catalog-content\") pod \"redhat-operators-pr22z\" (UID: \"d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9\") " pod="openshift-marketplace/redhat-operators-pr22z" Nov 28 15:37:25 crc kubenswrapper[4857]: I1128 15:37:25.748695 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rs9k8\" (UniqueName: \"kubernetes.io/projected/d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9-kube-api-access-rs9k8\") pod \"redhat-operators-pr22z\" (UID: 
\"d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9\") " pod="openshift-marketplace/redhat-operators-pr22z" Nov 28 15:37:25 crc kubenswrapper[4857]: I1128 15:37:25.928734 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pr22z" Nov 28 15:37:26 crc kubenswrapper[4857]: I1128 15:37:26.478547 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pr22z"] Nov 28 15:37:26 crc kubenswrapper[4857]: W1128 15:37:26.479901 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1a04f32_4cbc_44cf_bb14_b3d44ff24ca9.slice/crio-045c6067cc5b166fd825b57a43b9819343bcb6c835edf9b0e837300552279362 WatchSource:0}: Error finding container 045c6067cc5b166fd825b57a43b9819343bcb6c835edf9b0e837300552279362: Status 404 returned error can't find the container with id 045c6067cc5b166fd825b57a43b9819343bcb6c835edf9b0e837300552279362 Nov 28 15:37:27 crc kubenswrapper[4857]: I1128 15:37:27.346556 4857 generic.go:334] "Generic (PLEG): container finished" podID="d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9" containerID="04607283e04a494091f7ddc925060dac9ee052342871c32d0fce4aee9899c52a" exitCode=0 Nov 28 15:37:27 crc kubenswrapper[4857]: I1128 15:37:27.346669 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pr22z" event={"ID":"d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9","Type":"ContainerDied","Data":"04607283e04a494091f7ddc925060dac9ee052342871c32d0fce4aee9899c52a"} Nov 28 15:37:27 crc kubenswrapper[4857]: I1128 15:37:27.347233 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pr22z" event={"ID":"d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9","Type":"ContainerStarted","Data":"045c6067cc5b166fd825b57a43b9819343bcb6c835edf9b0e837300552279362"} Nov 28 15:37:29 crc kubenswrapper[4857]: I1128 15:37:29.373159 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pr22z" event={"ID":"d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9","Type":"ContainerStarted","Data":"b7f64186f2e4e8b91c9ea2a446427a4e54b7c5eeab1175de345494b9fd9c4c63"} Nov 28 15:37:31 crc kubenswrapper[4857]: I1128 15:37:31.393928 4857 generic.go:334] "Generic (PLEG): container finished" podID="d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9" containerID="b7f64186f2e4e8b91c9ea2a446427a4e54b7c5eeab1175de345494b9fd9c4c63" exitCode=0 Nov 28 15:37:31 crc kubenswrapper[4857]: I1128 15:37:31.393998 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pr22z" event={"ID":"d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9","Type":"ContainerDied","Data":"b7f64186f2e4e8b91c9ea2a446427a4e54b7c5eeab1175de345494b9fd9c4c63"} Nov 28 15:37:32 crc kubenswrapper[4857]: I1128 15:37:32.408689 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pr22z" event={"ID":"d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9","Type":"ContainerStarted","Data":"cddcb2cd1e1ee8abc8415148e18051ae32f9908ead6dcc093ebbb6fbe5e646fb"} Nov 28 15:37:32 crc kubenswrapper[4857]: I1128 15:37:32.434638 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-pr22z" podStartSLOduration=2.828833607 podStartE2EDuration="7.434602378s" podCreationTimestamp="2025-11-28 15:37:25 +0000 UTC" firstStartedPulling="2025-11-28 15:37:27.348864057 +0000 UTC m=+7697.472805494" lastFinishedPulling="2025-11-28 15:37:31.954632818 +0000 UTC 
m=+7702.078574265" observedRunningTime="2025-11-28 15:37:32.433412366 +0000 UTC m=+7702.557353803" watchObservedRunningTime="2025-11-28 15:37:32.434602378 +0000 UTC m=+7702.558543825" Nov 28 15:37:35 crc kubenswrapper[4857]: I1128 15:37:35.929789 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-pr22z" Nov 28 15:37:35 crc kubenswrapper[4857]: I1128 15:37:35.930399 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-pr22z" Nov 28 15:37:36 crc kubenswrapper[4857]: I1128 15:37:36.973102 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pr22z" podUID="d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9" containerName="registry-server" probeResult="failure" output=< Nov 28 15:37:36 crc kubenswrapper[4857]: timeout: failed to connect service ":50051" within 1s Nov 28 15:37:36 crc kubenswrapper[4857]: > Nov 28 15:37:41 crc kubenswrapper[4857]: I1128 15:37:41.308414 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:37:41 crc kubenswrapper[4857]: I1128 15:37:41.309246 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:37:41 crc kubenswrapper[4857]: I1128 15:37:41.309308 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 15:37:41 crc kubenswrapper[4857]: I1128 15:37:41.310222 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a38ec213781fad7de97f923228698d06b20cca26201275d6ad39d000d8a37327"} pod="openshift-machine-config-operator/machine-config-daemon-dshsf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 15:37:41 crc kubenswrapper[4857]: I1128 15:37:41.310294 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" containerID="cri-o://a38ec213781fad7de97f923228698d06b20cca26201275d6ad39d000d8a37327" gracePeriod=600 Nov 28 15:37:41 crc kubenswrapper[4857]: I1128 15:37:41.524981 4857 generic.go:334] "Generic (PLEG): container finished" podID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerID="a38ec213781fad7de97f923228698d06b20cca26201275d6ad39d000d8a37327" exitCode=0 Nov 28 15:37:41 crc kubenswrapper[4857]: I1128 15:37:41.525031 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerDied","Data":"a38ec213781fad7de97f923228698d06b20cca26201275d6ad39d000d8a37327"} Nov 28 15:37:41 crc kubenswrapper[4857]: I1128 15:37:41.525064 4857 scope.go:117] "RemoveContainer" containerID="8ce4d8d1204f0e33eb25524e56c57a576adc7638a6767a35e3118bbb0412c0ed" Nov 28 15:37:42 crc 
kubenswrapper[4857]: I1128 15:37:42.537130 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerStarted","Data":"2fb8ed319a4d63abaa543df3c6375c2d445c7842c0e60d5249ec84ade1868d55"} Nov 28 15:37:46 crc kubenswrapper[4857]: I1128 15:37:46.002052 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-pr22z" Nov 28 15:37:46 crc kubenswrapper[4857]: I1128 15:37:46.070414 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-pr22z" Nov 28 15:37:46 crc kubenswrapper[4857]: I1128 15:37:46.252335 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pr22z"] Nov 28 15:37:47 crc kubenswrapper[4857]: I1128 15:37:47.595193 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-pr22z" podUID="d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9" containerName="registry-server" containerID="cri-o://cddcb2cd1e1ee8abc8415148e18051ae32f9908ead6dcc093ebbb6fbe5e646fb" gracePeriod=2 Nov 28 15:37:48 crc kubenswrapper[4857]: I1128 15:37:48.608250 4857 generic.go:334] "Generic (PLEG): container finished" podID="d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9" containerID="cddcb2cd1e1ee8abc8415148e18051ae32f9908ead6dcc093ebbb6fbe5e646fb" exitCode=0 Nov 28 15:37:48 crc kubenswrapper[4857]: I1128 15:37:48.608320 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pr22z" event={"ID":"d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9","Type":"ContainerDied","Data":"cddcb2cd1e1ee8abc8415148e18051ae32f9908ead6dcc093ebbb6fbe5e646fb"} Nov 28 15:37:48 crc kubenswrapper[4857]: I1128 15:37:48.608650 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pr22z" event={"ID":"d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9","Type":"ContainerDied","Data":"045c6067cc5b166fd825b57a43b9819343bcb6c835edf9b0e837300552279362"} Nov 28 15:37:48 crc kubenswrapper[4857]: I1128 15:37:48.608663 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="045c6067cc5b166fd825b57a43b9819343bcb6c835edf9b0e837300552279362" Nov 28 15:37:48 crc kubenswrapper[4857]: I1128 15:37:48.681124 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-pr22z" Nov 28 15:37:48 crc kubenswrapper[4857]: I1128 15:37:48.745224 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9-utilities\") pod \"d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9\" (UID: \"d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9\") " Nov 28 15:37:48 crc kubenswrapper[4857]: I1128 15:37:48.745327 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9-catalog-content\") pod \"d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9\" (UID: \"d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9\") " Nov 28 15:37:48 crc kubenswrapper[4857]: I1128 15:37:48.745579 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rs9k8\" (UniqueName: \"kubernetes.io/projected/d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9-kube-api-access-rs9k8\") pod \"d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9\" (UID: \"d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9\") " Nov 28 15:37:48 crc kubenswrapper[4857]: I1128 15:37:48.746400 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9-utilities" (OuterVolumeSpecName: "utilities") pod "d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9" (UID: "d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:37:48 crc kubenswrapper[4857]: I1128 15:37:48.755231 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9-kube-api-access-rs9k8" (OuterVolumeSpecName: "kube-api-access-rs9k8") pod "d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9" (UID: "d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9"). InnerVolumeSpecName "kube-api-access-rs9k8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:37:48 crc kubenswrapper[4857]: I1128 15:37:48.850614 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rs9k8\" (UniqueName: \"kubernetes.io/projected/d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9-kube-api-access-rs9k8\") on node \"crc\" DevicePath \"\"" Nov 28 15:37:48 crc kubenswrapper[4857]: I1128 15:37:48.850651 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:37:48 crc kubenswrapper[4857]: I1128 15:37:48.880665 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9" (UID: "d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:37:48 crc kubenswrapper[4857]: I1128 15:37:48.952644 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:37:49 crc kubenswrapper[4857]: I1128 15:37:49.621651 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-pr22z" Nov 28 15:37:49 crc kubenswrapper[4857]: I1128 15:37:49.669035 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pr22z"] Nov 28 15:37:49 crc kubenswrapper[4857]: I1128 15:37:49.681156 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-pr22z"] Nov 28 15:37:50 crc kubenswrapper[4857]: I1128 15:37:50.247915 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9" path="/var/lib/kubelet/pods/d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9/volumes" Nov 28 15:37:56 crc kubenswrapper[4857]: I1128 15:37:56.728331 4857 generic.go:334] "Generic (PLEG): container finished" podID="f6435193-0fdc-46d5-a005-72ac32d221b4" containerID="5cbcb1f631117d4c088efbf5a6c41e8b34c5ab41839d8f1882f906d0c9c631b8" exitCode=0 Nov 28 15:37:56 crc kubenswrapper[4857]: I1128 15:37:56.728430 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-csvdf" event={"ID":"f6435193-0fdc-46d5-a005-72ac32d221b4","Type":"ContainerDied","Data":"5cbcb1f631117d4c088efbf5a6c41e8b34c5ab41839d8f1882f906d0c9c631b8"} Nov 28 15:37:58 crc kubenswrapper[4857]: I1128 15:37:58.186691 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-csvdf" Nov 28 15:37:58 crc kubenswrapper[4857]: I1128 15:37:58.277863 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f6435193-0fdc-46d5-a005-72ac32d221b4-inventory\") pod \"f6435193-0fdc-46d5-a005-72ac32d221b4\" (UID: \"f6435193-0fdc-46d5-a005-72ac32d221b4\") " Nov 28 15:37:58 crc kubenswrapper[4857]: I1128 15:37:58.277962 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/f6435193-0fdc-46d5-a005-72ac32d221b4-ceph\") pod \"f6435193-0fdc-46d5-a005-72ac32d221b4\" (UID: \"f6435193-0fdc-46d5-a005-72ac32d221b4\") " Nov 28 15:37:58 crc kubenswrapper[4857]: I1128 15:37:58.278102 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f6435193-0fdc-46d5-a005-72ac32d221b4-ssh-key\") pod \"f6435193-0fdc-46d5-a005-72ac32d221b4\" (UID: \"f6435193-0fdc-46d5-a005-72ac32d221b4\") " Nov 28 15:37:58 crc kubenswrapper[4857]: I1128 15:37:58.278260 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-49ld6\" (UniqueName: \"kubernetes.io/projected/f6435193-0fdc-46d5-a005-72ac32d221b4-kube-api-access-49ld6\") pod \"f6435193-0fdc-46d5-a005-72ac32d221b4\" (UID: \"f6435193-0fdc-46d5-a005-72ac32d221b4\") " Nov 28 15:37:58 crc kubenswrapper[4857]: I1128 15:37:58.284256 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6435193-0fdc-46d5-a005-72ac32d221b4-ceph" (OuterVolumeSpecName: "ceph") pod "f6435193-0fdc-46d5-a005-72ac32d221b4" (UID: "f6435193-0fdc-46d5-a005-72ac32d221b4"). InnerVolumeSpecName "ceph". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:37:58 crc kubenswrapper[4857]: I1128 15:37:58.284287 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f6435193-0fdc-46d5-a005-72ac32d221b4-kube-api-access-49ld6" (OuterVolumeSpecName: "kube-api-access-49ld6") pod "f6435193-0fdc-46d5-a005-72ac32d221b4" (UID: "f6435193-0fdc-46d5-a005-72ac32d221b4"). InnerVolumeSpecName "kube-api-access-49ld6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:37:58 crc kubenswrapper[4857]: I1128 15:37:58.311299 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6435193-0fdc-46d5-a005-72ac32d221b4-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "f6435193-0fdc-46d5-a005-72ac32d221b4" (UID: "f6435193-0fdc-46d5-a005-72ac32d221b4"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:37:58 crc kubenswrapper[4857]: I1128 15:37:58.314318 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f6435193-0fdc-46d5-a005-72ac32d221b4-inventory" (OuterVolumeSpecName: "inventory") pod "f6435193-0fdc-46d5-a005-72ac32d221b4" (UID: "f6435193-0fdc-46d5-a005-72ac32d221b4"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:37:58 crc kubenswrapper[4857]: I1128 15:37:58.381192 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-49ld6\" (UniqueName: \"kubernetes.io/projected/f6435193-0fdc-46d5-a005-72ac32d221b4-kube-api-access-49ld6\") on node \"crc\" DevicePath \"\"" Nov 28 15:37:58 crc kubenswrapper[4857]: I1128 15:37:58.381232 4857 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/f6435193-0fdc-46d5-a005-72ac32d221b4-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 15:37:58 crc kubenswrapper[4857]: I1128 15:37:58.381242 4857 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/f6435193-0fdc-46d5-a005-72ac32d221b4-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 15:37:58 crc kubenswrapper[4857]: I1128 15:37:58.381250 4857 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/f6435193-0fdc-46d5-a005-72ac32d221b4-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 15:37:58 crc kubenswrapper[4857]: I1128 15:37:58.760281 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-csvdf" event={"ID":"f6435193-0fdc-46d5-a005-72ac32d221b4","Type":"ContainerDied","Data":"03971c0774d3893dcef051051794f27819b1a6c7252cb483564368e7ca77462c"} Nov 28 15:37:58 crc kubenswrapper[4857]: I1128 15:37:58.760369 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="03971c0774d3893dcef051051794f27819b1a6c7252cb483564368e7ca77462c" Nov 28 15:37:58 crc kubenswrapper[4857]: I1128 15:37:58.760400 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-csvdf" Nov 28 15:37:58 crc kubenswrapper[4857]: I1128 15:37:58.900683 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-wlm6l"] Nov 28 15:37:58 crc kubenswrapper[4857]: E1128 15:37:58.901179 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9" containerName="registry-server" Nov 28 15:37:58 crc kubenswrapper[4857]: I1128 15:37:58.901201 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9" containerName="registry-server" Nov 28 15:37:58 crc kubenswrapper[4857]: E1128 15:37:58.901217 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6435193-0fdc-46d5-a005-72ac32d221b4" containerName="install-os-openstack-openstack-cell1" Nov 28 15:37:58 crc kubenswrapper[4857]: I1128 15:37:58.901227 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6435193-0fdc-46d5-a005-72ac32d221b4" containerName="install-os-openstack-openstack-cell1" Nov 28 15:37:58 crc kubenswrapper[4857]: E1128 15:37:58.901246 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9" containerName="extract-utilities" Nov 28 15:37:58 crc kubenswrapper[4857]: I1128 15:37:58.901256 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9" containerName="extract-utilities" Nov 28 15:37:58 crc kubenswrapper[4857]: E1128 15:37:58.901314 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9" containerName="extract-content" Nov 28 15:37:58 crc kubenswrapper[4857]: I1128 15:37:58.901324 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9" containerName="extract-content" Nov 28 15:37:58 crc kubenswrapper[4857]: I1128 15:37:58.901579 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1a04f32-4cbc-44cf-bb14-b3d44ff24ca9" containerName="registry-server" Nov 28 15:37:58 crc kubenswrapper[4857]: I1128 15:37:58.901621 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6435193-0fdc-46d5-a005-72ac32d221b4" containerName="install-os-openstack-openstack-cell1" Nov 28 15:37:58 crc kubenswrapper[4857]: I1128 15:37:58.902549 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-wlm6l" Nov 28 15:37:58 crc kubenswrapper[4857]: I1128 15:37:58.905645 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 15:37:58 crc kubenswrapper[4857]: I1128 15:37:58.906035 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 15:37:58 crc kubenswrapper[4857]: I1128 15:37:58.906892 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 15:37:58 crc kubenswrapper[4857]: I1128 15:37:58.908266 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-qbvn9" Nov 28 15:37:58 crc kubenswrapper[4857]: I1128 15:37:58.926139 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-wlm6l"] Nov 28 15:37:59 crc kubenswrapper[4857]: I1128 15:37:59.011417 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ab248fa2-2b7f-4571-ba24-ed45686e9d06-ceph\") pod \"configure-os-openstack-openstack-cell1-wlm6l\" (UID: \"ab248fa2-2b7f-4571-ba24-ed45686e9d06\") " pod="openstack/configure-os-openstack-openstack-cell1-wlm6l" Nov 28 15:37:59 crc kubenswrapper[4857]: I1128 15:37:59.011522 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-twq6w\" (UniqueName: \"kubernetes.io/projected/ab248fa2-2b7f-4571-ba24-ed45686e9d06-kube-api-access-twq6w\") pod \"configure-os-openstack-openstack-cell1-wlm6l\" (UID: \"ab248fa2-2b7f-4571-ba24-ed45686e9d06\") " pod="openstack/configure-os-openstack-openstack-cell1-wlm6l" Nov 28 15:37:59 crc kubenswrapper[4857]: I1128 15:37:59.011586 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ab248fa2-2b7f-4571-ba24-ed45686e9d06-inventory\") pod \"configure-os-openstack-openstack-cell1-wlm6l\" (UID: \"ab248fa2-2b7f-4571-ba24-ed45686e9d06\") " pod="openstack/configure-os-openstack-openstack-cell1-wlm6l" Nov 28 15:37:59 crc kubenswrapper[4857]: I1128 15:37:59.011652 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ab248fa2-2b7f-4571-ba24-ed45686e9d06-ssh-key\") pod \"configure-os-openstack-openstack-cell1-wlm6l\" (UID: \"ab248fa2-2b7f-4571-ba24-ed45686e9d06\") " pod="openstack/configure-os-openstack-openstack-cell1-wlm6l" Nov 28 15:37:59 crc kubenswrapper[4857]: I1128 15:37:59.113788 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ab248fa2-2b7f-4571-ba24-ed45686e9d06-inventory\") pod \"configure-os-openstack-openstack-cell1-wlm6l\" (UID: \"ab248fa2-2b7f-4571-ba24-ed45686e9d06\") " pod="openstack/configure-os-openstack-openstack-cell1-wlm6l" Nov 28 15:37:59 crc kubenswrapper[4857]: I1128 15:37:59.113876 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ab248fa2-2b7f-4571-ba24-ed45686e9d06-ssh-key\") pod \"configure-os-openstack-openstack-cell1-wlm6l\" (UID: \"ab248fa2-2b7f-4571-ba24-ed45686e9d06\") " pod="openstack/configure-os-openstack-openstack-cell1-wlm6l" Nov 28 15:37:59 crc kubenswrapper[4857]: I1128 
15:37:59.113903 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ab248fa2-2b7f-4571-ba24-ed45686e9d06-ceph\") pod \"configure-os-openstack-openstack-cell1-wlm6l\" (UID: \"ab248fa2-2b7f-4571-ba24-ed45686e9d06\") " pod="openstack/configure-os-openstack-openstack-cell1-wlm6l" Nov 28 15:37:59 crc kubenswrapper[4857]: I1128 15:37:59.114002 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-twq6w\" (UniqueName: \"kubernetes.io/projected/ab248fa2-2b7f-4571-ba24-ed45686e9d06-kube-api-access-twq6w\") pod \"configure-os-openstack-openstack-cell1-wlm6l\" (UID: \"ab248fa2-2b7f-4571-ba24-ed45686e9d06\") " pod="openstack/configure-os-openstack-openstack-cell1-wlm6l" Nov 28 15:37:59 crc kubenswrapper[4857]: I1128 15:37:59.120003 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ab248fa2-2b7f-4571-ba24-ed45686e9d06-inventory\") pod \"configure-os-openstack-openstack-cell1-wlm6l\" (UID: \"ab248fa2-2b7f-4571-ba24-ed45686e9d06\") " pod="openstack/configure-os-openstack-openstack-cell1-wlm6l" Nov 28 15:37:59 crc kubenswrapper[4857]: I1128 15:37:59.120063 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ab248fa2-2b7f-4571-ba24-ed45686e9d06-ssh-key\") pod \"configure-os-openstack-openstack-cell1-wlm6l\" (UID: \"ab248fa2-2b7f-4571-ba24-ed45686e9d06\") " pod="openstack/configure-os-openstack-openstack-cell1-wlm6l" Nov 28 15:37:59 crc kubenswrapper[4857]: I1128 15:37:59.120499 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ab248fa2-2b7f-4571-ba24-ed45686e9d06-ceph\") pod \"configure-os-openstack-openstack-cell1-wlm6l\" (UID: \"ab248fa2-2b7f-4571-ba24-ed45686e9d06\") " pod="openstack/configure-os-openstack-openstack-cell1-wlm6l" Nov 28 15:37:59 crc kubenswrapper[4857]: I1128 15:37:59.136293 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-twq6w\" (UniqueName: \"kubernetes.io/projected/ab248fa2-2b7f-4571-ba24-ed45686e9d06-kube-api-access-twq6w\") pod \"configure-os-openstack-openstack-cell1-wlm6l\" (UID: \"ab248fa2-2b7f-4571-ba24-ed45686e9d06\") " pod="openstack/configure-os-openstack-openstack-cell1-wlm6l" Nov 28 15:37:59 crc kubenswrapper[4857]: I1128 15:37:59.244175 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-wlm6l" Nov 28 15:37:59 crc kubenswrapper[4857]: I1128 15:37:59.825603 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-wlm6l"] Nov 28 15:38:00 crc kubenswrapper[4857]: I1128 15:38:00.785820 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-wlm6l" event={"ID":"ab248fa2-2b7f-4571-ba24-ed45686e9d06","Type":"ContainerStarted","Data":"c22e5e6ba5e756e3f3e8906ed58777a1906e087bbe0f9406e0838d0f61327d1c"} Nov 28 15:38:00 crc kubenswrapper[4857]: I1128 15:38:00.786530 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-wlm6l" event={"ID":"ab248fa2-2b7f-4571-ba24-ed45686e9d06","Type":"ContainerStarted","Data":"3b5defe80e8a7fcbc807733109ff707f58cb485a3236bc5b15296d8135729e30"} Nov 28 15:38:00 crc kubenswrapper[4857]: I1128 15:38:00.808018 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-openstack-openstack-cell1-wlm6l" podStartSLOduration=2.2579480370000002 podStartE2EDuration="2.807998864s" podCreationTimestamp="2025-11-28 15:37:58 +0000 UTC" firstStartedPulling="2025-11-28 15:37:59.83211214 +0000 UTC m=+7729.956053597" lastFinishedPulling="2025-11-28 15:38:00.382162977 +0000 UTC m=+7730.506104424" observedRunningTime="2025-11-28 15:38:00.800827963 +0000 UTC m=+7730.924769410" watchObservedRunningTime="2025-11-28 15:38:00.807998864 +0000 UTC m=+7730.931940321" Nov 28 15:38:45 crc kubenswrapper[4857]: I1128 15:38:45.298590 4857 generic.go:334] "Generic (PLEG): container finished" podID="ab248fa2-2b7f-4571-ba24-ed45686e9d06" containerID="c22e5e6ba5e756e3f3e8906ed58777a1906e087bbe0f9406e0838d0f61327d1c" exitCode=0 Nov 28 15:38:45 crc kubenswrapper[4857]: I1128 15:38:45.298829 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-wlm6l" event={"ID":"ab248fa2-2b7f-4571-ba24-ed45686e9d06","Type":"ContainerDied","Data":"c22e5e6ba5e756e3f3e8906ed58777a1906e087bbe0f9406e0838d0f61327d1c"} Nov 28 15:38:46 crc kubenswrapper[4857]: I1128 15:38:46.756566 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-wlm6l" Nov 28 15:38:46 crc kubenswrapper[4857]: I1128 15:38:46.920090 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ab248fa2-2b7f-4571-ba24-ed45686e9d06-inventory\") pod \"ab248fa2-2b7f-4571-ba24-ed45686e9d06\" (UID: \"ab248fa2-2b7f-4571-ba24-ed45686e9d06\") " Nov 28 15:38:46 crc kubenswrapper[4857]: I1128 15:38:46.920226 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ab248fa2-2b7f-4571-ba24-ed45686e9d06-ceph\") pod \"ab248fa2-2b7f-4571-ba24-ed45686e9d06\" (UID: \"ab248fa2-2b7f-4571-ba24-ed45686e9d06\") " Nov 28 15:38:46 crc kubenswrapper[4857]: I1128 15:38:46.920295 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ab248fa2-2b7f-4571-ba24-ed45686e9d06-ssh-key\") pod \"ab248fa2-2b7f-4571-ba24-ed45686e9d06\" (UID: \"ab248fa2-2b7f-4571-ba24-ed45686e9d06\") " Nov 28 15:38:46 crc kubenswrapper[4857]: I1128 15:38:46.920376 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-twq6w\" (UniqueName: \"kubernetes.io/projected/ab248fa2-2b7f-4571-ba24-ed45686e9d06-kube-api-access-twq6w\") pod \"ab248fa2-2b7f-4571-ba24-ed45686e9d06\" (UID: \"ab248fa2-2b7f-4571-ba24-ed45686e9d06\") " Nov 28 15:38:46 crc kubenswrapper[4857]: I1128 15:38:46.925741 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab248fa2-2b7f-4571-ba24-ed45686e9d06-ceph" (OuterVolumeSpecName: "ceph") pod "ab248fa2-2b7f-4571-ba24-ed45686e9d06" (UID: "ab248fa2-2b7f-4571-ba24-ed45686e9d06"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:38:46 crc kubenswrapper[4857]: I1128 15:38:46.932234 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab248fa2-2b7f-4571-ba24-ed45686e9d06-kube-api-access-twq6w" (OuterVolumeSpecName: "kube-api-access-twq6w") pod "ab248fa2-2b7f-4571-ba24-ed45686e9d06" (UID: "ab248fa2-2b7f-4571-ba24-ed45686e9d06"). InnerVolumeSpecName "kube-api-access-twq6w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:38:46 crc kubenswrapper[4857]: I1128 15:38:46.953061 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab248fa2-2b7f-4571-ba24-ed45686e9d06-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ab248fa2-2b7f-4571-ba24-ed45686e9d06" (UID: "ab248fa2-2b7f-4571-ba24-ed45686e9d06"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:38:46 crc kubenswrapper[4857]: I1128 15:38:46.960904 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab248fa2-2b7f-4571-ba24-ed45686e9d06-inventory" (OuterVolumeSpecName: "inventory") pod "ab248fa2-2b7f-4571-ba24-ed45686e9d06" (UID: "ab248fa2-2b7f-4571-ba24-ed45686e9d06"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:38:47 crc kubenswrapper[4857]: I1128 15:38:47.023858 4857 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ab248fa2-2b7f-4571-ba24-ed45686e9d06-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 15:38:47 crc kubenswrapper[4857]: I1128 15:38:47.023898 4857 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/ab248fa2-2b7f-4571-ba24-ed45686e9d06-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 15:38:47 crc kubenswrapper[4857]: I1128 15:38:47.023911 4857 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ab248fa2-2b7f-4571-ba24-ed45686e9d06-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 15:38:47 crc kubenswrapper[4857]: I1128 15:38:47.023926 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-twq6w\" (UniqueName: \"kubernetes.io/projected/ab248fa2-2b7f-4571-ba24-ed45686e9d06-kube-api-access-twq6w\") on node \"crc\" DevicePath \"\"" Nov 28 15:38:47 crc kubenswrapper[4857]: I1128 15:38:47.322102 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-wlm6l" event={"ID":"ab248fa2-2b7f-4571-ba24-ed45686e9d06","Type":"ContainerDied","Data":"3b5defe80e8a7fcbc807733109ff707f58cb485a3236bc5b15296d8135729e30"} Nov 28 15:38:47 crc kubenswrapper[4857]: I1128 15:38:47.322145 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3b5defe80e8a7fcbc807733109ff707f58cb485a3236bc5b15296d8135729e30" Nov 28 15:38:47 crc kubenswrapper[4857]: I1128 15:38:47.322222 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-wlm6l" Nov 28 15:38:47 crc kubenswrapper[4857]: I1128 15:38:47.398339 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-openstack-6mlkc"] Nov 28 15:38:47 crc kubenswrapper[4857]: E1128 15:38:47.398809 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab248fa2-2b7f-4571-ba24-ed45686e9d06" containerName="configure-os-openstack-openstack-cell1" Nov 28 15:38:47 crc kubenswrapper[4857]: I1128 15:38:47.398831 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab248fa2-2b7f-4571-ba24-ed45686e9d06" containerName="configure-os-openstack-openstack-cell1" Nov 28 15:38:47 crc kubenswrapper[4857]: I1128 15:38:47.399169 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab248fa2-2b7f-4571-ba24-ed45686e9d06" containerName="configure-os-openstack-openstack-cell1" Nov 28 15:38:47 crc kubenswrapper[4857]: I1128 15:38:47.399977 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-openstack-6mlkc" Nov 28 15:38:47 crc kubenswrapper[4857]: I1128 15:38:47.402545 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 15:38:47 crc kubenswrapper[4857]: I1128 15:38:47.402888 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-qbvn9" Nov 28 15:38:47 crc kubenswrapper[4857]: I1128 15:38:47.402900 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 15:38:47 crc kubenswrapper[4857]: I1128 15:38:47.403236 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 15:38:47 crc kubenswrapper[4857]: I1128 15:38:47.410253 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-openstack-6mlkc"] Nov 28 15:38:47 crc kubenswrapper[4857]: I1128 15:38:47.535033 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4f163911-5f98-47d3-9771-8e1aa7175cff-ceph\") pod \"ssh-known-hosts-openstack-6mlkc\" (UID: \"4f163911-5f98-47d3-9771-8e1aa7175cff\") " pod="openstack/ssh-known-hosts-openstack-6mlkc" Nov 28 15:38:47 crc kubenswrapper[4857]: I1128 15:38:47.535228 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/4f163911-5f98-47d3-9771-8e1aa7175cff-ssh-key-openstack-cell1\") pod \"ssh-known-hosts-openstack-6mlkc\" (UID: \"4f163911-5f98-47d3-9771-8e1aa7175cff\") " pod="openstack/ssh-known-hosts-openstack-6mlkc" Nov 28 15:38:47 crc kubenswrapper[4857]: I1128 15:38:47.535451 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/4f163911-5f98-47d3-9771-8e1aa7175cff-inventory-0\") pod \"ssh-known-hosts-openstack-6mlkc\" (UID: \"4f163911-5f98-47d3-9771-8e1aa7175cff\") " pod="openstack/ssh-known-hosts-openstack-6mlkc" Nov 28 15:38:47 crc kubenswrapper[4857]: I1128 15:38:47.535771 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hh77c\" (UniqueName: \"kubernetes.io/projected/4f163911-5f98-47d3-9771-8e1aa7175cff-kube-api-access-hh77c\") pod \"ssh-known-hosts-openstack-6mlkc\" (UID: \"4f163911-5f98-47d3-9771-8e1aa7175cff\") " pod="openstack/ssh-known-hosts-openstack-6mlkc" Nov 28 15:38:47 crc kubenswrapper[4857]: I1128 15:38:47.639118 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/4f163911-5f98-47d3-9771-8e1aa7175cff-ssh-key-openstack-cell1\") pod \"ssh-known-hosts-openstack-6mlkc\" (UID: \"4f163911-5f98-47d3-9771-8e1aa7175cff\") " pod="openstack/ssh-known-hosts-openstack-6mlkc" Nov 28 15:38:47 crc kubenswrapper[4857]: I1128 15:38:47.639255 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/4f163911-5f98-47d3-9771-8e1aa7175cff-inventory-0\") pod \"ssh-known-hosts-openstack-6mlkc\" (UID: \"4f163911-5f98-47d3-9771-8e1aa7175cff\") " pod="openstack/ssh-known-hosts-openstack-6mlkc" Nov 28 15:38:47 crc kubenswrapper[4857]: I1128 15:38:47.639368 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hh77c\" 
(UniqueName: \"kubernetes.io/projected/4f163911-5f98-47d3-9771-8e1aa7175cff-kube-api-access-hh77c\") pod \"ssh-known-hosts-openstack-6mlkc\" (UID: \"4f163911-5f98-47d3-9771-8e1aa7175cff\") " pod="openstack/ssh-known-hosts-openstack-6mlkc" Nov 28 15:38:47 crc kubenswrapper[4857]: I1128 15:38:47.639634 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4f163911-5f98-47d3-9771-8e1aa7175cff-ceph\") pod \"ssh-known-hosts-openstack-6mlkc\" (UID: \"4f163911-5f98-47d3-9771-8e1aa7175cff\") " pod="openstack/ssh-known-hosts-openstack-6mlkc" Nov 28 15:38:47 crc kubenswrapper[4857]: I1128 15:38:47.644750 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/4f163911-5f98-47d3-9771-8e1aa7175cff-ssh-key-openstack-cell1\") pod \"ssh-known-hosts-openstack-6mlkc\" (UID: \"4f163911-5f98-47d3-9771-8e1aa7175cff\") " pod="openstack/ssh-known-hosts-openstack-6mlkc" Nov 28 15:38:47 crc kubenswrapper[4857]: I1128 15:38:47.646124 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4f163911-5f98-47d3-9771-8e1aa7175cff-ceph\") pod \"ssh-known-hosts-openstack-6mlkc\" (UID: \"4f163911-5f98-47d3-9771-8e1aa7175cff\") " pod="openstack/ssh-known-hosts-openstack-6mlkc" Nov 28 15:38:47 crc kubenswrapper[4857]: I1128 15:38:47.646591 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/4f163911-5f98-47d3-9771-8e1aa7175cff-inventory-0\") pod \"ssh-known-hosts-openstack-6mlkc\" (UID: \"4f163911-5f98-47d3-9771-8e1aa7175cff\") " pod="openstack/ssh-known-hosts-openstack-6mlkc" Nov 28 15:38:47 crc kubenswrapper[4857]: I1128 15:38:47.658420 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hh77c\" (UniqueName: \"kubernetes.io/projected/4f163911-5f98-47d3-9771-8e1aa7175cff-kube-api-access-hh77c\") pod \"ssh-known-hosts-openstack-6mlkc\" (UID: \"4f163911-5f98-47d3-9771-8e1aa7175cff\") " pod="openstack/ssh-known-hosts-openstack-6mlkc" Nov 28 15:38:47 crc kubenswrapper[4857]: I1128 15:38:47.719127 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-openstack-6mlkc" Nov 28 15:38:48 crc kubenswrapper[4857]: I1128 15:38:48.307897 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-openstack-6mlkc"] Nov 28 15:38:48 crc kubenswrapper[4857]: W1128 15:38:48.318750 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4f163911_5f98_47d3_9771_8e1aa7175cff.slice/crio-e5792e5bee16db4796e02f5429a18f256fc778b3004d24cf8e2014dc16899d22 WatchSource:0}: Error finding container e5792e5bee16db4796e02f5429a18f256fc778b3004d24cf8e2014dc16899d22: Status 404 returned error can't find the container with id e5792e5bee16db4796e02f5429a18f256fc778b3004d24cf8e2014dc16899d22 Nov 28 15:38:48 crc kubenswrapper[4857]: I1128 15:38:48.335679 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-openstack-6mlkc" event={"ID":"4f163911-5f98-47d3-9771-8e1aa7175cff","Type":"ContainerStarted","Data":"e5792e5bee16db4796e02f5429a18f256fc778b3004d24cf8e2014dc16899d22"} Nov 28 15:38:50 crc kubenswrapper[4857]: I1128 15:38:50.358478 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-openstack-6mlkc" event={"ID":"4f163911-5f98-47d3-9771-8e1aa7175cff","Type":"ContainerStarted","Data":"0b8292bb69c28c748476834b1c89194ca52ee5127d9ab1059ca2f3262515f528"} Nov 28 15:38:50 crc kubenswrapper[4857]: I1128 15:38:50.389024 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-openstack-6mlkc" podStartSLOduration=2.519922685 podStartE2EDuration="3.389002473s" podCreationTimestamp="2025-11-28 15:38:47 +0000 UTC" firstStartedPulling="2025-11-28 15:38:48.321323046 +0000 UTC m=+7778.445264483" lastFinishedPulling="2025-11-28 15:38:49.190402844 +0000 UTC m=+7779.314344271" observedRunningTime="2025-11-28 15:38:50.379184041 +0000 UTC m=+7780.503125488" watchObservedRunningTime="2025-11-28 15:38:50.389002473 +0000 UTC m=+7780.512943920" Nov 28 15:38:56 crc kubenswrapper[4857]: I1128 15:38:56.647211 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-zwmw5"] Nov 28 15:38:56 crc kubenswrapper[4857]: I1128 15:38:56.650506 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-zwmw5" Nov 28 15:38:56 crc kubenswrapper[4857]: I1128 15:38:56.671675 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zwmw5"] Nov 28 15:38:56 crc kubenswrapper[4857]: I1128 15:38:56.749372 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8-utilities\") pod \"community-operators-zwmw5\" (UID: \"4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8\") " pod="openshift-marketplace/community-operators-zwmw5" Nov 28 15:38:56 crc kubenswrapper[4857]: I1128 15:38:56.749527 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8-catalog-content\") pod \"community-operators-zwmw5\" (UID: \"4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8\") " pod="openshift-marketplace/community-operators-zwmw5" Nov 28 15:38:56 crc kubenswrapper[4857]: I1128 15:38:56.749629 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v9q7s\" (UniqueName: \"kubernetes.io/projected/4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8-kube-api-access-v9q7s\") pod \"community-operators-zwmw5\" (UID: \"4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8\") " pod="openshift-marketplace/community-operators-zwmw5" Nov 28 15:38:56 crc kubenswrapper[4857]: I1128 15:38:56.851630 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8-utilities\") pod \"community-operators-zwmw5\" (UID: \"4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8\") " pod="openshift-marketplace/community-operators-zwmw5" Nov 28 15:38:56 crc kubenswrapper[4857]: I1128 15:38:56.851749 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8-catalog-content\") pod \"community-operators-zwmw5\" (UID: \"4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8\") " pod="openshift-marketplace/community-operators-zwmw5" Nov 28 15:38:56 crc kubenswrapper[4857]: I1128 15:38:56.851852 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v9q7s\" (UniqueName: \"kubernetes.io/projected/4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8-kube-api-access-v9q7s\") pod \"community-operators-zwmw5\" (UID: \"4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8\") " pod="openshift-marketplace/community-operators-zwmw5" Nov 28 15:38:56 crc kubenswrapper[4857]: I1128 15:38:56.852329 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8-utilities\") pod \"community-operators-zwmw5\" (UID: \"4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8\") " pod="openshift-marketplace/community-operators-zwmw5" Nov 28 15:38:56 crc kubenswrapper[4857]: I1128 15:38:56.852408 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8-catalog-content\") pod \"community-operators-zwmw5\" (UID: \"4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8\") " pod="openshift-marketplace/community-operators-zwmw5" Nov 28 15:38:56 crc kubenswrapper[4857]: I1128 15:38:56.873783 4857 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-v9q7s\" (UniqueName: \"kubernetes.io/projected/4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8-kube-api-access-v9q7s\") pod \"community-operators-zwmw5\" (UID: \"4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8\") " pod="openshift-marketplace/community-operators-zwmw5" Nov 28 15:38:56 crc kubenswrapper[4857]: I1128 15:38:56.989072 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zwmw5" Nov 28 15:38:57 crc kubenswrapper[4857]: W1128 15:38:57.607393 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4c10f4bc_7a19_4f5c_b6ac_ae035c3460e8.slice/crio-ebf08488904161e78dea85e64ce72a74e879b12f5401d5b40b5c2e16f7750e6d WatchSource:0}: Error finding container ebf08488904161e78dea85e64ce72a74e879b12f5401d5b40b5c2e16f7750e6d: Status 404 returned error can't find the container with id ebf08488904161e78dea85e64ce72a74e879b12f5401d5b40b5c2e16f7750e6d Nov 28 15:38:57 crc kubenswrapper[4857]: I1128 15:38:57.608239 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zwmw5"] Nov 28 15:38:58 crc kubenswrapper[4857]: I1128 15:38:58.449795 4857 generic.go:334] "Generic (PLEG): container finished" podID="4f163911-5f98-47d3-9771-8e1aa7175cff" containerID="0b8292bb69c28c748476834b1c89194ca52ee5127d9ab1059ca2f3262515f528" exitCode=0 Nov 28 15:38:58 crc kubenswrapper[4857]: I1128 15:38:58.449870 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-openstack-6mlkc" event={"ID":"4f163911-5f98-47d3-9771-8e1aa7175cff","Type":"ContainerDied","Data":"0b8292bb69c28c748476834b1c89194ca52ee5127d9ab1059ca2f3262515f528"} Nov 28 15:38:58 crc kubenswrapper[4857]: I1128 15:38:58.452655 4857 generic.go:334] "Generic (PLEG): container finished" podID="4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8" containerID="71dc8751e63480eaed012ebad0778a9d4f2f6586b350fab9dba4abdc52c2dcb6" exitCode=0 Nov 28 15:38:58 crc kubenswrapper[4857]: I1128 15:38:58.452723 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zwmw5" event={"ID":"4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8","Type":"ContainerDied","Data":"71dc8751e63480eaed012ebad0778a9d4f2f6586b350fab9dba4abdc52c2dcb6"} Nov 28 15:38:58 crc kubenswrapper[4857]: I1128 15:38:58.452758 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zwmw5" event={"ID":"4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8","Type":"ContainerStarted","Data":"ebf08488904161e78dea85e64ce72a74e879b12f5401d5b40b5c2e16f7750e6d"} Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.064444 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-openstack-6mlkc" Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.236920 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4f163911-5f98-47d3-9771-8e1aa7175cff-ceph\") pod \"4f163911-5f98-47d3-9771-8e1aa7175cff\" (UID: \"4f163911-5f98-47d3-9771-8e1aa7175cff\") " Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.237038 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/4f163911-5f98-47d3-9771-8e1aa7175cff-inventory-0\") pod \"4f163911-5f98-47d3-9771-8e1aa7175cff\" (UID: \"4f163911-5f98-47d3-9771-8e1aa7175cff\") " Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.237200 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hh77c\" (UniqueName: \"kubernetes.io/projected/4f163911-5f98-47d3-9771-8e1aa7175cff-kube-api-access-hh77c\") pod \"4f163911-5f98-47d3-9771-8e1aa7175cff\" (UID: \"4f163911-5f98-47d3-9771-8e1aa7175cff\") " Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.237420 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/4f163911-5f98-47d3-9771-8e1aa7175cff-ssh-key-openstack-cell1\") pod \"4f163911-5f98-47d3-9771-8e1aa7175cff\" (UID: \"4f163911-5f98-47d3-9771-8e1aa7175cff\") " Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.247114 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f163911-5f98-47d3-9771-8e1aa7175cff-kube-api-access-hh77c" (OuterVolumeSpecName: "kube-api-access-hh77c") pod "4f163911-5f98-47d3-9771-8e1aa7175cff" (UID: "4f163911-5f98-47d3-9771-8e1aa7175cff"). InnerVolumeSpecName "kube-api-access-hh77c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.254281 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f163911-5f98-47d3-9771-8e1aa7175cff-ceph" (OuterVolumeSpecName: "ceph") pod "4f163911-5f98-47d3-9771-8e1aa7175cff" (UID: "4f163911-5f98-47d3-9771-8e1aa7175cff"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.275533 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f163911-5f98-47d3-9771-8e1aa7175cff-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "4f163911-5f98-47d3-9771-8e1aa7175cff" (UID: "4f163911-5f98-47d3-9771-8e1aa7175cff"). InnerVolumeSpecName "ssh-key-openstack-cell1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.280929 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f163911-5f98-47d3-9771-8e1aa7175cff-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "4f163911-5f98-47d3-9771-8e1aa7175cff" (UID: "4f163911-5f98-47d3-9771-8e1aa7175cff"). InnerVolumeSpecName "inventory-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.340175 4857 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4f163911-5f98-47d3-9771-8e1aa7175cff-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.340216 4857 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/4f163911-5f98-47d3-9771-8e1aa7175cff-inventory-0\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.340233 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hh77c\" (UniqueName: \"kubernetes.io/projected/4f163911-5f98-47d3-9771-8e1aa7175cff-kube-api-access-hh77c\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.340248 4857 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/4f163911-5f98-47d3-9771-8e1aa7175cff-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.477982 4857 generic.go:334] "Generic (PLEG): container finished" podID="4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8" containerID="4a50e3157ea6bb089b7e717899a747a61730f80a45854cab1ca292f92951754b" exitCode=0 Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.478103 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zwmw5" event={"ID":"4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8","Type":"ContainerDied","Data":"4a50e3157ea6bb089b7e717899a747a61730f80a45854cab1ca292f92951754b"} Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.490228 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-openstack-6mlkc" event={"ID":"4f163911-5f98-47d3-9771-8e1aa7175cff","Type":"ContainerDied","Data":"e5792e5bee16db4796e02f5429a18f256fc778b3004d24cf8e2014dc16899d22"} Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.490290 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e5792e5bee16db4796e02f5429a18f256fc778b3004d24cf8e2014dc16899d22" Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.490392 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-openstack-6mlkc" Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.553557 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-openstack-openstack-cell1-cn6q8"] Nov 28 15:39:00 crc kubenswrapper[4857]: E1128 15:39:00.554070 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f163911-5f98-47d3-9771-8e1aa7175cff" containerName="ssh-known-hosts-openstack" Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.554089 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f163911-5f98-47d3-9771-8e1aa7175cff" containerName="ssh-known-hosts-openstack" Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.554355 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f163911-5f98-47d3-9771-8e1aa7175cff" containerName="ssh-known-hosts-openstack" Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.555183 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-openstack-openstack-cell1-cn6q8" Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.556815 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.556873 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-qbvn9" Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.557122 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.559621 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.576276 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-openstack-openstack-cell1-cn6q8"] Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.750980 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4f4a22c2-c87d-46d3-a6b0-72968f2661af-ssh-key\") pod \"run-os-openstack-openstack-cell1-cn6q8\" (UID: \"4f4a22c2-c87d-46d3-a6b0-72968f2661af\") " pod="openstack/run-os-openstack-openstack-cell1-cn6q8" Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.751027 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4f4a22c2-c87d-46d3-a6b0-72968f2661af-ceph\") pod \"run-os-openstack-openstack-cell1-cn6q8\" (UID: \"4f4a22c2-c87d-46d3-a6b0-72968f2661af\") " pod="openstack/run-os-openstack-openstack-cell1-cn6q8" Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.751109 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hwjzc\" (UniqueName: \"kubernetes.io/projected/4f4a22c2-c87d-46d3-a6b0-72968f2661af-kube-api-access-hwjzc\") pod \"run-os-openstack-openstack-cell1-cn6q8\" (UID: \"4f4a22c2-c87d-46d3-a6b0-72968f2661af\") " pod="openstack/run-os-openstack-openstack-cell1-cn6q8" Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.751158 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4f4a22c2-c87d-46d3-a6b0-72968f2661af-inventory\") pod \"run-os-openstack-openstack-cell1-cn6q8\" (UID: \"4f4a22c2-c87d-46d3-a6b0-72968f2661af\") " pod="openstack/run-os-openstack-openstack-cell1-cn6q8" Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.853476 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4f4a22c2-c87d-46d3-a6b0-72968f2661af-inventory\") pod \"run-os-openstack-openstack-cell1-cn6q8\" (UID: \"4f4a22c2-c87d-46d3-a6b0-72968f2661af\") " pod="openstack/run-os-openstack-openstack-cell1-cn6q8" Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.854082 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4f4a22c2-c87d-46d3-a6b0-72968f2661af-ssh-key\") pod \"run-os-openstack-openstack-cell1-cn6q8\" (UID: \"4f4a22c2-c87d-46d3-a6b0-72968f2661af\") " pod="openstack/run-os-openstack-openstack-cell1-cn6q8" Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.854117 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ceph\" (UniqueName: \"kubernetes.io/secret/4f4a22c2-c87d-46d3-a6b0-72968f2661af-ceph\") pod \"run-os-openstack-openstack-cell1-cn6q8\" (UID: \"4f4a22c2-c87d-46d3-a6b0-72968f2661af\") " pod="openstack/run-os-openstack-openstack-cell1-cn6q8" Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.854147 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hwjzc\" (UniqueName: \"kubernetes.io/projected/4f4a22c2-c87d-46d3-a6b0-72968f2661af-kube-api-access-hwjzc\") pod \"run-os-openstack-openstack-cell1-cn6q8\" (UID: \"4f4a22c2-c87d-46d3-a6b0-72968f2661af\") " pod="openstack/run-os-openstack-openstack-cell1-cn6q8" Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.859534 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4f4a22c2-c87d-46d3-a6b0-72968f2661af-ssh-key\") pod \"run-os-openstack-openstack-cell1-cn6q8\" (UID: \"4f4a22c2-c87d-46d3-a6b0-72968f2661af\") " pod="openstack/run-os-openstack-openstack-cell1-cn6q8" Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.859855 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4f4a22c2-c87d-46d3-a6b0-72968f2661af-inventory\") pod \"run-os-openstack-openstack-cell1-cn6q8\" (UID: \"4f4a22c2-c87d-46d3-a6b0-72968f2661af\") " pod="openstack/run-os-openstack-openstack-cell1-cn6q8" Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.860529 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4f4a22c2-c87d-46d3-a6b0-72968f2661af-ceph\") pod \"run-os-openstack-openstack-cell1-cn6q8\" (UID: \"4f4a22c2-c87d-46d3-a6b0-72968f2661af\") " pod="openstack/run-os-openstack-openstack-cell1-cn6q8" Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.872285 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hwjzc\" (UniqueName: \"kubernetes.io/projected/4f4a22c2-c87d-46d3-a6b0-72968f2661af-kube-api-access-hwjzc\") pod \"run-os-openstack-openstack-cell1-cn6q8\" (UID: \"4f4a22c2-c87d-46d3-a6b0-72968f2661af\") " pod="openstack/run-os-openstack-openstack-cell1-cn6q8" Nov 28 15:39:00 crc kubenswrapper[4857]: I1128 15:39:00.876143 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-openstack-openstack-cell1-cn6q8" Nov 28 15:39:01 crc kubenswrapper[4857]: I1128 15:39:01.432695 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-openstack-openstack-cell1-cn6q8"] Nov 28 15:39:01 crc kubenswrapper[4857]: W1128 15:39:01.435239 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4f4a22c2_c87d_46d3_a6b0_72968f2661af.slice/crio-f3eaeb9503f09ea52014e96d0bf149536a9af9fecc09b2a89e48caee2a9c130d WatchSource:0}: Error finding container f3eaeb9503f09ea52014e96d0bf149536a9af9fecc09b2a89e48caee2a9c130d: Status 404 returned error can't find the container with id f3eaeb9503f09ea52014e96d0bf149536a9af9fecc09b2a89e48caee2a9c130d Nov 28 15:39:01 crc kubenswrapper[4857]: I1128 15:39:01.501452 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-cell1-cn6q8" event={"ID":"4f4a22c2-c87d-46d3-a6b0-72968f2661af","Type":"ContainerStarted","Data":"f3eaeb9503f09ea52014e96d0bf149536a9af9fecc09b2a89e48caee2a9c130d"} Nov 28 15:39:02 crc kubenswrapper[4857]: I1128 15:39:02.518473 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zwmw5" event={"ID":"4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8","Type":"ContainerStarted","Data":"f491ed83bf8ddabcdc5a276d3a658ce933993670d1ef58fd82e04d7daa3d8c7e"} Nov 28 15:39:02 crc kubenswrapper[4857]: I1128 15:39:02.521302 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-cell1-cn6q8" event={"ID":"4f4a22c2-c87d-46d3-a6b0-72968f2661af","Type":"ContainerStarted","Data":"e340cf63967e2dc2607d99fd7a4a3950f112400d42d6353f53005c0ea7fd3b28"} Nov 28 15:39:02 crc kubenswrapper[4857]: I1128 15:39:02.547442 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-zwmw5" podStartSLOduration=3.600340729 podStartE2EDuration="6.547418169s" podCreationTimestamp="2025-11-28 15:38:56 +0000 UTC" firstStartedPulling="2025-11-28 15:38:58.455757798 +0000 UTC m=+7788.579699245" lastFinishedPulling="2025-11-28 15:39:01.402835248 +0000 UTC m=+7791.526776685" observedRunningTime="2025-11-28 15:39:02.542780245 +0000 UTC m=+7792.666721742" watchObservedRunningTime="2025-11-28 15:39:02.547418169 +0000 UTC m=+7792.671359606" Nov 28 15:39:02 crc kubenswrapper[4857]: I1128 15:39:02.566893 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-openstack-openstack-cell1-cn6q8" podStartSLOduration=2.283450074 podStartE2EDuration="2.566871917s" podCreationTimestamp="2025-11-28 15:39:00 +0000 UTC" firstStartedPulling="2025-11-28 15:39:01.438112028 +0000 UTC m=+7791.562053465" lastFinishedPulling="2025-11-28 15:39:01.721533871 +0000 UTC m=+7791.845475308" observedRunningTime="2025-11-28 15:39:02.558265818 +0000 UTC m=+7792.682207265" watchObservedRunningTime="2025-11-28 15:39:02.566871917 +0000 UTC m=+7792.690813364" Nov 28 15:39:06 crc kubenswrapper[4857]: I1128 15:39:06.989872 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-zwmw5" Nov 28 15:39:06 crc kubenswrapper[4857]: I1128 15:39:06.990490 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-zwmw5" Nov 28 15:39:07 crc kubenswrapper[4857]: I1128 15:39:07.047188 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="started" pod="openshift-marketplace/community-operators-zwmw5" Nov 28 15:39:07 crc kubenswrapper[4857]: I1128 15:39:07.633718 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-zwmw5" Nov 28 15:39:07 crc kubenswrapper[4857]: I1128 15:39:07.694759 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zwmw5"] Nov 28 15:39:09 crc kubenswrapper[4857]: I1128 15:39:09.587195 4857 generic.go:334] "Generic (PLEG): container finished" podID="4f4a22c2-c87d-46d3-a6b0-72968f2661af" containerID="e340cf63967e2dc2607d99fd7a4a3950f112400d42d6353f53005c0ea7fd3b28" exitCode=0 Nov 28 15:39:09 crc kubenswrapper[4857]: I1128 15:39:09.587399 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-cell1-cn6q8" event={"ID":"4f4a22c2-c87d-46d3-a6b0-72968f2661af","Type":"ContainerDied","Data":"e340cf63967e2dc2607d99fd7a4a3950f112400d42d6353f53005c0ea7fd3b28"} Nov 28 15:39:09 crc kubenswrapper[4857]: I1128 15:39:09.587711 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-zwmw5" podUID="4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8" containerName="registry-server" containerID="cri-o://f491ed83bf8ddabcdc5a276d3a658ce933993670d1ef58fd82e04d7daa3d8c7e" gracePeriod=2 Nov 28 15:39:10 crc kubenswrapper[4857]: I1128 15:39:10.604035 4857 generic.go:334] "Generic (PLEG): container finished" podID="4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8" containerID="f491ed83bf8ddabcdc5a276d3a658ce933993670d1ef58fd82e04d7daa3d8c7e" exitCode=0 Nov 28 15:39:10 crc kubenswrapper[4857]: I1128 15:39:10.605106 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zwmw5" event={"ID":"4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8","Type":"ContainerDied","Data":"f491ed83bf8ddabcdc5a276d3a658ce933993670d1ef58fd82e04d7daa3d8c7e"} Nov 28 15:39:10 crc kubenswrapper[4857]: I1128 15:39:10.760144 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-zwmw5" Nov 28 15:39:10 crc kubenswrapper[4857]: I1128 15:39:10.889339 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8-utilities\") pod \"4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8\" (UID: \"4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8\") " Nov 28 15:39:10 crc kubenswrapper[4857]: I1128 15:39:10.889411 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v9q7s\" (UniqueName: \"kubernetes.io/projected/4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8-kube-api-access-v9q7s\") pod \"4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8\" (UID: \"4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8\") " Nov 28 15:39:10 crc kubenswrapper[4857]: I1128 15:39:10.889603 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8-catalog-content\") pod \"4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8\" (UID: \"4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8\") " Nov 28 15:39:10 crc kubenswrapper[4857]: I1128 15:39:10.890908 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8-utilities" (OuterVolumeSpecName: "utilities") pod "4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8" (UID: "4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:39:10 crc kubenswrapper[4857]: I1128 15:39:10.910644 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8-kube-api-access-v9q7s" (OuterVolumeSpecName: "kube-api-access-v9q7s") pod "4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8" (UID: "4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8"). InnerVolumeSpecName "kube-api-access-v9q7s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:39:10 crc kubenswrapper[4857]: I1128 15:39:10.961856 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8" (UID: "4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:39:10 crc kubenswrapper[4857]: I1128 15:39:10.992180 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:10 crc kubenswrapper[4857]: I1128 15:39:10.992559 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v9q7s\" (UniqueName: \"kubernetes.io/projected/4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8-kube-api-access-v9q7s\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:10 crc kubenswrapper[4857]: I1128 15:39:10.992577 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.109145 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-openstack-openstack-cell1-cn6q8" Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.298618 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4f4a22c2-c87d-46d3-a6b0-72968f2661af-ceph\") pod \"4f4a22c2-c87d-46d3-a6b0-72968f2661af\" (UID: \"4f4a22c2-c87d-46d3-a6b0-72968f2661af\") " Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.298727 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hwjzc\" (UniqueName: \"kubernetes.io/projected/4f4a22c2-c87d-46d3-a6b0-72968f2661af-kube-api-access-hwjzc\") pod \"4f4a22c2-c87d-46d3-a6b0-72968f2661af\" (UID: \"4f4a22c2-c87d-46d3-a6b0-72968f2661af\") " Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.298910 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4f4a22c2-c87d-46d3-a6b0-72968f2661af-inventory\") pod \"4f4a22c2-c87d-46d3-a6b0-72968f2661af\" (UID: \"4f4a22c2-c87d-46d3-a6b0-72968f2661af\") " Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.299161 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4f4a22c2-c87d-46d3-a6b0-72968f2661af-ssh-key\") pod \"4f4a22c2-c87d-46d3-a6b0-72968f2661af\" (UID: \"4f4a22c2-c87d-46d3-a6b0-72968f2661af\") " Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.304080 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f4a22c2-c87d-46d3-a6b0-72968f2661af-ceph" (OuterVolumeSpecName: "ceph") pod "4f4a22c2-c87d-46d3-a6b0-72968f2661af" (UID: "4f4a22c2-c87d-46d3-a6b0-72968f2661af"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.311304 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f4a22c2-c87d-46d3-a6b0-72968f2661af-kube-api-access-hwjzc" (OuterVolumeSpecName: "kube-api-access-hwjzc") pod "4f4a22c2-c87d-46d3-a6b0-72968f2661af" (UID: "4f4a22c2-c87d-46d3-a6b0-72968f2661af"). InnerVolumeSpecName "kube-api-access-hwjzc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.331468 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f4a22c2-c87d-46d3-a6b0-72968f2661af-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "4f4a22c2-c87d-46d3-a6b0-72968f2661af" (UID: "4f4a22c2-c87d-46d3-a6b0-72968f2661af"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.343873 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f4a22c2-c87d-46d3-a6b0-72968f2661af-inventory" (OuterVolumeSpecName: "inventory") pod "4f4a22c2-c87d-46d3-a6b0-72968f2661af" (UID: "4f4a22c2-c87d-46d3-a6b0-72968f2661af"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.403112 4857 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4f4a22c2-c87d-46d3-a6b0-72968f2661af-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.403157 4857 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4f4a22c2-c87d-46d3-a6b0-72968f2661af-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.403171 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hwjzc\" (UniqueName: \"kubernetes.io/projected/4f4a22c2-c87d-46d3-a6b0-72968f2661af-kube-api-access-hwjzc\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.403185 4857 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4f4a22c2-c87d-46d3-a6b0-72968f2661af-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.616836 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zwmw5" event={"ID":"4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8","Type":"ContainerDied","Data":"ebf08488904161e78dea85e64ce72a74e879b12f5401d5b40b5c2e16f7750e6d"} Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.616893 4857 scope.go:117] "RemoveContainer" containerID="f491ed83bf8ddabcdc5a276d3a658ce933993670d1ef58fd82e04d7daa3d8c7e" Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.617076 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zwmw5" Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.624016 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-cell1-cn6q8" event={"ID":"4f4a22c2-c87d-46d3-a6b0-72968f2661af","Type":"ContainerDied","Data":"f3eaeb9503f09ea52014e96d0bf149536a9af9fecc09b2a89e48caee2a9c130d"} Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.624059 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f3eaeb9503f09ea52014e96d0bf149536a9af9fecc09b2a89e48caee2a9c130d" Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.624132 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-openstack-openstack-cell1-cn6q8" Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.656582 4857 scope.go:117] "RemoveContainer" containerID="4a50e3157ea6bb089b7e717899a747a61730f80a45854cab1ca292f92951754b" Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.700768 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zwmw5"] Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.711357 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-openstack-openstack-cell1-st7gd"] Nov 28 15:39:11 crc kubenswrapper[4857]: E1128 15:39:11.712335 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8" containerName="registry-server" Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.712372 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8" containerName="registry-server" Nov 28 15:39:11 crc kubenswrapper[4857]: E1128 15:39:11.712412 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8" containerName="extract-content" Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.712425 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8" containerName="extract-content" Nov 28 15:39:11 crc kubenswrapper[4857]: E1128 15:39:11.712442 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f4a22c2-c87d-46d3-a6b0-72968f2661af" containerName="run-os-openstack-openstack-cell1" Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.712454 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f4a22c2-c87d-46d3-a6b0-72968f2661af" containerName="run-os-openstack-openstack-cell1" Nov 28 15:39:11 crc kubenswrapper[4857]: E1128 15:39:11.712483 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8" containerName="extract-utilities" Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.712495 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8" containerName="extract-utilities" Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.712886 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8" containerName="registry-server" Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.712943 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f4a22c2-c87d-46d3-a6b0-72968f2661af" containerName="run-os-openstack-openstack-cell1" Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.714240 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-openstack-openstack-cell1-st7gd" Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.719212 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.719250 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.719711 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-qbvn9" Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.719924 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.721477 4857 scope.go:117] "RemoveContainer" containerID="71dc8751e63480eaed012ebad0778a9d4f2f6586b350fab9dba4abdc52c2dcb6" Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.722432 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-zwmw5"] Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.762746 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-openstack-openstack-cell1-st7gd"] Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.916178 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e63beefb-f92b-4109-890b-209999158ac7-inventory\") pod \"reboot-os-openstack-openstack-cell1-st7gd\" (UID: \"e63beefb-f92b-4109-890b-209999158ac7\") " pod="openstack/reboot-os-openstack-openstack-cell1-st7gd" Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.916241 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e63beefb-f92b-4109-890b-209999158ac7-ssh-key\") pod \"reboot-os-openstack-openstack-cell1-st7gd\" (UID: \"e63beefb-f92b-4109-890b-209999158ac7\") " pod="openstack/reboot-os-openstack-openstack-cell1-st7gd" Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.916298 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qws9h\" (UniqueName: \"kubernetes.io/projected/e63beefb-f92b-4109-890b-209999158ac7-kube-api-access-qws9h\") pod \"reboot-os-openstack-openstack-cell1-st7gd\" (UID: \"e63beefb-f92b-4109-890b-209999158ac7\") " pod="openstack/reboot-os-openstack-openstack-cell1-st7gd" Nov 28 15:39:11 crc kubenswrapper[4857]: I1128 15:39:11.916480 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e63beefb-f92b-4109-890b-209999158ac7-ceph\") pod \"reboot-os-openstack-openstack-cell1-st7gd\" (UID: \"e63beefb-f92b-4109-890b-209999158ac7\") " pod="openstack/reboot-os-openstack-openstack-cell1-st7gd" Nov 28 15:39:12 crc kubenswrapper[4857]: I1128 15:39:12.018477 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e63beefb-f92b-4109-890b-209999158ac7-ceph\") pod \"reboot-os-openstack-openstack-cell1-st7gd\" (UID: \"e63beefb-f92b-4109-890b-209999158ac7\") " pod="openstack/reboot-os-openstack-openstack-cell1-st7gd" Nov 28 15:39:12 crc kubenswrapper[4857]: I1128 15:39:12.018639 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" 
(UniqueName: \"kubernetes.io/secret/e63beefb-f92b-4109-890b-209999158ac7-inventory\") pod \"reboot-os-openstack-openstack-cell1-st7gd\" (UID: \"e63beefb-f92b-4109-890b-209999158ac7\") " pod="openstack/reboot-os-openstack-openstack-cell1-st7gd" Nov 28 15:39:12 crc kubenswrapper[4857]: I1128 15:39:12.018662 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e63beefb-f92b-4109-890b-209999158ac7-ssh-key\") pod \"reboot-os-openstack-openstack-cell1-st7gd\" (UID: \"e63beefb-f92b-4109-890b-209999158ac7\") " pod="openstack/reboot-os-openstack-openstack-cell1-st7gd" Nov 28 15:39:12 crc kubenswrapper[4857]: I1128 15:39:12.018681 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qws9h\" (UniqueName: \"kubernetes.io/projected/e63beefb-f92b-4109-890b-209999158ac7-kube-api-access-qws9h\") pod \"reboot-os-openstack-openstack-cell1-st7gd\" (UID: \"e63beefb-f92b-4109-890b-209999158ac7\") " pod="openstack/reboot-os-openstack-openstack-cell1-st7gd" Nov 28 15:39:12 crc kubenswrapper[4857]: I1128 15:39:12.023432 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e63beefb-f92b-4109-890b-209999158ac7-ceph\") pod \"reboot-os-openstack-openstack-cell1-st7gd\" (UID: \"e63beefb-f92b-4109-890b-209999158ac7\") " pod="openstack/reboot-os-openstack-openstack-cell1-st7gd" Nov 28 15:39:12 crc kubenswrapper[4857]: I1128 15:39:12.025238 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e63beefb-f92b-4109-890b-209999158ac7-inventory\") pod \"reboot-os-openstack-openstack-cell1-st7gd\" (UID: \"e63beefb-f92b-4109-890b-209999158ac7\") " pod="openstack/reboot-os-openstack-openstack-cell1-st7gd" Nov 28 15:39:12 crc kubenswrapper[4857]: I1128 15:39:12.029389 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e63beefb-f92b-4109-890b-209999158ac7-ssh-key\") pod \"reboot-os-openstack-openstack-cell1-st7gd\" (UID: \"e63beefb-f92b-4109-890b-209999158ac7\") " pod="openstack/reboot-os-openstack-openstack-cell1-st7gd" Nov 28 15:39:12 crc kubenswrapper[4857]: I1128 15:39:12.042594 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qws9h\" (UniqueName: \"kubernetes.io/projected/e63beefb-f92b-4109-890b-209999158ac7-kube-api-access-qws9h\") pod \"reboot-os-openstack-openstack-cell1-st7gd\" (UID: \"e63beefb-f92b-4109-890b-209999158ac7\") " pod="openstack/reboot-os-openstack-openstack-cell1-st7gd" Nov 28 15:39:12 crc kubenswrapper[4857]: I1128 15:39:12.131260 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-openstack-openstack-cell1-st7gd" Nov 28 15:39:12 crc kubenswrapper[4857]: I1128 15:39:12.245866 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8" path="/var/lib/kubelet/pods/4c10f4bc-7a19-4f5c-b6ac-ae035c3460e8/volumes" Nov 28 15:39:12 crc kubenswrapper[4857]: I1128 15:39:12.709156 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-openstack-openstack-cell1-st7gd"] Nov 28 15:39:13 crc kubenswrapper[4857]: I1128 15:39:13.659245 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-cell1-st7gd" event={"ID":"e63beefb-f92b-4109-890b-209999158ac7","Type":"ContainerStarted","Data":"084ec4a17078a4456b904f6ea8ff60c4550ad23792c6511666f78f86f5f4b92f"} Nov 28 15:39:13 crc kubenswrapper[4857]: I1128 15:39:13.659604 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-cell1-st7gd" event={"ID":"e63beefb-f92b-4109-890b-209999158ac7","Type":"ContainerStarted","Data":"b69baec03c27afc2eea5ea9a6c4fcd592ee95b88db7467ec56d89ea367425aea"} Nov 28 15:39:13 crc kubenswrapper[4857]: I1128 15:39:13.677581 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-openstack-openstack-cell1-st7gd" podStartSLOduration=2.504859271 podStartE2EDuration="2.677558923s" podCreationTimestamp="2025-11-28 15:39:11 +0000 UTC" firstStartedPulling="2025-11-28 15:39:12.720753737 +0000 UTC m=+7802.844695184" lastFinishedPulling="2025-11-28 15:39:12.893453399 +0000 UTC m=+7803.017394836" observedRunningTime="2025-11-28 15:39:13.675606081 +0000 UTC m=+7803.799547518" watchObservedRunningTime="2025-11-28 15:39:13.677558923 +0000 UTC m=+7803.801500370" Nov 28 15:39:29 crc kubenswrapper[4857]: I1128 15:39:29.870576 4857 generic.go:334] "Generic (PLEG): container finished" podID="e63beefb-f92b-4109-890b-209999158ac7" containerID="084ec4a17078a4456b904f6ea8ff60c4550ad23792c6511666f78f86f5f4b92f" exitCode=0 Nov 28 15:39:29 crc kubenswrapper[4857]: I1128 15:39:29.870720 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-cell1-st7gd" event={"ID":"e63beefb-f92b-4109-890b-209999158ac7","Type":"ContainerDied","Data":"084ec4a17078a4456b904f6ea8ff60c4550ad23792c6511666f78f86f5f4b92f"} Nov 28 15:39:31 crc kubenswrapper[4857]: I1128 15:39:31.341489 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-openstack-openstack-cell1-st7gd" Nov 28 15:39:31 crc kubenswrapper[4857]: I1128 15:39:31.414135 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e63beefb-f92b-4109-890b-209999158ac7-inventory\") pod \"e63beefb-f92b-4109-890b-209999158ac7\" (UID: \"e63beefb-f92b-4109-890b-209999158ac7\") " Nov 28 15:39:31 crc kubenswrapper[4857]: I1128 15:39:31.414197 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e63beefb-f92b-4109-890b-209999158ac7-ssh-key\") pod \"e63beefb-f92b-4109-890b-209999158ac7\" (UID: \"e63beefb-f92b-4109-890b-209999158ac7\") " Nov 28 15:39:31 crc kubenswrapper[4857]: I1128 15:39:31.414260 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e63beefb-f92b-4109-890b-209999158ac7-ceph\") pod \"e63beefb-f92b-4109-890b-209999158ac7\" (UID: \"e63beefb-f92b-4109-890b-209999158ac7\") " Nov 28 15:39:31 crc kubenswrapper[4857]: I1128 15:39:31.414472 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qws9h\" (UniqueName: \"kubernetes.io/projected/e63beefb-f92b-4109-890b-209999158ac7-kube-api-access-qws9h\") pod \"e63beefb-f92b-4109-890b-209999158ac7\" (UID: \"e63beefb-f92b-4109-890b-209999158ac7\") " Nov 28 15:39:31 crc kubenswrapper[4857]: I1128 15:39:31.421825 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e63beefb-f92b-4109-890b-209999158ac7-ceph" (OuterVolumeSpecName: "ceph") pod "e63beefb-f92b-4109-890b-209999158ac7" (UID: "e63beefb-f92b-4109-890b-209999158ac7"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:39:31 crc kubenswrapper[4857]: I1128 15:39:31.441143 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e63beefb-f92b-4109-890b-209999158ac7-kube-api-access-qws9h" (OuterVolumeSpecName: "kube-api-access-qws9h") pod "e63beefb-f92b-4109-890b-209999158ac7" (UID: "e63beefb-f92b-4109-890b-209999158ac7"). InnerVolumeSpecName "kube-api-access-qws9h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:39:31 crc kubenswrapper[4857]: I1128 15:39:31.472149 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e63beefb-f92b-4109-890b-209999158ac7-inventory" (OuterVolumeSpecName: "inventory") pod "e63beefb-f92b-4109-890b-209999158ac7" (UID: "e63beefb-f92b-4109-890b-209999158ac7"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:39:31 crc kubenswrapper[4857]: I1128 15:39:31.476141 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e63beefb-f92b-4109-890b-209999158ac7-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "e63beefb-f92b-4109-890b-209999158ac7" (UID: "e63beefb-f92b-4109-890b-209999158ac7"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:39:31 crc kubenswrapper[4857]: I1128 15:39:31.516019 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qws9h\" (UniqueName: \"kubernetes.io/projected/e63beefb-f92b-4109-890b-209999158ac7-kube-api-access-qws9h\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:31 crc kubenswrapper[4857]: I1128 15:39:31.516056 4857 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e63beefb-f92b-4109-890b-209999158ac7-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:31 crc kubenswrapper[4857]: I1128 15:39:31.516071 4857 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e63beefb-f92b-4109-890b-209999158ac7-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:31 crc kubenswrapper[4857]: I1128 15:39:31.516084 4857 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/e63beefb-f92b-4109-890b-209999158ac7-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:31 crc kubenswrapper[4857]: I1128 15:39:31.896202 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-cell1-st7gd" event={"ID":"e63beefb-f92b-4109-890b-209999158ac7","Type":"ContainerDied","Data":"b69baec03c27afc2eea5ea9a6c4fcd592ee95b88db7467ec56d89ea367425aea"} Nov 28 15:39:31 crc kubenswrapper[4857]: I1128 15:39:31.896535 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b69baec03c27afc2eea5ea9a6c4fcd592ee95b88db7467ec56d89ea367425aea" Nov 28 15:39:31 crc kubenswrapper[4857]: I1128 15:39:31.896265 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-openstack-openstack-cell1-st7gd" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.009023 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-openstack-openstack-cell1-fwjs4"] Nov 28 15:39:32 crc kubenswrapper[4857]: E1128 15:39:32.009539 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e63beefb-f92b-4109-890b-209999158ac7" containerName="reboot-os-openstack-openstack-cell1" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.009561 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="e63beefb-f92b-4109-890b-209999158ac7" containerName="reboot-os-openstack-openstack-cell1" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.009869 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="e63beefb-f92b-4109-890b-209999158ac7" containerName="reboot-os-openstack-openstack-cell1" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.010889 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.014843 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.015921 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-qbvn9" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.016304 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.016939 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.026301 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-ceph\") pod \"install-certs-openstack-openstack-cell1-fwjs4\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.026376 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-inventory\") pod \"install-certs-openstack-openstack-cell1-fwjs4\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.026472 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vpmdn\" (UniqueName: \"kubernetes.io/projected/4608f83d-58f5-4bba-bc64-ba729a74e876-kube-api-access-vpmdn\") pod \"install-certs-openstack-openstack-cell1-fwjs4\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.026537 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-neutron-metadata-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-fwjs4\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.026571 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-telemetry-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-fwjs4\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.026598 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-bootstrap-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-fwjs4\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 
15:39:32.026650 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-neutron-dhcp-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-fwjs4\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.026718 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-ovn-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-fwjs4\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.026847 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-nova-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-fwjs4\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.026905 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-neutron-sriov-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-fwjs4\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.026978 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-libvirt-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-fwjs4\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.027054 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-ssh-key\") pod \"install-certs-openstack-openstack-cell1-fwjs4\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.033400 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-openstack-openstack-cell1-fwjs4"] Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.129121 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-ssh-key\") pod \"install-certs-openstack-openstack-cell1-fwjs4\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.129248 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-ceph\") pod 
\"install-certs-openstack-openstack-cell1-fwjs4\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.129287 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-inventory\") pod \"install-certs-openstack-openstack-cell1-fwjs4\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.129351 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vpmdn\" (UniqueName: \"kubernetes.io/projected/4608f83d-58f5-4bba-bc64-ba729a74e876-kube-api-access-vpmdn\") pod \"install-certs-openstack-openstack-cell1-fwjs4\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.129398 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-neutron-metadata-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-fwjs4\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.129425 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-telemetry-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-fwjs4\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.129447 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-bootstrap-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-fwjs4\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.129465 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-neutron-dhcp-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-fwjs4\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.129486 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-ovn-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-fwjs4\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.129577 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-nova-combined-ca-bundle\") pod 
\"install-certs-openstack-openstack-cell1-fwjs4\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.129607 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-neutron-sriov-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-fwjs4\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.129639 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-libvirt-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-fwjs4\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.135589 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-ceph\") pod \"install-certs-openstack-openstack-cell1-fwjs4\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.135871 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-bootstrap-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-fwjs4\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.136254 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-neutron-metadata-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-fwjs4\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.136922 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-telemetry-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-fwjs4\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.137231 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-ssh-key\") pod \"install-certs-openstack-openstack-cell1-fwjs4\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.137679 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-inventory\") pod \"install-certs-openstack-openstack-cell1-fwjs4\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " 
pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.143106 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-libvirt-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-fwjs4\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.143667 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-neutron-sriov-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-fwjs4\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.143675 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-neutron-dhcp-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-fwjs4\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.149740 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-ovn-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-fwjs4\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.151118 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-nova-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-fwjs4\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.169608 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vpmdn\" (UniqueName: \"kubernetes.io/projected/4608f83d-58f5-4bba-bc64-ba729a74e876-kube-api-access-vpmdn\") pod \"install-certs-openstack-openstack-cell1-fwjs4\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.330898 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:32 crc kubenswrapper[4857]: I1128 15:39:32.956588 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-openstack-openstack-cell1-fwjs4"] Nov 28 15:39:33 crc kubenswrapper[4857]: I1128 15:39:33.923836 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" event={"ID":"4608f83d-58f5-4bba-bc64-ba729a74e876","Type":"ContainerStarted","Data":"00560060d7d8eac5a6ffc55d8b1a7312bc74d8f3a4850e10fcc0b03d0ab15b83"} Nov 28 15:39:33 crc kubenswrapper[4857]: I1128 15:39:33.924702 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" event={"ID":"4608f83d-58f5-4bba-bc64-ba729a74e876","Type":"ContainerStarted","Data":"ab727addc3fbd7ab267adcac1951d5dc2115ed6ff922d1aa9046cfa1c57883ce"} Nov 28 15:39:33 crc kubenswrapper[4857]: I1128 15:39:33.962280 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" podStartSLOduration=2.776860671 podStartE2EDuration="2.962254941s" podCreationTimestamp="2025-11-28 15:39:31 +0000 UTC" firstStartedPulling="2025-11-28 15:39:32.95759587 +0000 UTC m=+7823.081537307" lastFinishedPulling="2025-11-28 15:39:33.14299014 +0000 UTC m=+7823.266931577" observedRunningTime="2025-11-28 15:39:33.950660162 +0000 UTC m=+7824.074601639" watchObservedRunningTime="2025-11-28 15:39:33.962254941 +0000 UTC m=+7824.086196388" Nov 28 15:39:41 crc kubenswrapper[4857]: I1128 15:39:41.308259 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:39:41 crc kubenswrapper[4857]: I1128 15:39:41.308857 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:39:52 crc kubenswrapper[4857]: I1128 15:39:52.150672 4857 generic.go:334] "Generic (PLEG): container finished" podID="4608f83d-58f5-4bba-bc64-ba729a74e876" containerID="00560060d7d8eac5a6ffc55d8b1a7312bc74d8f3a4850e10fcc0b03d0ab15b83" exitCode=0 Nov 28 15:39:52 crc kubenswrapper[4857]: I1128 15:39:52.150767 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" event={"ID":"4608f83d-58f5-4bba-bc64-ba729a74e876","Type":"ContainerDied","Data":"00560060d7d8eac5a6ffc55d8b1a7312bc74d8f3a4850e10fcc0b03d0ab15b83"} Nov 28 15:39:53 crc kubenswrapper[4857]: I1128 15:39:53.622742 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:53 crc kubenswrapper[4857]: I1128 15:39:53.790697 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-neutron-sriov-combined-ca-bundle\") pod \"4608f83d-58f5-4bba-bc64-ba729a74e876\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " Nov 28 15:39:53 crc kubenswrapper[4857]: I1128 15:39:53.790986 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-ovn-combined-ca-bundle\") pod \"4608f83d-58f5-4bba-bc64-ba729a74e876\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " Nov 28 15:39:53 crc kubenswrapper[4857]: I1128 15:39:53.791204 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-ceph\") pod \"4608f83d-58f5-4bba-bc64-ba729a74e876\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " Nov 28 15:39:53 crc kubenswrapper[4857]: I1128 15:39:53.791262 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-libvirt-combined-ca-bundle\") pod \"4608f83d-58f5-4bba-bc64-ba729a74e876\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " Nov 28 15:39:53 crc kubenswrapper[4857]: I1128 15:39:53.791342 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-telemetry-combined-ca-bundle\") pod \"4608f83d-58f5-4bba-bc64-ba729a74e876\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " Nov 28 15:39:53 crc kubenswrapper[4857]: I1128 15:39:53.791415 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-nova-combined-ca-bundle\") pod \"4608f83d-58f5-4bba-bc64-ba729a74e876\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " Nov 28 15:39:53 crc kubenswrapper[4857]: I1128 15:39:53.791462 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-bootstrap-combined-ca-bundle\") pod \"4608f83d-58f5-4bba-bc64-ba729a74e876\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " Nov 28 15:39:53 crc kubenswrapper[4857]: I1128 15:39:53.791575 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-neutron-dhcp-combined-ca-bundle\") pod \"4608f83d-58f5-4bba-bc64-ba729a74e876\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " Nov 28 15:39:53 crc kubenswrapper[4857]: I1128 15:39:53.791624 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-inventory\") pod \"4608f83d-58f5-4bba-bc64-ba729a74e876\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " Nov 28 15:39:53 crc kubenswrapper[4857]: I1128 15:39:53.791658 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"ssh-key\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-ssh-key\") pod \"4608f83d-58f5-4bba-bc64-ba729a74e876\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " Nov 28 15:39:53 crc kubenswrapper[4857]: I1128 15:39:53.791837 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vpmdn\" (UniqueName: \"kubernetes.io/projected/4608f83d-58f5-4bba-bc64-ba729a74e876-kube-api-access-vpmdn\") pod \"4608f83d-58f5-4bba-bc64-ba729a74e876\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " Nov 28 15:39:53 crc kubenswrapper[4857]: I1128 15:39:53.791981 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-neutron-metadata-combined-ca-bundle\") pod \"4608f83d-58f5-4bba-bc64-ba729a74e876\" (UID: \"4608f83d-58f5-4bba-bc64-ba729a74e876\") " Nov 28 15:39:53 crc kubenswrapper[4857]: I1128 15:39:53.796923 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "4608f83d-58f5-4bba-bc64-ba729a74e876" (UID: "4608f83d-58f5-4bba-bc64-ba729a74e876"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:39:53 crc kubenswrapper[4857]: I1128 15:39:53.798013 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-neutron-sriov-combined-ca-bundle" (OuterVolumeSpecName: "neutron-sriov-combined-ca-bundle") pod "4608f83d-58f5-4bba-bc64-ba729a74e876" (UID: "4608f83d-58f5-4bba-bc64-ba729a74e876"). InnerVolumeSpecName "neutron-sriov-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:39:53 crc kubenswrapper[4857]: I1128 15:39:53.798580 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-neutron-dhcp-combined-ca-bundle" (OuterVolumeSpecName: "neutron-dhcp-combined-ca-bundle") pod "4608f83d-58f5-4bba-bc64-ba729a74e876" (UID: "4608f83d-58f5-4bba-bc64-ba729a74e876"). InnerVolumeSpecName "neutron-dhcp-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:39:53 crc kubenswrapper[4857]: I1128 15:39:53.800089 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "4608f83d-58f5-4bba-bc64-ba729a74e876" (UID: "4608f83d-58f5-4bba-bc64-ba729a74e876"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:39:53 crc kubenswrapper[4857]: I1128 15:39:53.800202 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4608f83d-58f5-4bba-bc64-ba729a74e876-kube-api-access-vpmdn" (OuterVolumeSpecName: "kube-api-access-vpmdn") pod "4608f83d-58f5-4bba-bc64-ba729a74e876" (UID: "4608f83d-58f5-4bba-bc64-ba729a74e876"). InnerVolumeSpecName "kube-api-access-vpmdn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:39:53 crc kubenswrapper[4857]: I1128 15:39:53.800212 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "4608f83d-58f5-4bba-bc64-ba729a74e876" (UID: "4608f83d-58f5-4bba-bc64-ba729a74e876"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:39:53 crc kubenswrapper[4857]: I1128 15:39:53.800800 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "4608f83d-58f5-4bba-bc64-ba729a74e876" (UID: "4608f83d-58f5-4bba-bc64-ba729a74e876"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:39:53 crc kubenswrapper[4857]: I1128 15:39:53.801531 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "4608f83d-58f5-4bba-bc64-ba729a74e876" (UID: "4608f83d-58f5-4bba-bc64-ba729a74e876"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:39:53 crc kubenswrapper[4857]: I1128 15:39:53.802629 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-ceph" (OuterVolumeSpecName: "ceph") pod "4608f83d-58f5-4bba-bc64-ba729a74e876" (UID: "4608f83d-58f5-4bba-bc64-ba729a74e876"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:39:53 crc kubenswrapper[4857]: I1128 15:39:53.803104 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "4608f83d-58f5-4bba-bc64-ba729a74e876" (UID: "4608f83d-58f5-4bba-bc64-ba729a74e876"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:39:53 crc kubenswrapper[4857]: I1128 15:39:53.827886 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-inventory" (OuterVolumeSpecName: "inventory") pod "4608f83d-58f5-4bba-bc64-ba729a74e876" (UID: "4608f83d-58f5-4bba-bc64-ba729a74e876"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:39:53 crc kubenswrapper[4857]: I1128 15:39:53.835397 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "4608f83d-58f5-4bba-bc64-ba729a74e876" (UID: "4608f83d-58f5-4bba-bc64-ba729a74e876"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:39:53 crc kubenswrapper[4857]: I1128 15:39:53.894266 4857 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:53 crc kubenswrapper[4857]: I1128 15:39:53.894324 4857 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:53 crc kubenswrapper[4857]: I1128 15:39:53.894347 4857 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:53 crc kubenswrapper[4857]: I1128 15:39:53.894366 4857 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:53 crc kubenswrapper[4857]: I1128 15:39:53.894385 4857 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:53 crc kubenswrapper[4857]: I1128 15:39:53.894402 4857 reconciler_common.go:293] "Volume detached for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-neutron-dhcp-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:53 crc kubenswrapper[4857]: I1128 15:39:53.894421 4857 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:53 crc kubenswrapper[4857]: I1128 15:39:53.894440 4857 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:53 crc kubenswrapper[4857]: I1128 15:39:53.894455 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vpmdn\" (UniqueName: \"kubernetes.io/projected/4608f83d-58f5-4bba-bc64-ba729a74e876-kube-api-access-vpmdn\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:53 crc kubenswrapper[4857]: I1128 15:39:53.894475 4857 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:53 crc kubenswrapper[4857]: I1128 15:39:53.894491 4857 reconciler_common.go:293] "Volume detached for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-neutron-sriov-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:53 crc kubenswrapper[4857]: I1128 15:39:53.894510 4857 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4608f83d-58f5-4bba-bc64-ba729a74e876-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:39:54 crc kubenswrapper[4857]: I1128 15:39:54.189438 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" event={"ID":"4608f83d-58f5-4bba-bc64-ba729a74e876","Type":"ContainerDied","Data":"ab727addc3fbd7ab267adcac1951d5dc2115ed6ff922d1aa9046cfa1c57883ce"} Nov 28 15:39:54 crc kubenswrapper[4857]: I1128 15:39:54.189485 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ab727addc3fbd7ab267adcac1951d5dc2115ed6ff922d1aa9046cfa1c57883ce" Nov 28 15:39:54 crc kubenswrapper[4857]: I1128 15:39:54.189594 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-openstack-openstack-cell1-fwjs4" Nov 28 15:39:54 crc kubenswrapper[4857]: I1128 15:39:54.325014 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceph-client-openstack-openstack-cell1-ffn9w"] Nov 28 15:39:54 crc kubenswrapper[4857]: E1128 15:39:54.325670 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4608f83d-58f5-4bba-bc64-ba729a74e876" containerName="install-certs-openstack-openstack-cell1" Nov 28 15:39:54 crc kubenswrapper[4857]: I1128 15:39:54.325689 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="4608f83d-58f5-4bba-bc64-ba729a74e876" containerName="install-certs-openstack-openstack-cell1" Nov 28 15:39:54 crc kubenswrapper[4857]: I1128 15:39:54.325966 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="4608f83d-58f5-4bba-bc64-ba729a74e876" containerName="install-certs-openstack-openstack-cell1" Nov 28 15:39:54 crc kubenswrapper[4857]: I1128 15:39:54.327094 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-openstack-openstack-cell1-ffn9w" Nov 28 15:39:54 crc kubenswrapper[4857]: I1128 15:39:54.333432 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 15:39:54 crc kubenswrapper[4857]: I1128 15:39:54.333508 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-qbvn9" Nov 28 15:39:54 crc kubenswrapper[4857]: I1128 15:39:54.333597 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 15:39:54 crc kubenswrapper[4857]: I1128 15:39:54.333864 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 15:39:54 crc kubenswrapper[4857]: I1128 15:39:54.336882 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-client-openstack-openstack-cell1-ffn9w"] Nov 28 15:39:54 crc kubenswrapper[4857]: I1128 15:39:54.530408 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3c944ef4-8d62-4a01-8b7c-da8fba5d942f-ssh-key\") pod \"ceph-client-openstack-openstack-cell1-ffn9w\" (UID: \"3c944ef4-8d62-4a01-8b7c-da8fba5d942f\") " pod="openstack/ceph-client-openstack-openstack-cell1-ffn9w" Nov 28 15:39:54 crc kubenswrapper[4857]: I1128 15:39:54.530638 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3c944ef4-8d62-4a01-8b7c-da8fba5d942f-inventory\") pod \"ceph-client-openstack-openstack-cell1-ffn9w\" (UID: \"3c944ef4-8d62-4a01-8b7c-da8fba5d942f\") " pod="openstack/ceph-client-openstack-openstack-cell1-ffn9w" Nov 28 15:39:54 crc kubenswrapper[4857]: I1128 15:39:54.530702 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/3c944ef4-8d62-4a01-8b7c-da8fba5d942f-ceph\") pod \"ceph-client-openstack-openstack-cell1-ffn9w\" (UID: \"3c944ef4-8d62-4a01-8b7c-da8fba5d942f\") " pod="openstack/ceph-client-openstack-openstack-cell1-ffn9w" Nov 28 15:39:54 crc kubenswrapper[4857]: I1128 15:39:54.530891 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wmj4v\" (UniqueName: \"kubernetes.io/projected/3c944ef4-8d62-4a01-8b7c-da8fba5d942f-kube-api-access-wmj4v\") pod \"ceph-client-openstack-openstack-cell1-ffn9w\" (UID: \"3c944ef4-8d62-4a01-8b7c-da8fba5d942f\") " pod="openstack/ceph-client-openstack-openstack-cell1-ffn9w" Nov 28 15:39:54 crc kubenswrapper[4857]: I1128 15:39:54.633139 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wmj4v\" (UniqueName: \"kubernetes.io/projected/3c944ef4-8d62-4a01-8b7c-da8fba5d942f-kube-api-access-wmj4v\") pod \"ceph-client-openstack-openstack-cell1-ffn9w\" (UID: \"3c944ef4-8d62-4a01-8b7c-da8fba5d942f\") " pod="openstack/ceph-client-openstack-openstack-cell1-ffn9w" Nov 28 15:39:54 crc kubenswrapper[4857]: I1128 15:39:54.633207 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3c944ef4-8d62-4a01-8b7c-da8fba5d942f-ssh-key\") pod \"ceph-client-openstack-openstack-cell1-ffn9w\" (UID: \"3c944ef4-8d62-4a01-8b7c-da8fba5d942f\") " pod="openstack/ceph-client-openstack-openstack-cell1-ffn9w" Nov 28 15:39:54 crc kubenswrapper[4857]: I1128 15:39:54.633340 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3c944ef4-8d62-4a01-8b7c-da8fba5d942f-inventory\") pod \"ceph-client-openstack-openstack-cell1-ffn9w\" (UID: \"3c944ef4-8d62-4a01-8b7c-da8fba5d942f\") " pod="openstack/ceph-client-openstack-openstack-cell1-ffn9w" Nov 28 15:39:54 crc kubenswrapper[4857]: I1128 15:39:54.633427 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/3c944ef4-8d62-4a01-8b7c-da8fba5d942f-ceph\") pod \"ceph-client-openstack-openstack-cell1-ffn9w\" (UID: \"3c944ef4-8d62-4a01-8b7c-da8fba5d942f\") " pod="openstack/ceph-client-openstack-openstack-cell1-ffn9w" Nov 28 15:39:54 crc kubenswrapper[4857]: I1128 15:39:54.637839 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3c944ef4-8d62-4a01-8b7c-da8fba5d942f-ssh-key\") pod \"ceph-client-openstack-openstack-cell1-ffn9w\" (UID: \"3c944ef4-8d62-4a01-8b7c-da8fba5d942f\") " pod="openstack/ceph-client-openstack-openstack-cell1-ffn9w" Nov 28 15:39:54 crc kubenswrapper[4857]: I1128 15:39:54.637870 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3c944ef4-8d62-4a01-8b7c-da8fba5d942f-inventory\") pod \"ceph-client-openstack-openstack-cell1-ffn9w\" (UID: \"3c944ef4-8d62-4a01-8b7c-da8fba5d942f\") " pod="openstack/ceph-client-openstack-openstack-cell1-ffn9w" Nov 28 15:39:54 crc kubenswrapper[4857]: I1128 15:39:54.638612 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/3c944ef4-8d62-4a01-8b7c-da8fba5d942f-ceph\") pod \"ceph-client-openstack-openstack-cell1-ffn9w\" (UID: \"3c944ef4-8d62-4a01-8b7c-da8fba5d942f\") " pod="openstack/ceph-client-openstack-openstack-cell1-ffn9w" Nov 28 
15:39:54 crc kubenswrapper[4857]: I1128 15:39:54.663053 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wmj4v\" (UniqueName: \"kubernetes.io/projected/3c944ef4-8d62-4a01-8b7c-da8fba5d942f-kube-api-access-wmj4v\") pod \"ceph-client-openstack-openstack-cell1-ffn9w\" (UID: \"3c944ef4-8d62-4a01-8b7c-da8fba5d942f\") " pod="openstack/ceph-client-openstack-openstack-cell1-ffn9w" Nov 28 15:39:54 crc kubenswrapper[4857]: I1128 15:39:54.669621 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-openstack-openstack-cell1-ffn9w" Nov 28 15:39:55 crc kubenswrapper[4857]: I1128 15:39:55.231626 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceph-client-openstack-openstack-cell1-ffn9w"] Nov 28 15:39:56 crc kubenswrapper[4857]: I1128 15:39:56.217187 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-openstack-openstack-cell1-ffn9w" event={"ID":"3c944ef4-8d62-4a01-8b7c-da8fba5d942f","Type":"ContainerStarted","Data":"1d7891f83b0b7c078432c83dca9946d70c4472b0d138690069011c4b91200b11"} Nov 28 15:39:56 crc kubenswrapper[4857]: I1128 15:39:56.217962 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-openstack-openstack-cell1-ffn9w" event={"ID":"3c944ef4-8d62-4a01-8b7c-da8fba5d942f","Type":"ContainerStarted","Data":"968ae636d862921bd47ce8aec12c9bca087d7100a9e9207c84bed4ea35a4f879"} Nov 28 15:39:56 crc kubenswrapper[4857]: I1128 15:39:56.259482 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceph-client-openstack-openstack-cell1-ffn9w" podStartSLOduration=2.020813776 podStartE2EDuration="2.259461195s" podCreationTimestamp="2025-11-28 15:39:54 +0000 UTC" firstStartedPulling="2025-11-28 15:39:55.238416627 +0000 UTC m=+7845.362358064" lastFinishedPulling="2025-11-28 15:39:55.477064056 +0000 UTC m=+7845.601005483" observedRunningTime="2025-11-28 15:39:56.244744823 +0000 UTC m=+7846.368686260" watchObservedRunningTime="2025-11-28 15:39:56.259461195 +0000 UTC m=+7846.383402632" Nov 28 15:40:01 crc kubenswrapper[4857]: I1128 15:40:01.274908 4857 generic.go:334] "Generic (PLEG): container finished" podID="3c944ef4-8d62-4a01-8b7c-da8fba5d942f" containerID="1d7891f83b0b7c078432c83dca9946d70c4472b0d138690069011c4b91200b11" exitCode=0 Nov 28 15:40:01 crc kubenswrapper[4857]: I1128 15:40:01.274982 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-openstack-openstack-cell1-ffn9w" event={"ID":"3c944ef4-8d62-4a01-8b7c-da8fba5d942f","Type":"ContainerDied","Data":"1d7891f83b0b7c078432c83dca9946d70c4472b0d138690069011c4b91200b11"} Nov 28 15:40:02 crc kubenswrapper[4857]: I1128 15:40:02.849201 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph-client-openstack-openstack-cell1-ffn9w" Nov 28 15:40:02 crc kubenswrapper[4857]: I1128 15:40:02.923762 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/3c944ef4-8d62-4a01-8b7c-da8fba5d942f-ceph\") pod \"3c944ef4-8d62-4a01-8b7c-da8fba5d942f\" (UID: \"3c944ef4-8d62-4a01-8b7c-da8fba5d942f\") " Nov 28 15:40:02 crc kubenswrapper[4857]: I1128 15:40:02.924309 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3c944ef4-8d62-4a01-8b7c-da8fba5d942f-ssh-key\") pod \"3c944ef4-8d62-4a01-8b7c-da8fba5d942f\" (UID: \"3c944ef4-8d62-4a01-8b7c-da8fba5d942f\") " Nov 28 15:40:02 crc kubenswrapper[4857]: I1128 15:40:02.924525 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wmj4v\" (UniqueName: \"kubernetes.io/projected/3c944ef4-8d62-4a01-8b7c-da8fba5d942f-kube-api-access-wmj4v\") pod \"3c944ef4-8d62-4a01-8b7c-da8fba5d942f\" (UID: \"3c944ef4-8d62-4a01-8b7c-da8fba5d942f\") " Nov 28 15:40:02 crc kubenswrapper[4857]: I1128 15:40:02.924611 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3c944ef4-8d62-4a01-8b7c-da8fba5d942f-inventory\") pod \"3c944ef4-8d62-4a01-8b7c-da8fba5d942f\" (UID: \"3c944ef4-8d62-4a01-8b7c-da8fba5d942f\") " Nov 28 15:40:02 crc kubenswrapper[4857]: I1128 15:40:02.945695 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c944ef4-8d62-4a01-8b7c-da8fba5d942f-ceph" (OuterVolumeSpecName: "ceph") pod "3c944ef4-8d62-4a01-8b7c-da8fba5d942f" (UID: "3c944ef4-8d62-4a01-8b7c-da8fba5d942f"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:40:02 crc kubenswrapper[4857]: I1128 15:40:02.945800 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c944ef4-8d62-4a01-8b7c-da8fba5d942f-kube-api-access-wmj4v" (OuterVolumeSpecName: "kube-api-access-wmj4v") pod "3c944ef4-8d62-4a01-8b7c-da8fba5d942f" (UID: "3c944ef4-8d62-4a01-8b7c-da8fba5d942f"). InnerVolumeSpecName "kube-api-access-wmj4v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:40:02 crc kubenswrapper[4857]: I1128 15:40:02.964323 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c944ef4-8d62-4a01-8b7c-da8fba5d942f-inventory" (OuterVolumeSpecName: "inventory") pod "3c944ef4-8d62-4a01-8b7c-da8fba5d942f" (UID: "3c944ef4-8d62-4a01-8b7c-da8fba5d942f"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:40:02 crc kubenswrapper[4857]: I1128 15:40:02.966916 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c944ef4-8d62-4a01-8b7c-da8fba5d942f-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "3c944ef4-8d62-4a01-8b7c-da8fba5d942f" (UID: "3c944ef4-8d62-4a01-8b7c-da8fba5d942f"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:40:03 crc kubenswrapper[4857]: I1128 15:40:03.027104 4857 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3c944ef4-8d62-4a01-8b7c-da8fba5d942f-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:03 crc kubenswrapper[4857]: I1128 15:40:03.027149 4857 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/3c944ef4-8d62-4a01-8b7c-da8fba5d942f-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:03 crc kubenswrapper[4857]: I1128 15:40:03.027160 4857 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3c944ef4-8d62-4a01-8b7c-da8fba5d942f-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:03 crc kubenswrapper[4857]: I1128 15:40:03.027171 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wmj4v\" (UniqueName: \"kubernetes.io/projected/3c944ef4-8d62-4a01-8b7c-da8fba5d942f-kube-api-access-wmj4v\") on node \"crc\" DevicePath \"\"" Nov 28 15:40:03 crc kubenswrapper[4857]: I1128 15:40:03.307886 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph-client-openstack-openstack-cell1-ffn9w" event={"ID":"3c944ef4-8d62-4a01-8b7c-da8fba5d942f","Type":"ContainerDied","Data":"968ae636d862921bd47ce8aec12c9bca087d7100a9e9207c84bed4ea35a4f879"} Nov 28 15:40:03 crc kubenswrapper[4857]: I1128 15:40:03.307925 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="968ae636d862921bd47ce8aec12c9bca087d7100a9e9207c84bed4ea35a4f879" Nov 28 15:40:03 crc kubenswrapper[4857]: I1128 15:40:03.307963 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceph-client-openstack-openstack-cell1-ffn9w" Nov 28 15:40:03 crc kubenswrapper[4857]: I1128 15:40:03.415052 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-openstack-openstack-cell1-vxtjn"] Nov 28 15:40:03 crc kubenswrapper[4857]: E1128 15:40:03.415935 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c944ef4-8d62-4a01-8b7c-da8fba5d942f" containerName="ceph-client-openstack-openstack-cell1" Nov 28 15:40:03 crc kubenswrapper[4857]: I1128 15:40:03.416037 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c944ef4-8d62-4a01-8b7c-da8fba5d942f" containerName="ceph-client-openstack-openstack-cell1" Nov 28 15:40:03 crc kubenswrapper[4857]: I1128 15:40:03.416499 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c944ef4-8d62-4a01-8b7c-da8fba5d942f" containerName="ceph-client-openstack-openstack-cell1" Nov 28 15:40:03 crc kubenswrapper[4857]: I1128 15:40:03.417821 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-openstack-openstack-cell1-vxtjn" Nov 28 15:40:03 crc kubenswrapper[4857]: I1128 15:40:03.420385 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 15:40:03 crc kubenswrapper[4857]: I1128 15:40:03.420892 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Nov 28 15:40:03 crc kubenswrapper[4857]: I1128 15:40:03.421068 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 15:40:03 crc kubenswrapper[4857]: I1128 15:40:03.421243 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-qbvn9" Nov 28 15:40:03 crc kubenswrapper[4857]: I1128 15:40:03.426404 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 15:40:03 crc kubenswrapper[4857]: I1128 15:40:03.428421 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-openstack-openstack-cell1-vxtjn"] Nov 28 15:40:03 crc kubenswrapper[4857]: I1128 15:40:03.543048 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bd5526b8-d3ea-4744-9a5c-ec0c252b9391-inventory\") pod \"ovn-openstack-openstack-cell1-vxtjn\" (UID: \"bd5526b8-d3ea-4744-9a5c-ec0c252b9391\") " pod="openstack/ovn-openstack-openstack-cell1-vxtjn" Nov 28 15:40:03 crc kubenswrapper[4857]: I1128 15:40:03.543091 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rvpph\" (UniqueName: \"kubernetes.io/projected/bd5526b8-d3ea-4744-9a5c-ec0c252b9391-kube-api-access-rvpph\") pod \"ovn-openstack-openstack-cell1-vxtjn\" (UID: \"bd5526b8-d3ea-4744-9a5c-ec0c252b9391\") " pod="openstack/ovn-openstack-openstack-cell1-vxtjn" Nov 28 15:40:03 crc kubenswrapper[4857]: I1128 15:40:03.543117 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/bd5526b8-d3ea-4744-9a5c-ec0c252b9391-ceph\") pod \"ovn-openstack-openstack-cell1-vxtjn\" (UID: \"bd5526b8-d3ea-4744-9a5c-ec0c252b9391\") " pod="openstack/ovn-openstack-openstack-cell1-vxtjn" Nov 28 15:40:03 crc kubenswrapper[4857]: I1128 15:40:03.543282 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/bd5526b8-d3ea-4744-9a5c-ec0c252b9391-ovncontroller-config-0\") pod \"ovn-openstack-openstack-cell1-vxtjn\" (UID: \"bd5526b8-d3ea-4744-9a5c-ec0c252b9391\") " pod="openstack/ovn-openstack-openstack-cell1-vxtjn" Nov 28 15:40:03 crc kubenswrapper[4857]: I1128 15:40:03.543319 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bd5526b8-d3ea-4744-9a5c-ec0c252b9391-ssh-key\") pod \"ovn-openstack-openstack-cell1-vxtjn\" (UID: \"bd5526b8-d3ea-4744-9a5c-ec0c252b9391\") " pod="openstack/ovn-openstack-openstack-cell1-vxtjn" Nov 28 15:40:03 crc kubenswrapper[4857]: I1128 15:40:03.543390 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd5526b8-d3ea-4744-9a5c-ec0c252b9391-ovn-combined-ca-bundle\") pod \"ovn-openstack-openstack-cell1-vxtjn\" (UID: 
\"bd5526b8-d3ea-4744-9a5c-ec0c252b9391\") " pod="openstack/ovn-openstack-openstack-cell1-vxtjn" Nov 28 15:40:03 crc kubenswrapper[4857]: I1128 15:40:03.645314 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/bd5526b8-d3ea-4744-9a5c-ec0c252b9391-ovncontroller-config-0\") pod \"ovn-openstack-openstack-cell1-vxtjn\" (UID: \"bd5526b8-d3ea-4744-9a5c-ec0c252b9391\") " pod="openstack/ovn-openstack-openstack-cell1-vxtjn" Nov 28 15:40:03 crc kubenswrapper[4857]: I1128 15:40:03.645375 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bd5526b8-d3ea-4744-9a5c-ec0c252b9391-ssh-key\") pod \"ovn-openstack-openstack-cell1-vxtjn\" (UID: \"bd5526b8-d3ea-4744-9a5c-ec0c252b9391\") " pod="openstack/ovn-openstack-openstack-cell1-vxtjn" Nov 28 15:40:03 crc kubenswrapper[4857]: I1128 15:40:03.645452 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd5526b8-d3ea-4744-9a5c-ec0c252b9391-ovn-combined-ca-bundle\") pod \"ovn-openstack-openstack-cell1-vxtjn\" (UID: \"bd5526b8-d3ea-4744-9a5c-ec0c252b9391\") " pod="openstack/ovn-openstack-openstack-cell1-vxtjn" Nov 28 15:40:03 crc kubenswrapper[4857]: I1128 15:40:03.645527 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bd5526b8-d3ea-4744-9a5c-ec0c252b9391-inventory\") pod \"ovn-openstack-openstack-cell1-vxtjn\" (UID: \"bd5526b8-d3ea-4744-9a5c-ec0c252b9391\") " pod="openstack/ovn-openstack-openstack-cell1-vxtjn" Nov 28 15:40:03 crc kubenswrapper[4857]: I1128 15:40:03.645549 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rvpph\" (UniqueName: \"kubernetes.io/projected/bd5526b8-d3ea-4744-9a5c-ec0c252b9391-kube-api-access-rvpph\") pod \"ovn-openstack-openstack-cell1-vxtjn\" (UID: \"bd5526b8-d3ea-4744-9a5c-ec0c252b9391\") " pod="openstack/ovn-openstack-openstack-cell1-vxtjn" Nov 28 15:40:03 crc kubenswrapper[4857]: I1128 15:40:03.645570 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/bd5526b8-d3ea-4744-9a5c-ec0c252b9391-ceph\") pod \"ovn-openstack-openstack-cell1-vxtjn\" (UID: \"bd5526b8-d3ea-4744-9a5c-ec0c252b9391\") " pod="openstack/ovn-openstack-openstack-cell1-vxtjn" Nov 28 15:40:03 crc kubenswrapper[4857]: I1128 15:40:03.646451 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/bd5526b8-d3ea-4744-9a5c-ec0c252b9391-ovncontroller-config-0\") pod \"ovn-openstack-openstack-cell1-vxtjn\" (UID: \"bd5526b8-d3ea-4744-9a5c-ec0c252b9391\") " pod="openstack/ovn-openstack-openstack-cell1-vxtjn" Nov 28 15:40:03 crc kubenswrapper[4857]: I1128 15:40:03.650544 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bd5526b8-d3ea-4744-9a5c-ec0c252b9391-ssh-key\") pod \"ovn-openstack-openstack-cell1-vxtjn\" (UID: \"bd5526b8-d3ea-4744-9a5c-ec0c252b9391\") " pod="openstack/ovn-openstack-openstack-cell1-vxtjn" Nov 28 15:40:03 crc kubenswrapper[4857]: I1128 15:40:03.650654 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/bd5526b8-d3ea-4744-9a5c-ec0c252b9391-ceph\") pod 
\"ovn-openstack-openstack-cell1-vxtjn\" (UID: \"bd5526b8-d3ea-4744-9a5c-ec0c252b9391\") " pod="openstack/ovn-openstack-openstack-cell1-vxtjn" Nov 28 15:40:03 crc kubenswrapper[4857]: I1128 15:40:03.650973 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bd5526b8-d3ea-4744-9a5c-ec0c252b9391-inventory\") pod \"ovn-openstack-openstack-cell1-vxtjn\" (UID: \"bd5526b8-d3ea-4744-9a5c-ec0c252b9391\") " pod="openstack/ovn-openstack-openstack-cell1-vxtjn" Nov 28 15:40:03 crc kubenswrapper[4857]: I1128 15:40:03.651780 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd5526b8-d3ea-4744-9a5c-ec0c252b9391-ovn-combined-ca-bundle\") pod \"ovn-openstack-openstack-cell1-vxtjn\" (UID: \"bd5526b8-d3ea-4744-9a5c-ec0c252b9391\") " pod="openstack/ovn-openstack-openstack-cell1-vxtjn" Nov 28 15:40:03 crc kubenswrapper[4857]: I1128 15:40:03.667789 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rvpph\" (UniqueName: \"kubernetes.io/projected/bd5526b8-d3ea-4744-9a5c-ec0c252b9391-kube-api-access-rvpph\") pod \"ovn-openstack-openstack-cell1-vxtjn\" (UID: \"bd5526b8-d3ea-4744-9a5c-ec0c252b9391\") " pod="openstack/ovn-openstack-openstack-cell1-vxtjn" Nov 28 15:40:03 crc kubenswrapper[4857]: I1128 15:40:03.751677 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-openstack-openstack-cell1-vxtjn" Nov 28 15:40:04 crc kubenswrapper[4857]: I1128 15:40:04.312914 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-openstack-openstack-cell1-vxtjn"] Nov 28 15:40:04 crc kubenswrapper[4857]: W1128 15:40:04.315294 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbd5526b8_d3ea_4744_9a5c_ec0c252b9391.slice/crio-2e3cab641fbfd2c2ba21d6d0d11f438997a1cd8a8fe98977cb7b14b57f64d16a WatchSource:0}: Error finding container 2e3cab641fbfd2c2ba21d6d0d11f438997a1cd8a8fe98977cb7b14b57f64d16a: Status 404 returned error can't find the container with id 2e3cab641fbfd2c2ba21d6d0d11f438997a1cd8a8fe98977cb7b14b57f64d16a Nov 28 15:40:05 crc kubenswrapper[4857]: I1128 15:40:05.344572 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-openstack-openstack-cell1-vxtjn" event={"ID":"bd5526b8-d3ea-4744-9a5c-ec0c252b9391","Type":"ContainerStarted","Data":"affab7dc6cc85edf33e225566ed1a2d4366b2ff0e380520d23c0c95d8738fb7c"} Nov 28 15:40:05 crc kubenswrapper[4857]: I1128 15:40:05.344885 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-openstack-openstack-cell1-vxtjn" event={"ID":"bd5526b8-d3ea-4744-9a5c-ec0c252b9391","Type":"ContainerStarted","Data":"2e3cab641fbfd2c2ba21d6d0d11f438997a1cd8a8fe98977cb7b14b57f64d16a"} Nov 28 15:40:05 crc kubenswrapper[4857]: I1128 15:40:05.372847 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-openstack-openstack-cell1-vxtjn" podStartSLOduration=2.161756844 podStartE2EDuration="2.372829829s" podCreationTimestamp="2025-11-28 15:40:03 +0000 UTC" firstStartedPulling="2025-11-28 15:40:04.317754494 +0000 UTC m=+7854.441695931" lastFinishedPulling="2025-11-28 15:40:04.528827479 +0000 UTC m=+7854.652768916" observedRunningTime="2025-11-28 15:40:05.366982063 +0000 UTC m=+7855.490923500" watchObservedRunningTime="2025-11-28 15:40:05.372829829 +0000 UTC m=+7855.496771256" Nov 28 15:40:11 crc 
kubenswrapper[4857]: I1128 15:40:11.309633 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:40:11 crc kubenswrapper[4857]: I1128 15:40:11.310372 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:40:41 crc kubenswrapper[4857]: I1128 15:40:41.308051 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:40:41 crc kubenswrapper[4857]: I1128 15:40:41.308564 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:40:41 crc kubenswrapper[4857]: I1128 15:40:41.308611 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 15:40:41 crc kubenswrapper[4857]: I1128 15:40:41.309722 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2fb8ed319a4d63abaa543df3c6375c2d445c7842c0e60d5249ec84ade1868d55"} pod="openshift-machine-config-operator/machine-config-daemon-dshsf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 15:40:41 crc kubenswrapper[4857]: I1128 15:40:41.309792 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" containerID="cri-o://2fb8ed319a4d63abaa543df3c6375c2d445c7842c0e60d5249ec84ade1868d55" gracePeriod=600 Nov 28 15:40:41 crc kubenswrapper[4857]: E1128 15:40:41.666271 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:40:41 crc kubenswrapper[4857]: I1128 15:40:41.930535 4857 generic.go:334] "Generic (PLEG): container finished" podID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerID="2fb8ed319a4d63abaa543df3c6375c2d445c7842c0e60d5249ec84ade1868d55" exitCode=0 Nov 28 15:40:41 crc kubenswrapper[4857]: I1128 15:40:41.930612 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" 
event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerDied","Data":"2fb8ed319a4d63abaa543df3c6375c2d445c7842c0e60d5249ec84ade1868d55"} Nov 28 15:40:41 crc kubenswrapper[4857]: I1128 15:40:41.930938 4857 scope.go:117] "RemoveContainer" containerID="a38ec213781fad7de97f923228698d06b20cca26201275d6ad39d000d8a37327" Nov 28 15:40:41 crc kubenswrapper[4857]: I1128 15:40:41.931806 4857 scope.go:117] "RemoveContainer" containerID="2fb8ed319a4d63abaa543df3c6375c2d445c7842c0e60d5249ec84ade1868d55" Nov 28 15:40:41 crc kubenswrapper[4857]: E1128 15:40:41.932224 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:40:52 crc kubenswrapper[4857]: I1128 15:40:52.229117 4857 scope.go:117] "RemoveContainer" containerID="2fb8ed319a4d63abaa543df3c6375c2d445c7842c0e60d5249ec84ade1868d55" Nov 28 15:40:52 crc kubenswrapper[4857]: E1128 15:40:52.230351 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:41:03 crc kubenswrapper[4857]: I1128 15:41:03.245559 4857 scope.go:117] "RemoveContainer" containerID="2fb8ed319a4d63abaa543df3c6375c2d445c7842c0e60d5249ec84ade1868d55" Nov 28 15:41:03 crc kubenswrapper[4857]: E1128 15:41:03.247387 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:41:13 crc kubenswrapper[4857]: I1128 15:41:13.350105 4857 generic.go:334] "Generic (PLEG): container finished" podID="bd5526b8-d3ea-4744-9a5c-ec0c252b9391" containerID="affab7dc6cc85edf33e225566ed1a2d4366b2ff0e380520d23c0c95d8738fb7c" exitCode=0 Nov 28 15:41:13 crc kubenswrapper[4857]: I1128 15:41:13.350196 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-openstack-openstack-cell1-vxtjn" event={"ID":"bd5526b8-d3ea-4744-9a5c-ec0c252b9391","Type":"ContainerDied","Data":"affab7dc6cc85edf33e225566ed1a2d4366b2ff0e380520d23c0c95d8738fb7c"} Nov 28 15:41:14 crc kubenswrapper[4857]: I1128 15:41:14.901328 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-openstack-openstack-cell1-vxtjn" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.063731 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/bd5526b8-d3ea-4744-9a5c-ec0c252b9391-ceph\") pod \"bd5526b8-d3ea-4744-9a5c-ec0c252b9391\" (UID: \"bd5526b8-d3ea-4744-9a5c-ec0c252b9391\") " Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.063926 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rvpph\" (UniqueName: \"kubernetes.io/projected/bd5526b8-d3ea-4744-9a5c-ec0c252b9391-kube-api-access-rvpph\") pod \"bd5526b8-d3ea-4744-9a5c-ec0c252b9391\" (UID: \"bd5526b8-d3ea-4744-9a5c-ec0c252b9391\") " Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.064017 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/bd5526b8-d3ea-4744-9a5c-ec0c252b9391-ovncontroller-config-0\") pod \"bd5526b8-d3ea-4744-9a5c-ec0c252b9391\" (UID: \"bd5526b8-d3ea-4744-9a5c-ec0c252b9391\") " Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.064043 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bd5526b8-d3ea-4744-9a5c-ec0c252b9391-ssh-key\") pod \"bd5526b8-d3ea-4744-9a5c-ec0c252b9391\" (UID: \"bd5526b8-d3ea-4744-9a5c-ec0c252b9391\") " Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.064154 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bd5526b8-d3ea-4744-9a5c-ec0c252b9391-inventory\") pod \"bd5526b8-d3ea-4744-9a5c-ec0c252b9391\" (UID: \"bd5526b8-d3ea-4744-9a5c-ec0c252b9391\") " Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.064202 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd5526b8-d3ea-4744-9a5c-ec0c252b9391-ovn-combined-ca-bundle\") pod \"bd5526b8-d3ea-4744-9a5c-ec0c252b9391\" (UID: \"bd5526b8-d3ea-4744-9a5c-ec0c252b9391\") " Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.070015 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd5526b8-d3ea-4744-9a5c-ec0c252b9391-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "bd5526b8-d3ea-4744-9a5c-ec0c252b9391" (UID: "bd5526b8-d3ea-4744-9a5c-ec0c252b9391"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.070348 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd5526b8-d3ea-4744-9a5c-ec0c252b9391-kube-api-access-rvpph" (OuterVolumeSpecName: "kube-api-access-rvpph") pod "bd5526b8-d3ea-4744-9a5c-ec0c252b9391" (UID: "bd5526b8-d3ea-4744-9a5c-ec0c252b9391"). InnerVolumeSpecName "kube-api-access-rvpph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.075293 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd5526b8-d3ea-4744-9a5c-ec0c252b9391-ceph" (OuterVolumeSpecName: "ceph") pod "bd5526b8-d3ea-4744-9a5c-ec0c252b9391" (UID: "bd5526b8-d3ea-4744-9a5c-ec0c252b9391"). InnerVolumeSpecName "ceph". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.094681 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd5526b8-d3ea-4744-9a5c-ec0c252b9391-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "bd5526b8-d3ea-4744-9a5c-ec0c252b9391" (UID: "bd5526b8-d3ea-4744-9a5c-ec0c252b9391"). InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.096230 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd5526b8-d3ea-4744-9a5c-ec0c252b9391-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "bd5526b8-d3ea-4744-9a5c-ec0c252b9391" (UID: "bd5526b8-d3ea-4744-9a5c-ec0c252b9391"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.107619 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd5526b8-d3ea-4744-9a5c-ec0c252b9391-inventory" (OuterVolumeSpecName: "inventory") pod "bd5526b8-d3ea-4744-9a5c-ec0c252b9391" (UID: "bd5526b8-d3ea-4744-9a5c-ec0c252b9391"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.169607 4857 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd5526b8-d3ea-4744-9a5c-ec0c252b9391-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.169664 4857 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/bd5526b8-d3ea-4744-9a5c-ec0c252b9391-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.169685 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rvpph\" (UniqueName: \"kubernetes.io/projected/bd5526b8-d3ea-4744-9a5c-ec0c252b9391-kube-api-access-rvpph\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.169705 4857 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/bd5526b8-d3ea-4744-9a5c-ec0c252b9391-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.169723 4857 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/bd5526b8-d3ea-4744-9a5c-ec0c252b9391-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.169740 4857 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/bd5526b8-d3ea-4744-9a5c-ec0c252b9391-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.229454 4857 scope.go:117] "RemoveContainer" containerID="2fb8ed319a4d63abaa543df3c6375c2d445c7842c0e60d5249ec84ade1868d55" Nov 28 15:41:15 crc kubenswrapper[4857]: E1128 15:41:15.230159 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.372550 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-openstack-openstack-cell1-vxtjn" event={"ID":"bd5526b8-d3ea-4744-9a5c-ec0c252b9391","Type":"ContainerDied","Data":"2e3cab641fbfd2c2ba21d6d0d11f438997a1cd8a8fe98977cb7b14b57f64d16a"} Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.372610 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-openstack-openstack-cell1-vxtjn" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.372611 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2e3cab641fbfd2c2ba21d6d0d11f438997a1cd8a8fe98977cb7b14b57f64d16a" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.465818 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-openstack-openstack-cell1-q4kwz"] Nov 28 15:41:15 crc kubenswrapper[4857]: E1128 15:41:15.469332 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd5526b8-d3ea-4744-9a5c-ec0c252b9391" containerName="ovn-openstack-openstack-cell1" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.469360 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd5526b8-d3ea-4744-9a5c-ec0c252b9391" containerName="ovn-openstack-openstack-cell1" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.469597 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd5526b8-d3ea-4744-9a5c-ec0c252b9391" containerName="ovn-openstack-openstack-cell1" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.470351 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-cell1-q4kwz" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.472154 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.475622 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.475795 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.476035 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.476174 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.476332 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-qbvn9" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.478142 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-openstack-openstack-cell1-q4kwz"] Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.577368 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/599f242a-30de-4d34-be24-3695bc92abcf-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-openstack-openstack-cell1-q4kwz\" (UID: \"599f242a-30de-4d34-be24-3695bc92abcf\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-q4kwz" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.577447 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/599f242a-30de-4d34-be24-3695bc92abcf-nova-metadata-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-q4kwz\" (UID: \"599f242a-30de-4d34-be24-3695bc92abcf\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-q4kwz" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.577503 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/599f242a-30de-4d34-be24-3695bc92abcf-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-q4kwz\" (UID: \"599f242a-30de-4d34-be24-3695bc92abcf\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-q4kwz" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.577521 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h58hq\" (UniqueName: \"kubernetes.io/projected/599f242a-30de-4d34-be24-3695bc92abcf-kube-api-access-h58hq\") pod \"neutron-metadata-openstack-openstack-cell1-q4kwz\" (UID: \"599f242a-30de-4d34-be24-3695bc92abcf\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-q4kwz" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.577553 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/599f242a-30de-4d34-be24-3695bc92abcf-ssh-key\") pod 
\"neutron-metadata-openstack-openstack-cell1-q4kwz\" (UID: \"599f242a-30de-4d34-be24-3695bc92abcf\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-q4kwz" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.577581 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/599f242a-30de-4d34-be24-3695bc92abcf-inventory\") pod \"neutron-metadata-openstack-openstack-cell1-q4kwz\" (UID: \"599f242a-30de-4d34-be24-3695bc92abcf\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-q4kwz" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.577636 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/599f242a-30de-4d34-be24-3695bc92abcf-ceph\") pod \"neutron-metadata-openstack-openstack-cell1-q4kwz\" (UID: \"599f242a-30de-4d34-be24-3695bc92abcf\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-q4kwz" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.680357 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/599f242a-30de-4d34-be24-3695bc92abcf-ceph\") pod \"neutron-metadata-openstack-openstack-cell1-q4kwz\" (UID: \"599f242a-30de-4d34-be24-3695bc92abcf\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-q4kwz" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.680502 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/599f242a-30de-4d34-be24-3695bc92abcf-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-openstack-openstack-cell1-q4kwz\" (UID: \"599f242a-30de-4d34-be24-3695bc92abcf\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-q4kwz" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.680543 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/599f242a-30de-4d34-be24-3695bc92abcf-nova-metadata-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-q4kwz\" (UID: \"599f242a-30de-4d34-be24-3695bc92abcf\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-q4kwz" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.680588 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/599f242a-30de-4d34-be24-3695bc92abcf-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-q4kwz\" (UID: \"599f242a-30de-4d34-be24-3695bc92abcf\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-q4kwz" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.680606 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h58hq\" (UniqueName: \"kubernetes.io/projected/599f242a-30de-4d34-be24-3695bc92abcf-kube-api-access-h58hq\") pod \"neutron-metadata-openstack-openstack-cell1-q4kwz\" (UID: \"599f242a-30de-4d34-be24-3695bc92abcf\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-q4kwz" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.680635 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/599f242a-30de-4d34-be24-3695bc92abcf-ssh-key\") pod 
\"neutron-metadata-openstack-openstack-cell1-q4kwz\" (UID: \"599f242a-30de-4d34-be24-3695bc92abcf\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-q4kwz" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.680663 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/599f242a-30de-4d34-be24-3695bc92abcf-inventory\") pod \"neutron-metadata-openstack-openstack-cell1-q4kwz\" (UID: \"599f242a-30de-4d34-be24-3695bc92abcf\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-q4kwz" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.686168 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/599f242a-30de-4d34-be24-3695bc92abcf-ssh-key\") pod \"neutron-metadata-openstack-openstack-cell1-q4kwz\" (UID: \"599f242a-30de-4d34-be24-3695bc92abcf\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-q4kwz" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.686281 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/599f242a-30de-4d34-be24-3695bc92abcf-nova-metadata-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-q4kwz\" (UID: \"599f242a-30de-4d34-be24-3695bc92abcf\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-q4kwz" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.686438 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/599f242a-30de-4d34-be24-3695bc92abcf-inventory\") pod \"neutron-metadata-openstack-openstack-cell1-q4kwz\" (UID: \"599f242a-30de-4d34-be24-3695bc92abcf\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-q4kwz" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.686718 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/599f242a-30de-4d34-be24-3695bc92abcf-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-q4kwz\" (UID: \"599f242a-30de-4d34-be24-3695bc92abcf\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-q4kwz" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.688733 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/599f242a-30de-4d34-be24-3695bc92abcf-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-openstack-openstack-cell1-q4kwz\" (UID: \"599f242a-30de-4d34-be24-3695bc92abcf\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-q4kwz" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.692670 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/599f242a-30de-4d34-be24-3695bc92abcf-ceph\") pod \"neutron-metadata-openstack-openstack-cell1-q4kwz\" (UID: \"599f242a-30de-4d34-be24-3695bc92abcf\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-q4kwz" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.703120 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h58hq\" (UniqueName: \"kubernetes.io/projected/599f242a-30de-4d34-be24-3695bc92abcf-kube-api-access-h58hq\") pod \"neutron-metadata-openstack-openstack-cell1-q4kwz\" (UID: \"599f242a-30de-4d34-be24-3695bc92abcf\") " 
pod="openstack/neutron-metadata-openstack-openstack-cell1-q4kwz" Nov 28 15:41:15 crc kubenswrapper[4857]: I1128 15:41:15.833649 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-cell1-q4kwz" Nov 28 15:41:16 crc kubenswrapper[4857]: I1128 15:41:16.379698 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-openstack-openstack-cell1-q4kwz"] Nov 28 15:41:17 crc kubenswrapper[4857]: I1128 15:41:17.396759 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-cell1-q4kwz" event={"ID":"599f242a-30de-4d34-be24-3695bc92abcf","Type":"ContainerStarted","Data":"7d04bbe1433dd0fed9245439156d68eb9ce776d7bd0d814d8cde5bc07cae470a"} Nov 28 15:41:17 crc kubenswrapper[4857]: I1128 15:41:17.397706 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-cell1-q4kwz" event={"ID":"599f242a-30de-4d34-be24-3695bc92abcf","Type":"ContainerStarted","Data":"612730da8c498a52f352795b565d1c8f830e4a9f99986a0766f91fde38297240"} Nov 28 15:41:17 crc kubenswrapper[4857]: I1128 15:41:17.422478 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-openstack-openstack-cell1-q4kwz" podStartSLOduration=2.183311057 podStartE2EDuration="2.42245646s" podCreationTimestamp="2025-11-28 15:41:15 +0000 UTC" firstStartedPulling="2025-11-28 15:41:16.385047535 +0000 UTC m=+7926.508988972" lastFinishedPulling="2025-11-28 15:41:16.624192938 +0000 UTC m=+7926.748134375" observedRunningTime="2025-11-28 15:41:17.42210127 +0000 UTC m=+7927.546042787" watchObservedRunningTime="2025-11-28 15:41:17.42245646 +0000 UTC m=+7927.546397907" Nov 28 15:41:29 crc kubenswrapper[4857]: I1128 15:41:29.229177 4857 scope.go:117] "RemoveContainer" containerID="2fb8ed319a4d63abaa543df3c6375c2d445c7842c0e60d5249ec84ade1868d55" Nov 28 15:41:29 crc kubenswrapper[4857]: E1128 15:41:29.229792 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:41:42 crc kubenswrapper[4857]: I1128 15:41:42.230252 4857 scope.go:117] "RemoveContainer" containerID="2fb8ed319a4d63abaa543df3c6375c2d445c7842c0e60d5249ec84ade1868d55" Nov 28 15:41:42 crc kubenswrapper[4857]: E1128 15:41:42.231271 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:41:57 crc kubenswrapper[4857]: I1128 15:41:57.229624 4857 scope.go:117] "RemoveContainer" containerID="2fb8ed319a4d63abaa543df3c6375c2d445c7842c0e60d5249ec84ade1868d55" Nov 28 15:41:57 crc kubenswrapper[4857]: E1128 15:41:57.230434 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
Nov 28 15:41:29 crc kubenswrapper[4857]: I1128 15:41:29.229177 4857 scope.go:117] "RemoveContainer" containerID="2fb8ed319a4d63abaa543df3c6375c2d445c7842c0e60d5249ec84ade1868d55"
Nov 28 15:41:29 crc kubenswrapper[4857]: E1128 15:41:29.229792 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 15:41:42 crc kubenswrapper[4857]: I1128 15:41:42.230252 4857 scope.go:117] "RemoveContainer" containerID="2fb8ed319a4d63abaa543df3c6375c2d445c7842c0e60d5249ec84ade1868d55"
Nov 28 15:41:42 crc kubenswrapper[4857]: E1128 15:41:42.231271 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 15:41:57 crc kubenswrapper[4857]: I1128 15:41:57.229624 4857 scope.go:117] "RemoveContainer" containerID="2fb8ed319a4d63abaa543df3c6375c2d445c7842c0e60d5249ec84ade1868d55"
Nov 28 15:41:57 crc kubenswrapper[4857]: E1128 15:41:57.230434 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 15:42:10 crc kubenswrapper[4857]: I1128 15:42:10.237371 4857 scope.go:117] "RemoveContainer" containerID="2fb8ed319a4d63abaa543df3c6375c2d445c7842c0e60d5249ec84ade1868d55"
Nov 28 15:42:10 crc kubenswrapper[4857]: E1128 15:42:10.239879 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 15:42:11 crc kubenswrapper[4857]: I1128 15:42:11.073452 4857 generic.go:334] "Generic (PLEG): container finished" podID="599f242a-30de-4d34-be24-3695bc92abcf" containerID="7d04bbe1433dd0fed9245439156d68eb9ce776d7bd0d814d8cde5bc07cae470a" exitCode=0
Nov 28 15:42:11 crc kubenswrapper[4857]: I1128 15:42:11.073562 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-cell1-q4kwz" event={"ID":"599f242a-30de-4d34-be24-3695bc92abcf","Type":"ContainerDied","Data":"7d04bbe1433dd0fed9245439156d68eb9ce776d7bd0d814d8cde5bc07cae470a"}
Nov 28 15:42:12 crc kubenswrapper[4857]: I1128 15:42:12.645960 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-cell1-q4kwz"
Nov 28 15:42:12 crc kubenswrapper[4857]: I1128 15:42:12.838123 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/599f242a-30de-4d34-be24-3695bc92abcf-ceph\") pod \"599f242a-30de-4d34-be24-3695bc92abcf\" (UID: \"599f242a-30de-4d34-be24-3695bc92abcf\") "
Nov 28 15:42:12 crc kubenswrapper[4857]: I1128 15:42:12.838380 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/599f242a-30de-4d34-be24-3695bc92abcf-ssh-key\") pod \"599f242a-30de-4d34-be24-3695bc92abcf\" (UID: \"599f242a-30de-4d34-be24-3695bc92abcf\") "
Nov 28 15:42:12 crc kubenswrapper[4857]: I1128 15:42:12.838415 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h58hq\" (UniqueName: \"kubernetes.io/projected/599f242a-30de-4d34-be24-3695bc92abcf-kube-api-access-h58hq\") pod \"599f242a-30de-4d34-be24-3695bc92abcf\" (UID: \"599f242a-30de-4d34-be24-3695bc92abcf\") "
Nov 28 15:42:12 crc kubenswrapper[4857]: I1128 15:42:12.838475 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/599f242a-30de-4d34-be24-3695bc92abcf-inventory\") pod \"599f242a-30de-4d34-be24-3695bc92abcf\" (UID: \"599f242a-30de-4d34-be24-3695bc92abcf\") "
Nov 28 15:42:12 crc kubenswrapper[4857]: I1128 15:42:12.838513 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/599f242a-30de-4d34-be24-3695bc92abcf-neutron-metadata-combined-ca-bundle\") pod \"599f242a-30de-4d34-be24-3695bc92abcf\" (UID: \"599f242a-30de-4d34-be24-3695bc92abcf\") "
Nov 28 15:42:12 crc kubenswrapper[4857]: I1128 15:42:12.838546 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/599f242a-30de-4d34-be24-3695bc92abcf-neutron-ovn-metadata-agent-neutron-config-0\") pod \"599f242a-30de-4d34-be24-3695bc92abcf\" (UID: \"599f242a-30de-4d34-be24-3695bc92abcf\") "
Nov 28 15:42:12 crc kubenswrapper[4857]: I1128 15:42:12.838594 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/599f242a-30de-4d34-be24-3695bc92abcf-nova-metadata-neutron-config-0\") pod \"599f242a-30de-4d34-be24-3695bc92abcf\" (UID: \"599f242a-30de-4d34-be24-3695bc92abcf\") "
Nov 28 15:42:12 crc kubenswrapper[4857]: I1128 15:42:12.844310 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/599f242a-30de-4d34-be24-3695bc92abcf-ceph" (OuterVolumeSpecName: "ceph") pod "599f242a-30de-4d34-be24-3695bc92abcf" (UID: "599f242a-30de-4d34-be24-3695bc92abcf"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:42:12 crc kubenswrapper[4857]: I1128 15:42:12.850068 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/599f242a-30de-4d34-be24-3695bc92abcf-kube-api-access-h58hq" (OuterVolumeSpecName: "kube-api-access-h58hq") pod "599f242a-30de-4d34-be24-3695bc92abcf" (UID: "599f242a-30de-4d34-be24-3695bc92abcf"). InnerVolumeSpecName "kube-api-access-h58hq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:42:12 crc kubenswrapper[4857]: I1128 15:42:12.853516 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/599f242a-30de-4d34-be24-3695bc92abcf-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "599f242a-30de-4d34-be24-3695bc92abcf" (UID: "599f242a-30de-4d34-be24-3695bc92abcf"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:42:12 crc kubenswrapper[4857]: I1128 15:42:12.870494 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/599f242a-30de-4d34-be24-3695bc92abcf-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "599f242a-30de-4d34-be24-3695bc92abcf" (UID: "599f242a-30de-4d34-be24-3695bc92abcf"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:42:12 crc kubenswrapper[4857]: I1128 15:42:12.882266 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/599f242a-30de-4d34-be24-3695bc92abcf-inventory" (OuterVolumeSpecName: "inventory") pod "599f242a-30de-4d34-be24-3695bc92abcf" (UID: "599f242a-30de-4d34-be24-3695bc92abcf"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:42:12 crc kubenswrapper[4857]: I1128 15:42:12.886792 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/599f242a-30de-4d34-be24-3695bc92abcf-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "599f242a-30de-4d34-be24-3695bc92abcf" (UID: "599f242a-30de-4d34-be24-3695bc92abcf"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:42:12 crc kubenswrapper[4857]: I1128 15:42:12.891649 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/599f242a-30de-4d34-be24-3695bc92abcf-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "599f242a-30de-4d34-be24-3695bc92abcf" (UID: "599f242a-30de-4d34-be24-3695bc92abcf"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 15:42:12 crc kubenswrapper[4857]: I1128 15:42:12.941506 4857 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/599f242a-30de-4d34-be24-3695bc92abcf-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 28 15:42:12 crc kubenswrapper[4857]: I1128 15:42:12.941542 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h58hq\" (UniqueName: \"kubernetes.io/projected/599f242a-30de-4d34-be24-3695bc92abcf-kube-api-access-h58hq\") on node \"crc\" DevicePath \"\""
Nov 28 15:42:12 crc kubenswrapper[4857]: I1128 15:42:12.941559 4857 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/599f242a-30de-4d34-be24-3695bc92abcf-inventory\") on node \"crc\" DevicePath \"\""
Nov 28 15:42:12 crc kubenswrapper[4857]: I1128 15:42:12.941572 4857 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/599f242a-30de-4d34-be24-3695bc92abcf-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 15:42:12 crc kubenswrapper[4857]: I1128 15:42:12.941586 4857 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/599f242a-30de-4d34-be24-3695bc92abcf-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\""
Nov 28 15:42:12 crc kubenswrapper[4857]: I1128 15:42:12.941600 4857 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/599f242a-30de-4d34-be24-3695bc92abcf-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\""
Nov 28 15:42:12 crc kubenswrapper[4857]: I1128 15:42:12.941612 4857 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/599f242a-30de-4d34-be24-3695bc92abcf-ceph\") on node \"crc\" DevicePath \"\""
Nov 28 15:42:13 crc kubenswrapper[4857]: I1128 15:42:13.101437 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-cell1-q4kwz"
Nov 28 15:42:13 crc kubenswrapper[4857]: I1128 15:42:13.101345 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-cell1-q4kwz" event={"ID":"599f242a-30de-4d34-be24-3695bc92abcf","Type":"ContainerDied","Data":"612730da8c498a52f352795b565d1c8f830e4a9f99986a0766f91fde38297240"}
Nov 28 15:42:13 crc kubenswrapper[4857]: I1128 15:42:13.107853 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="612730da8c498a52f352795b565d1c8f830e4a9f99986a0766f91fde38297240"
Nov 28 15:42:13 crc kubenswrapper[4857]: I1128 15:42:13.211918 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-openstack-openstack-cell1-6nnr8"]
Nov 28 15:42:13 crc kubenswrapper[4857]: E1128 15:42:13.213150 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="599f242a-30de-4d34-be24-3695bc92abcf" containerName="neutron-metadata-openstack-openstack-cell1"
Nov 28 15:42:13 crc kubenswrapper[4857]: I1128 15:42:13.213187 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="599f242a-30de-4d34-be24-3695bc92abcf" containerName="neutron-metadata-openstack-openstack-cell1"
Nov 28 15:42:13 crc kubenswrapper[4857]: I1128 15:42:13.213507 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="599f242a-30de-4d34-be24-3695bc92abcf" containerName="neutron-metadata-openstack-openstack-cell1"
Nov 28 15:42:13 crc kubenswrapper[4857]: I1128 15:42:13.217092 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-openstack-openstack-cell1-6nnr8"
Nov 28 15:42:13 crc kubenswrapper[4857]: I1128 15:42:13.220906 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-qbvn9"
Nov 28 15:42:13 crc kubenswrapper[4857]: I1128 15:42:13.223299 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret"
Nov 28 15:42:13 crc kubenswrapper[4857]: I1128 15:42:13.223919 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1"
Nov 28 15:42:13 crc kubenswrapper[4857]: I1128 15:42:13.224041 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret"
Nov 28 15:42:13 crc kubenswrapper[4857]: I1128 15:42:13.225788 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-openstack-openstack-cell1-6nnr8"]
Nov 28 15:42:13 crc kubenswrapper[4857]: I1128 15:42:13.227898 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 28 15:42:13 crc kubenswrapper[4857]: I1128 15:42:13.352374 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/72fcedc3-8360-4cff-a0ec-a6e9adc9d54c-inventory\") pod \"libvirt-openstack-openstack-cell1-6nnr8\" (UID: \"72fcedc3-8360-4cff-a0ec-a6e9adc9d54c\") " pod="openstack/libvirt-openstack-openstack-cell1-6nnr8"
Nov 28 15:42:13 crc kubenswrapper[4857]: I1128 15:42:13.352484 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/72fcedc3-8360-4cff-a0ec-a6e9adc9d54c-ssh-key\") pod \"libvirt-openstack-openstack-cell1-6nnr8\" (UID: \"72fcedc3-8360-4cff-a0ec-a6e9adc9d54c\") " pod="openstack/libvirt-openstack-openstack-cell1-6nnr8"
Nov 28 15:42:13 crc kubenswrapper[4857]: I1128 15:42:13.352526 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-txd86\" (UniqueName: \"kubernetes.io/projected/72fcedc3-8360-4cff-a0ec-a6e9adc9d54c-kube-api-access-txd86\") pod \"libvirt-openstack-openstack-cell1-6nnr8\" (UID: \"72fcedc3-8360-4cff-a0ec-a6e9adc9d54c\") " pod="openstack/libvirt-openstack-openstack-cell1-6nnr8"
Nov 28 15:42:13 crc kubenswrapper[4857]: I1128 15:42:13.352761 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/72fcedc3-8360-4cff-a0ec-a6e9adc9d54c-libvirt-secret-0\") pod \"libvirt-openstack-openstack-cell1-6nnr8\" (UID: \"72fcedc3-8360-4cff-a0ec-a6e9adc9d54c\") " pod="openstack/libvirt-openstack-openstack-cell1-6nnr8"
Nov 28 15:42:13 crc kubenswrapper[4857]: I1128 15:42:13.352798 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72fcedc3-8360-4cff-a0ec-a6e9adc9d54c-libvirt-combined-ca-bundle\") pod \"libvirt-openstack-openstack-cell1-6nnr8\" (UID: \"72fcedc3-8360-4cff-a0ec-a6e9adc9d54c\") " pod="openstack/libvirt-openstack-openstack-cell1-6nnr8"
Nov 28 15:42:13 crc kubenswrapper[4857]: I1128 15:42:13.352830 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/72fcedc3-8360-4cff-a0ec-a6e9adc9d54c-ceph\") pod \"libvirt-openstack-openstack-cell1-6nnr8\" (UID: \"72fcedc3-8360-4cff-a0ec-a6e9adc9d54c\") " pod="openstack/libvirt-openstack-openstack-cell1-6nnr8"
Nov 28 15:42:13 crc kubenswrapper[4857]: I1128 15:42:13.455229 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/72fcedc3-8360-4cff-a0ec-a6e9adc9d54c-inventory\") pod \"libvirt-openstack-openstack-cell1-6nnr8\" (UID: \"72fcedc3-8360-4cff-a0ec-a6e9adc9d54c\") " pod="openstack/libvirt-openstack-openstack-cell1-6nnr8"
Nov 28 15:42:13 crc kubenswrapper[4857]: I1128 15:42:13.455317 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/72fcedc3-8360-4cff-a0ec-a6e9adc9d54c-ssh-key\") pod \"libvirt-openstack-openstack-cell1-6nnr8\" (UID: \"72fcedc3-8360-4cff-a0ec-a6e9adc9d54c\") " pod="openstack/libvirt-openstack-openstack-cell1-6nnr8"
Nov 28 15:42:13 crc kubenswrapper[4857]: I1128 15:42:13.455363 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-txd86\" (UniqueName: \"kubernetes.io/projected/72fcedc3-8360-4cff-a0ec-a6e9adc9d54c-kube-api-access-txd86\") pod \"libvirt-openstack-openstack-cell1-6nnr8\" (UID: \"72fcedc3-8360-4cff-a0ec-a6e9adc9d54c\") " pod="openstack/libvirt-openstack-openstack-cell1-6nnr8"
Nov 28 15:42:13 crc kubenswrapper[4857]: I1128 15:42:13.455560 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/72fcedc3-8360-4cff-a0ec-a6e9adc9d54c-libvirt-secret-0\") pod \"libvirt-openstack-openstack-cell1-6nnr8\" (UID: \"72fcedc3-8360-4cff-a0ec-a6e9adc9d54c\") " pod="openstack/libvirt-openstack-openstack-cell1-6nnr8"
Nov 28 15:42:13 crc kubenswrapper[4857]: I1128 15:42:13.455590 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72fcedc3-8360-4cff-a0ec-a6e9adc9d54c-libvirt-combined-ca-bundle\") pod \"libvirt-openstack-openstack-cell1-6nnr8\" (UID: \"72fcedc3-8360-4cff-a0ec-a6e9adc9d54c\") " pod="openstack/libvirt-openstack-openstack-cell1-6nnr8"
Nov 28 15:42:13 crc kubenswrapper[4857]: I1128 15:42:13.455611 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/72fcedc3-8360-4cff-a0ec-a6e9adc9d54c-ceph\") pod \"libvirt-openstack-openstack-cell1-6nnr8\" (UID: \"72fcedc3-8360-4cff-a0ec-a6e9adc9d54c\") " pod="openstack/libvirt-openstack-openstack-cell1-6nnr8"
Nov 28 15:42:13 crc kubenswrapper[4857]: I1128 15:42:13.460761 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72fcedc3-8360-4cff-a0ec-a6e9adc9d54c-libvirt-combined-ca-bundle\") pod \"libvirt-openstack-openstack-cell1-6nnr8\" (UID: \"72fcedc3-8360-4cff-a0ec-a6e9adc9d54c\") " pod="openstack/libvirt-openstack-openstack-cell1-6nnr8"
Nov 28 15:42:13 crc kubenswrapper[4857]: I1128 15:42:13.461115 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/72fcedc3-8360-4cff-a0ec-a6e9adc9d54c-ceph\") pod \"libvirt-openstack-openstack-cell1-6nnr8\" (UID: \"72fcedc3-8360-4cff-a0ec-a6e9adc9d54c\") " pod="openstack/libvirt-openstack-openstack-cell1-6nnr8"
Nov 28 15:42:13 crc kubenswrapper[4857]: I1128 15:42:13.461702 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/72fcedc3-8360-4cff-a0ec-a6e9adc9d54c-libvirt-secret-0\") pod \"libvirt-openstack-openstack-cell1-6nnr8\" (UID: \"72fcedc3-8360-4cff-a0ec-a6e9adc9d54c\") " pod="openstack/libvirt-openstack-openstack-cell1-6nnr8"
Nov 28 15:42:13 crc kubenswrapper[4857]: I1128 15:42:13.463228 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/72fcedc3-8360-4cff-a0ec-a6e9adc9d54c-ssh-key\") pod \"libvirt-openstack-openstack-cell1-6nnr8\" (UID: \"72fcedc3-8360-4cff-a0ec-a6e9adc9d54c\") " pod="openstack/libvirt-openstack-openstack-cell1-6nnr8"
Nov 28 15:42:13 crc kubenswrapper[4857]: I1128 15:42:13.466568 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/72fcedc3-8360-4cff-a0ec-a6e9adc9d54c-inventory\") pod \"libvirt-openstack-openstack-cell1-6nnr8\" (UID: \"72fcedc3-8360-4cff-a0ec-a6e9adc9d54c\") " pod="openstack/libvirt-openstack-openstack-cell1-6nnr8"
Nov 28 15:42:13 crc kubenswrapper[4857]: I1128 15:42:13.479242 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-txd86\" (UniqueName: \"kubernetes.io/projected/72fcedc3-8360-4cff-a0ec-a6e9adc9d54c-kube-api-access-txd86\") pod \"libvirt-openstack-openstack-cell1-6nnr8\" (UID: \"72fcedc3-8360-4cff-a0ec-a6e9adc9d54c\") " pod="openstack/libvirt-openstack-openstack-cell1-6nnr8"
Nov 28 15:42:13 crc kubenswrapper[4857]: I1128 15:42:13.546539 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-openstack-openstack-cell1-6nnr8"
Nov 28 15:42:15 crc kubenswrapper[4857]: I1128 15:42:14.139089 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-openstack-openstack-cell1-6nnr8"]
Nov 28 15:42:15 crc kubenswrapper[4857]: I1128 15:42:14.154665 4857 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 28 15:42:15 crc kubenswrapper[4857]: I1128 15:42:15.135972 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-openstack-openstack-cell1-6nnr8" event={"ID":"72fcedc3-8360-4cff-a0ec-a6e9adc9d54c","Type":"ContainerStarted","Data":"bc7346336b5c00de0cdd25dc3827859d9b167a84cec65869c859aefae1bd29a3"}
Nov 28 15:42:16 crc kubenswrapper[4857]: I1128 15:42:16.163133 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-openstack-openstack-cell1-6nnr8" event={"ID":"72fcedc3-8360-4cff-a0ec-a6e9adc9d54c","Type":"ContainerStarted","Data":"975e54ecfd5d4cab81715469711536ff3a0fe1c81f828ecf74c9dc57dbc4e0b6"}
Nov 28 15:42:16 crc kubenswrapper[4857]: I1128 15:42:16.205049 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-openstack-openstack-cell1-6nnr8" podStartSLOduration=2.233423787 podStartE2EDuration="3.204927035s" podCreationTimestamp="2025-11-28 15:42:13 +0000 UTC" firstStartedPulling="2025-11-28 15:42:14.154392275 +0000 UTC m=+7984.278333712" lastFinishedPulling="2025-11-28 15:42:15.125895503 +0000 UTC m=+7985.249836960" observedRunningTime="2025-11-28 15:42:16.186619137 +0000 UTC m=+7986.310560574" watchObservedRunningTime="2025-11-28 15:42:16.204927035 +0000 UTC m=+7986.328868482"
Nov 28 15:42:22 crc kubenswrapper[4857]: I1128 15:42:22.229895 4857 scope.go:117] "RemoveContainer" containerID="2fb8ed319a4d63abaa543df3c6375c2d445c7842c0e60d5249ec84ade1868d55"
Nov 28 15:42:22 crc kubenswrapper[4857]: E1128 15:42:22.231128 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
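machine-config-daemon has been in CrashLoopBackOff since before this window: the kubelet logs the same "back-off 5m0s" error on each sync attempt, meaning the restart delay has already grown to its cap. A sketch of the usual doubling schedule (the 10s initial delay and 5m cap are assumed kubelet defaults, not read from this log):

    import itertools

    # Capped exponential restart back-off; parameters are assumptions.
    def backoff_schedule(initial=10.0, factor=2.0, cap=300.0):
        delay = initial
        while True:
            yield min(delay, cap)
            delay *= factor

    print(list(itertools.islice(backoff_schedule(), 7)))
    # [10.0, 20.0, 40.0, 80.0, 160.0, 300.0, 300.0]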
Nov 28 15:42:29 crc kubenswrapper[4857]: I1128 15:42:29.571356 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-gcdl5"]
Nov 28 15:42:29 crc kubenswrapper[4857]: I1128 15:42:29.574503 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gcdl5"
Nov 28 15:42:29 crc kubenswrapper[4857]: I1128 15:42:29.601177 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gcdl5"]
Nov 28 15:42:29 crc kubenswrapper[4857]: I1128 15:42:29.747090 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c8jdk\" (UniqueName: \"kubernetes.io/projected/c3822d51-a2ad-4378-82fc-045698eb4abc-kube-api-access-c8jdk\") pod \"redhat-marketplace-gcdl5\" (UID: \"c3822d51-a2ad-4378-82fc-045698eb4abc\") " pod="openshift-marketplace/redhat-marketplace-gcdl5"
Nov 28 15:42:29 crc kubenswrapper[4857]: I1128 15:42:29.747228 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c3822d51-a2ad-4378-82fc-045698eb4abc-utilities\") pod \"redhat-marketplace-gcdl5\" (UID: \"c3822d51-a2ad-4378-82fc-045698eb4abc\") " pod="openshift-marketplace/redhat-marketplace-gcdl5"
Nov 28 15:42:29 crc kubenswrapper[4857]: I1128 15:42:29.747408 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c3822d51-a2ad-4378-82fc-045698eb4abc-catalog-content\") pod \"redhat-marketplace-gcdl5\" (UID: \"c3822d51-a2ad-4378-82fc-045698eb4abc\") " pod="openshift-marketplace/redhat-marketplace-gcdl5"
Nov 28 15:42:29 crc kubenswrapper[4857]: I1128 15:42:29.849819 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c3822d51-a2ad-4378-82fc-045698eb4abc-utilities\") pod \"redhat-marketplace-gcdl5\" (UID: \"c3822d51-a2ad-4378-82fc-045698eb4abc\") " pod="openshift-marketplace/redhat-marketplace-gcdl5"
Nov 28 15:42:29 crc kubenswrapper[4857]: I1128 15:42:29.849919 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c3822d51-a2ad-4378-82fc-045698eb4abc-catalog-content\") pod \"redhat-marketplace-gcdl5\" (UID: \"c3822d51-a2ad-4378-82fc-045698eb4abc\") " pod="openshift-marketplace/redhat-marketplace-gcdl5"
Nov 28 15:42:29 crc kubenswrapper[4857]: I1128 15:42:29.850261 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c8jdk\" (UniqueName: \"kubernetes.io/projected/c3822d51-a2ad-4378-82fc-045698eb4abc-kube-api-access-c8jdk\") pod \"redhat-marketplace-gcdl5\" (UID: \"c3822d51-a2ad-4378-82fc-045698eb4abc\") " pod="openshift-marketplace/redhat-marketplace-gcdl5"
Nov 28 15:42:29 crc kubenswrapper[4857]: I1128 15:42:29.850439 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c3822d51-a2ad-4378-82fc-045698eb4abc-utilities\") pod \"redhat-marketplace-gcdl5\" (UID: \"c3822d51-a2ad-4378-82fc-045698eb4abc\") " pod="openshift-marketplace/redhat-marketplace-gcdl5"
Nov 28 15:42:29 crc kubenswrapper[4857]: I1128 15:42:29.850526 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c3822d51-a2ad-4378-82fc-045698eb4abc-catalog-content\") pod \"redhat-marketplace-gcdl5\" (UID: \"c3822d51-a2ad-4378-82fc-045698eb4abc\") " pod="openshift-marketplace/redhat-marketplace-gcdl5"
Nov 28 15:42:29 crc kubenswrapper[4857]: I1128 15:42:29.872345 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c8jdk\" (UniqueName: \"kubernetes.io/projected/c3822d51-a2ad-4378-82fc-045698eb4abc-kube-api-access-c8jdk\") pod \"redhat-marketplace-gcdl5\" (UID: \"c3822d51-a2ad-4378-82fc-045698eb4abc\") " pod="openshift-marketplace/redhat-marketplace-gcdl5"
Nov 28 15:42:29 crc kubenswrapper[4857]: I1128 15:42:29.904434 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gcdl5"
Nov 28 15:42:30 crc kubenswrapper[4857]: I1128 15:42:30.412460 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gcdl5"]
Nov 28 15:42:31 crc kubenswrapper[4857]: I1128 15:42:31.330455 4857 generic.go:334] "Generic (PLEG): container finished" podID="c3822d51-a2ad-4378-82fc-045698eb4abc" containerID="f816eb1a9abd1d25f86adddff9a37a4d814eeb19d726fb73263dae6d92002dd1" exitCode=0
Nov 28 15:42:31 crc kubenswrapper[4857]: I1128 15:42:31.330525 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gcdl5" event={"ID":"c3822d51-a2ad-4378-82fc-045698eb4abc","Type":"ContainerDied","Data":"f816eb1a9abd1d25f86adddff9a37a4d814eeb19d726fb73263dae6d92002dd1"}
Nov 28 15:42:31 crc kubenswrapper[4857]: I1128 15:42:31.331023 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gcdl5" event={"ID":"c3822d51-a2ad-4378-82fc-045698eb4abc","Type":"ContainerStarted","Data":"03f64f3c65274153e9c61bc290552c12ad88f1d8568e905f0b02e6c42c0053f0"}
Nov 28 15:42:33 crc kubenswrapper[4857]: I1128 15:42:33.363230 4857 generic.go:334] "Generic (PLEG): container finished" podID="c3822d51-a2ad-4378-82fc-045698eb4abc" containerID="a8a73ba3bf8c734c60eeacbd7fe3c034138ccec62b670c3b7c9eb9dff531809d" exitCode=0
Nov 28 15:42:33 crc kubenswrapper[4857]: I1128 15:42:33.364105 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gcdl5" event={"ID":"c3822d51-a2ad-4378-82fc-045698eb4abc","Type":"ContainerDied","Data":"a8a73ba3bf8c734c60eeacbd7fe3c034138ccec62b670c3b7c9eb9dff531809d"}
Nov 28 15:42:34 crc kubenswrapper[4857]: I1128 15:42:34.376845 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gcdl5" event={"ID":"c3822d51-a2ad-4378-82fc-045698eb4abc","Type":"ContainerStarted","Data":"eb4c53c1d76a8bccf3bb52a0d8cf55ced840e9e6e4c5f936dea7df21ad21bc08"}
Nov 28 15:42:34 crc kubenswrapper[4857]: I1128 15:42:34.397792 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-gcdl5" podStartSLOduration=2.734342248 podStartE2EDuration="5.397777212s" podCreationTimestamp="2025-11-28 15:42:29 +0000 UTC" firstStartedPulling="2025-11-28 15:42:31.33283261 +0000 UTC m=+8001.456774047" lastFinishedPulling="2025-11-28 15:42:33.996267554 +0000 UTC m=+8004.120209011" observedRunningTime="2025-11-28 15:42:34.393622431 +0000 UTC m=+8004.517563868" watchObservedRunningTime="2025-11-28 15:42:34.397777212 +0000 UTC m=+8004.521718649"
Nov 28 15:42:35 crc kubenswrapper[4857]: I1128 15:42:35.229193 4857 scope.go:117] "RemoveContainer" containerID="2fb8ed319a4d63abaa543df3c6375c2d445c7842c0e60d5249ec84ade1868d55"
Nov 28 15:42:35 crc kubenswrapper[4857]: E1128 15:42:35.229503 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 15:42:39 crc kubenswrapper[4857]: I1128 15:42:39.904988 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-gcdl5"
Nov 28 15:42:39 crc kubenswrapper[4857]: I1128 15:42:39.906358 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-gcdl5"
Nov 28 15:42:40 crc kubenswrapper[4857]: I1128 15:42:40.026402 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-gcdl5"
Nov 28 15:42:40 crc kubenswrapper[4857]: I1128 15:42:40.494340 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-gcdl5"
Nov 28 15:42:40 crc kubenswrapper[4857]: I1128 15:42:40.556015 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-gcdl5"]
Nov 28 15:42:42 crc kubenswrapper[4857]: I1128 15:42:42.454810 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-gcdl5" podUID="c3822d51-a2ad-4378-82fc-045698eb4abc" containerName="registry-server" containerID="cri-o://eb4c53c1d76a8bccf3bb52a0d8cf55ced840e9e6e4c5f936dea7df21ad21bc08" gracePeriod=2
Nov 28 15:42:43 crc kubenswrapper[4857]: I1128 15:42:43.465069 4857 generic.go:334] "Generic (PLEG): container finished" podID="c3822d51-a2ad-4378-82fc-045698eb4abc" containerID="eb4c53c1d76a8bccf3bb52a0d8cf55ced840e9e6e4c5f936dea7df21ad21bc08" exitCode=0
Nov 28 15:42:43 crc kubenswrapper[4857]: I1128 15:42:43.465159 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gcdl5" event={"ID":"c3822d51-a2ad-4378-82fc-045698eb4abc","Type":"ContainerDied","Data":"eb4c53c1d76a8bccf3bb52a0d8cf55ced840e9e6e4c5f936dea7df21ad21bc08"}
Nov 28 15:42:43 crc kubenswrapper[4857]: I1128 15:42:43.465341 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gcdl5" event={"ID":"c3822d51-a2ad-4378-82fc-045698eb4abc","Type":"ContainerDied","Data":"03f64f3c65274153e9c61bc290552c12ad88f1d8568e905f0b02e6c42c0053f0"}
Nov 28 15:42:43 crc kubenswrapper[4857]: I1128 15:42:43.465357 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="03f64f3c65274153e9c61bc290552c12ad88f1d8568e905f0b02e6c42c0053f0"
Nov 28 15:42:43 crc kubenswrapper[4857]: I1128 15:42:43.491781 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gcdl5"
Nov 28 15:42:43 crc kubenswrapper[4857]: I1128 15:42:43.583028 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c8jdk\" (UniqueName: \"kubernetes.io/projected/c3822d51-a2ad-4378-82fc-045698eb4abc-kube-api-access-c8jdk\") pod \"c3822d51-a2ad-4378-82fc-045698eb4abc\" (UID: \"c3822d51-a2ad-4378-82fc-045698eb4abc\") "
Nov 28 15:42:43 crc kubenswrapper[4857]: I1128 15:42:43.583169 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c3822d51-a2ad-4378-82fc-045698eb4abc-catalog-content\") pod \"c3822d51-a2ad-4378-82fc-045698eb4abc\" (UID: \"c3822d51-a2ad-4378-82fc-045698eb4abc\") "
Nov 28 15:42:43 crc kubenswrapper[4857]: I1128 15:42:43.583191 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c3822d51-a2ad-4378-82fc-045698eb4abc-utilities\") pod \"c3822d51-a2ad-4378-82fc-045698eb4abc\" (UID: \"c3822d51-a2ad-4378-82fc-045698eb4abc\") "
Nov 28 15:42:43 crc kubenswrapper[4857]: I1128 15:42:43.584226 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c3822d51-a2ad-4378-82fc-045698eb4abc-utilities" (OuterVolumeSpecName: "utilities") pod "c3822d51-a2ad-4378-82fc-045698eb4abc" (UID: "c3822d51-a2ad-4378-82fc-045698eb4abc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:42:43 crc kubenswrapper[4857]: I1128 15:42:43.589385 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c3822d51-a2ad-4378-82fc-045698eb4abc-kube-api-access-c8jdk" (OuterVolumeSpecName: "kube-api-access-c8jdk") pod "c3822d51-a2ad-4378-82fc-045698eb4abc" (UID: "c3822d51-a2ad-4378-82fc-045698eb4abc"). InnerVolumeSpecName "kube-api-access-c8jdk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:42:43 crc kubenswrapper[4857]: I1128 15:42:43.600357 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c3822d51-a2ad-4378-82fc-045698eb4abc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c3822d51-a2ad-4378-82fc-045698eb4abc" (UID: "c3822d51-a2ad-4378-82fc-045698eb4abc"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:42:43 crc kubenswrapper[4857]: I1128 15:42:43.685742 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c3822d51-a2ad-4378-82fc-045698eb4abc-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 15:42:43 crc kubenswrapper[4857]: I1128 15:42:43.685780 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c3822d51-a2ad-4378-82fc-045698eb4abc-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 15:42:43 crc kubenswrapper[4857]: I1128 15:42:43.685795 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c8jdk\" (UniqueName: \"kubernetes.io/projected/c3822d51-a2ad-4378-82fc-045698eb4abc-kube-api-access-c8jdk\") on node \"crc\" DevicePath \"\""
Nov 28 15:42:44 crc kubenswrapper[4857]: I1128 15:42:44.476441 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gcdl5"
Nov 28 15:42:44 crc kubenswrapper[4857]: I1128 15:42:44.501878 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-gcdl5"]
Nov 28 15:42:44 crc kubenswrapper[4857]: I1128 15:42:44.513192 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-gcdl5"]
Nov 28 15:42:46 crc kubenswrapper[4857]: I1128 15:42:46.229862 4857 scope.go:117] "RemoveContainer" containerID="2fb8ed319a4d63abaa543df3c6375c2d445c7842c0e60d5249ec84ade1868d55"
Nov 28 15:42:46 crc kubenswrapper[4857]: E1128 15:42:46.230406 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 15:42:46 crc kubenswrapper[4857]: I1128 15:42:46.245157 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c3822d51-a2ad-4378-82fc-045698eb4abc" path="/var/lib/kubelet/pods/c3822d51-a2ad-4378-82fc-045698eb4abc/volumes"
Nov 28 15:42:57 crc kubenswrapper[4857]: I1128 15:42:57.229775 4857 scope.go:117] "RemoveContainer" containerID="2fb8ed319a4d63abaa543df3c6375c2d445c7842c0e60d5249ec84ade1868d55"
Nov 28 15:42:57 crc kubenswrapper[4857]: E1128 15:42:57.231118 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 15:43:09 crc kubenswrapper[4857]: I1128 15:43:09.228941 4857 scope.go:117] "RemoveContainer" containerID="2fb8ed319a4d63abaa543df3c6375c2d445c7842c0e60d5249ec84ade1868d55"
Nov 28 15:43:09 crc kubenswrapper[4857]: E1128 15:43:09.229857 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 15:43:16 crc kubenswrapper[4857]: I1128 15:43:16.158609 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-r88mh"]
Nov 28 15:43:16 crc kubenswrapper[4857]: E1128 15:43:16.160144 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3822d51-a2ad-4378-82fc-045698eb4abc" containerName="registry-server"
Nov 28 15:43:16 crc kubenswrapper[4857]: I1128 15:43:16.160169 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3822d51-a2ad-4378-82fc-045698eb4abc" containerName="registry-server"
Nov 28 15:43:16 crc kubenswrapper[4857]: E1128 15:43:16.160206 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3822d51-a2ad-4378-82fc-045698eb4abc" containerName="extract-content"
CPUSet assignment" podUID="c3822d51-a2ad-4378-82fc-045698eb4abc" containerName="extract-content" Nov 28 15:43:16 crc kubenswrapper[4857]: E1128 15:43:16.160249 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3822d51-a2ad-4378-82fc-045698eb4abc" containerName="extract-utilities" Nov 28 15:43:16 crc kubenswrapper[4857]: I1128 15:43:16.160262 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3822d51-a2ad-4378-82fc-045698eb4abc" containerName="extract-utilities" Nov 28 15:43:16 crc kubenswrapper[4857]: I1128 15:43:16.160660 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3822d51-a2ad-4378-82fc-045698eb4abc" containerName="registry-server" Nov 28 15:43:16 crc kubenswrapper[4857]: I1128 15:43:16.163493 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r88mh" Nov 28 15:43:16 crc kubenswrapper[4857]: I1128 15:43:16.183621 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/afb78493-f170-48a6-a7cb-9f0fc88c10f4-utilities\") pod \"certified-operators-r88mh\" (UID: \"afb78493-f170-48a6-a7cb-9f0fc88c10f4\") " pod="openshift-marketplace/certified-operators-r88mh" Nov 28 15:43:16 crc kubenswrapper[4857]: I1128 15:43:16.183824 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/afb78493-f170-48a6-a7cb-9f0fc88c10f4-catalog-content\") pod \"certified-operators-r88mh\" (UID: \"afb78493-f170-48a6-a7cb-9f0fc88c10f4\") " pod="openshift-marketplace/certified-operators-r88mh" Nov 28 15:43:16 crc kubenswrapper[4857]: I1128 15:43:16.184130 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x2p6m\" (UniqueName: \"kubernetes.io/projected/afb78493-f170-48a6-a7cb-9f0fc88c10f4-kube-api-access-x2p6m\") pod \"certified-operators-r88mh\" (UID: \"afb78493-f170-48a6-a7cb-9f0fc88c10f4\") " pod="openshift-marketplace/certified-operators-r88mh" Nov 28 15:43:16 crc kubenswrapper[4857]: I1128 15:43:16.185137 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-r88mh"] Nov 28 15:43:16 crc kubenswrapper[4857]: I1128 15:43:16.286606 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/afb78493-f170-48a6-a7cb-9f0fc88c10f4-utilities\") pod \"certified-operators-r88mh\" (UID: \"afb78493-f170-48a6-a7cb-9f0fc88c10f4\") " pod="openshift-marketplace/certified-operators-r88mh" Nov 28 15:43:16 crc kubenswrapper[4857]: I1128 15:43:16.286697 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/afb78493-f170-48a6-a7cb-9f0fc88c10f4-catalog-content\") pod \"certified-operators-r88mh\" (UID: \"afb78493-f170-48a6-a7cb-9f0fc88c10f4\") " pod="openshift-marketplace/certified-operators-r88mh" Nov 28 15:43:16 crc kubenswrapper[4857]: I1128 15:43:16.286793 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x2p6m\" (UniqueName: \"kubernetes.io/projected/afb78493-f170-48a6-a7cb-9f0fc88c10f4-kube-api-access-x2p6m\") pod \"certified-operators-r88mh\" (UID: \"afb78493-f170-48a6-a7cb-9f0fc88c10f4\") " pod="openshift-marketplace/certified-operators-r88mh" Nov 28 15:43:16 crc kubenswrapper[4857]: I1128 
15:43:16.287219 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/afb78493-f170-48a6-a7cb-9f0fc88c10f4-utilities\") pod \"certified-operators-r88mh\" (UID: \"afb78493-f170-48a6-a7cb-9f0fc88c10f4\") " pod="openshift-marketplace/certified-operators-r88mh" Nov 28 15:43:16 crc kubenswrapper[4857]: I1128 15:43:16.287333 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/afb78493-f170-48a6-a7cb-9f0fc88c10f4-catalog-content\") pod \"certified-operators-r88mh\" (UID: \"afb78493-f170-48a6-a7cb-9f0fc88c10f4\") " pod="openshift-marketplace/certified-operators-r88mh" Nov 28 15:43:16 crc kubenswrapper[4857]: I1128 15:43:16.311728 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x2p6m\" (UniqueName: \"kubernetes.io/projected/afb78493-f170-48a6-a7cb-9f0fc88c10f4-kube-api-access-x2p6m\") pod \"certified-operators-r88mh\" (UID: \"afb78493-f170-48a6-a7cb-9f0fc88c10f4\") " pod="openshift-marketplace/certified-operators-r88mh" Nov 28 15:43:16 crc kubenswrapper[4857]: I1128 15:43:16.502080 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-r88mh" Nov 28 15:43:16 crc kubenswrapper[4857]: I1128 15:43:16.988337 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-r88mh"] Nov 28 15:43:17 crc kubenswrapper[4857]: I1128 15:43:17.856054 4857 generic.go:334] "Generic (PLEG): container finished" podID="afb78493-f170-48a6-a7cb-9f0fc88c10f4" containerID="7bfbe879ee5d88ef76d664aa99d6aa897d0fc775cc7f411fa4ae2e798212739f" exitCode=0 Nov 28 15:43:17 crc kubenswrapper[4857]: I1128 15:43:17.856167 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r88mh" event={"ID":"afb78493-f170-48a6-a7cb-9f0fc88c10f4","Type":"ContainerDied","Data":"7bfbe879ee5d88ef76d664aa99d6aa897d0fc775cc7f411fa4ae2e798212739f"} Nov 28 15:43:17 crc kubenswrapper[4857]: I1128 15:43:17.856406 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r88mh" event={"ID":"afb78493-f170-48a6-a7cb-9f0fc88c10f4","Type":"ContainerStarted","Data":"3220cd5f2e6eab78d3c673e0a5649514a3c60d6b15141e3a779d38f14231d68b"} Nov 28 15:43:18 crc kubenswrapper[4857]: I1128 15:43:18.874041 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r88mh" event={"ID":"afb78493-f170-48a6-a7cb-9f0fc88c10f4","Type":"ContainerStarted","Data":"cd03535adf2930225ba61fedad37d482a1a3ecf248f938f3946f917d689e06b4"} Nov 28 15:43:19 crc kubenswrapper[4857]: I1128 15:43:19.889495 4857 generic.go:334] "Generic (PLEG): container finished" podID="afb78493-f170-48a6-a7cb-9f0fc88c10f4" containerID="cd03535adf2930225ba61fedad37d482a1a3ecf248f938f3946f917d689e06b4" exitCode=0 Nov 28 15:43:19 crc kubenswrapper[4857]: I1128 15:43:19.889578 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r88mh" event={"ID":"afb78493-f170-48a6-a7cb-9f0fc88c10f4","Type":"ContainerDied","Data":"cd03535adf2930225ba61fedad37d482a1a3ecf248f938f3946f917d689e06b4"} Nov 28 15:43:20 crc kubenswrapper[4857]: I1128 15:43:20.901328 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r88mh" 
event={"ID":"afb78493-f170-48a6-a7cb-9f0fc88c10f4","Type":"ContainerStarted","Data":"ca4fd647420e4d3688c43504f562565f49b5f9a08d3f467ca2e548bd4462eb0f"} Nov 28 15:43:20 crc kubenswrapper[4857]: I1128 15:43:20.926049 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-r88mh" podStartSLOduration=2.331463413 podStartE2EDuration="4.926029882s" podCreationTimestamp="2025-11-28 15:43:16 +0000 UTC" firstStartedPulling="2025-11-28 15:43:17.858991304 +0000 UTC m=+8047.982932781" lastFinishedPulling="2025-11-28 15:43:20.453557813 +0000 UTC m=+8050.577499250" observedRunningTime="2025-11-28 15:43:20.917528825 +0000 UTC m=+8051.041470262" watchObservedRunningTime="2025-11-28 15:43:20.926029882 +0000 UTC m=+8051.049971319" Nov 28 15:43:23 crc kubenswrapper[4857]: I1128 15:43:23.229775 4857 scope.go:117] "RemoveContainer" containerID="2fb8ed319a4d63abaa543df3c6375c2d445c7842c0e60d5249ec84ade1868d55" Nov 28 15:43:23 crc kubenswrapper[4857]: E1128 15:43:23.230787 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:43:26 crc kubenswrapper[4857]: I1128 15:43:26.502743 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-r88mh" Nov 28 15:43:26 crc kubenswrapper[4857]: I1128 15:43:26.504159 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-r88mh" Nov 28 15:43:26 crc kubenswrapper[4857]: I1128 15:43:26.577601 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-r88mh" Nov 28 15:43:27 crc kubenswrapper[4857]: I1128 15:43:27.035552 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-r88mh" Nov 28 15:43:27 crc kubenswrapper[4857]: I1128 15:43:27.091955 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-r88mh"] Nov 28 15:43:28 crc kubenswrapper[4857]: I1128 15:43:28.996029 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-r88mh" podUID="afb78493-f170-48a6-a7cb-9f0fc88c10f4" containerName="registry-server" containerID="cri-o://ca4fd647420e4d3688c43504f562565f49b5f9a08d3f467ca2e548bd4462eb0f" gracePeriod=2 Nov 28 15:43:29 crc kubenswrapper[4857]: I1128 15:43:29.468453 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-r88mh" Nov 28 15:43:29 crc kubenswrapper[4857]: I1128 15:43:29.619571 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2p6m\" (UniqueName: \"kubernetes.io/projected/afb78493-f170-48a6-a7cb-9f0fc88c10f4-kube-api-access-x2p6m\") pod \"afb78493-f170-48a6-a7cb-9f0fc88c10f4\" (UID: \"afb78493-f170-48a6-a7cb-9f0fc88c10f4\") " Nov 28 15:43:29 crc kubenswrapper[4857]: I1128 15:43:29.619695 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/afb78493-f170-48a6-a7cb-9f0fc88c10f4-catalog-content\") pod \"afb78493-f170-48a6-a7cb-9f0fc88c10f4\" (UID: \"afb78493-f170-48a6-a7cb-9f0fc88c10f4\") " Nov 28 15:43:29 crc kubenswrapper[4857]: I1128 15:43:29.619801 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/afb78493-f170-48a6-a7cb-9f0fc88c10f4-utilities\") pod \"afb78493-f170-48a6-a7cb-9f0fc88c10f4\" (UID: \"afb78493-f170-48a6-a7cb-9f0fc88c10f4\") " Nov 28 15:43:29 crc kubenswrapper[4857]: I1128 15:43:29.620769 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/afb78493-f170-48a6-a7cb-9f0fc88c10f4-utilities" (OuterVolumeSpecName: "utilities") pod "afb78493-f170-48a6-a7cb-9f0fc88c10f4" (UID: "afb78493-f170-48a6-a7cb-9f0fc88c10f4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:43:29 crc kubenswrapper[4857]: I1128 15:43:29.625439 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/afb78493-f170-48a6-a7cb-9f0fc88c10f4-kube-api-access-x2p6m" (OuterVolumeSpecName: "kube-api-access-x2p6m") pod "afb78493-f170-48a6-a7cb-9f0fc88c10f4" (UID: "afb78493-f170-48a6-a7cb-9f0fc88c10f4"). InnerVolumeSpecName "kube-api-access-x2p6m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:43:29 crc kubenswrapper[4857]: I1128 15:43:29.673242 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/afb78493-f170-48a6-a7cb-9f0fc88c10f4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "afb78493-f170-48a6-a7cb-9f0fc88c10f4" (UID: "afb78493-f170-48a6-a7cb-9f0fc88c10f4"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:43:29 crc kubenswrapper[4857]: I1128 15:43:29.722803 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2p6m\" (UniqueName: \"kubernetes.io/projected/afb78493-f170-48a6-a7cb-9f0fc88c10f4-kube-api-access-x2p6m\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:29 crc kubenswrapper[4857]: I1128 15:43:29.722841 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/afb78493-f170-48a6-a7cb-9f0fc88c10f4-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:29 crc kubenswrapper[4857]: I1128 15:43:29.722850 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/afb78493-f170-48a6-a7cb-9f0fc88c10f4-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:43:30 crc kubenswrapper[4857]: I1128 15:43:30.008068 4857 generic.go:334] "Generic (PLEG): container finished" podID="afb78493-f170-48a6-a7cb-9f0fc88c10f4" containerID="ca4fd647420e4d3688c43504f562565f49b5f9a08d3f467ca2e548bd4462eb0f" exitCode=0 Nov 28 15:43:30 crc kubenswrapper[4857]: I1128 15:43:30.008109 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r88mh" event={"ID":"afb78493-f170-48a6-a7cb-9f0fc88c10f4","Type":"ContainerDied","Data":"ca4fd647420e4d3688c43504f562565f49b5f9a08d3f467ca2e548bd4462eb0f"} Nov 28 15:43:30 crc kubenswrapper[4857]: I1128 15:43:30.008144 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-r88mh" event={"ID":"afb78493-f170-48a6-a7cb-9f0fc88c10f4","Type":"ContainerDied","Data":"3220cd5f2e6eab78d3c673e0a5649514a3c60d6b15141e3a779d38f14231d68b"} Nov 28 15:43:30 crc kubenswrapper[4857]: I1128 15:43:30.008163 4857 scope.go:117] "RemoveContainer" containerID="ca4fd647420e4d3688c43504f562565f49b5f9a08d3f467ca2e548bd4462eb0f" Nov 28 15:43:30 crc kubenswrapper[4857]: I1128 15:43:30.009241 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-r88mh" Nov 28 15:43:30 crc kubenswrapper[4857]: I1128 15:43:30.060896 4857 scope.go:117] "RemoveContainer" containerID="cd03535adf2930225ba61fedad37d482a1a3ecf248f938f3946f917d689e06b4" Nov 28 15:43:30 crc kubenswrapper[4857]: I1128 15:43:30.075837 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-r88mh"] Nov 28 15:43:30 crc kubenswrapper[4857]: I1128 15:43:30.092077 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-r88mh"] Nov 28 15:43:30 crc kubenswrapper[4857]: I1128 15:43:30.092215 4857 scope.go:117] "RemoveContainer" containerID="7bfbe879ee5d88ef76d664aa99d6aa897d0fc775cc7f411fa4ae2e798212739f" Nov 28 15:43:30 crc kubenswrapper[4857]: I1128 15:43:30.151412 4857 scope.go:117] "RemoveContainer" containerID="ca4fd647420e4d3688c43504f562565f49b5f9a08d3f467ca2e548bd4462eb0f" Nov 28 15:43:30 crc kubenswrapper[4857]: E1128 15:43:30.152180 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca4fd647420e4d3688c43504f562565f49b5f9a08d3f467ca2e548bd4462eb0f\": container with ID starting with ca4fd647420e4d3688c43504f562565f49b5f9a08d3f467ca2e548bd4462eb0f not found: ID does not exist" containerID="ca4fd647420e4d3688c43504f562565f49b5f9a08d3f467ca2e548bd4462eb0f" Nov 28 15:43:30 crc kubenswrapper[4857]: I1128 15:43:30.152238 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca4fd647420e4d3688c43504f562565f49b5f9a08d3f467ca2e548bd4462eb0f"} err="failed to get container status \"ca4fd647420e4d3688c43504f562565f49b5f9a08d3f467ca2e548bd4462eb0f\": rpc error: code = NotFound desc = could not find container \"ca4fd647420e4d3688c43504f562565f49b5f9a08d3f467ca2e548bd4462eb0f\": container with ID starting with ca4fd647420e4d3688c43504f562565f49b5f9a08d3f467ca2e548bd4462eb0f not found: ID does not exist" Nov 28 15:43:30 crc kubenswrapper[4857]: I1128 15:43:30.152281 4857 scope.go:117] "RemoveContainer" containerID="cd03535adf2930225ba61fedad37d482a1a3ecf248f938f3946f917d689e06b4" Nov 28 15:43:30 crc kubenswrapper[4857]: E1128 15:43:30.152584 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cd03535adf2930225ba61fedad37d482a1a3ecf248f938f3946f917d689e06b4\": container with ID starting with cd03535adf2930225ba61fedad37d482a1a3ecf248f938f3946f917d689e06b4 not found: ID does not exist" containerID="cd03535adf2930225ba61fedad37d482a1a3ecf248f938f3946f917d689e06b4" Nov 28 15:43:30 crc kubenswrapper[4857]: I1128 15:43:30.152607 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd03535adf2930225ba61fedad37d482a1a3ecf248f938f3946f917d689e06b4"} err="failed to get container status \"cd03535adf2930225ba61fedad37d482a1a3ecf248f938f3946f917d689e06b4\": rpc error: code = NotFound desc = could not find container \"cd03535adf2930225ba61fedad37d482a1a3ecf248f938f3946f917d689e06b4\": container with ID starting with cd03535adf2930225ba61fedad37d482a1a3ecf248f938f3946f917d689e06b4 not found: ID does not exist" Nov 28 15:43:30 crc kubenswrapper[4857]: I1128 15:43:30.152624 4857 scope.go:117] "RemoveContainer" containerID="7bfbe879ee5d88ef76d664aa99d6aa897d0fc775cc7f411fa4ae2e798212739f" Nov 28 15:43:30 crc kubenswrapper[4857]: E1128 15:43:30.152852 4857 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"7bfbe879ee5d88ef76d664aa99d6aa897d0fc775cc7f411fa4ae2e798212739f\": container with ID starting with 7bfbe879ee5d88ef76d664aa99d6aa897d0fc775cc7f411fa4ae2e798212739f not found: ID does not exist" containerID="7bfbe879ee5d88ef76d664aa99d6aa897d0fc775cc7f411fa4ae2e798212739f" Nov 28 15:43:30 crc kubenswrapper[4857]: I1128 15:43:30.152890 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7bfbe879ee5d88ef76d664aa99d6aa897d0fc775cc7f411fa4ae2e798212739f"} err="failed to get container status \"7bfbe879ee5d88ef76d664aa99d6aa897d0fc775cc7f411fa4ae2e798212739f\": rpc error: code = NotFound desc = could not find container \"7bfbe879ee5d88ef76d664aa99d6aa897d0fc775cc7f411fa4ae2e798212739f\": container with ID starting with 7bfbe879ee5d88ef76d664aa99d6aa897d0fc775cc7f411fa4ae2e798212739f not found: ID does not exist" Nov 28 15:43:30 crc kubenswrapper[4857]: I1128 15:43:30.242272 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="afb78493-f170-48a6-a7cb-9f0fc88c10f4" path="/var/lib/kubelet/pods/afb78493-f170-48a6-a7cb-9f0fc88c10f4/volumes" Nov 28 15:43:31 crc kubenswrapper[4857]: I1128 15:43:31.609477 4857 scope.go:117] "RemoveContainer" containerID="04607283e04a494091f7ddc925060dac9ee052342871c32d0fce4aee9899c52a" Nov 28 15:43:31 crc kubenswrapper[4857]: I1128 15:43:31.650003 4857 scope.go:117] "RemoveContainer" containerID="b7f64186f2e4e8b91c9ea2a446427a4e54b7c5eeab1175de345494b9fd9c4c63" Nov 28 15:43:38 crc kubenswrapper[4857]: I1128 15:43:38.229409 4857 scope.go:117] "RemoveContainer" containerID="2fb8ed319a4d63abaa543df3c6375c2d445c7842c0e60d5249ec84ade1868d55" Nov 28 15:43:38 crc kubenswrapper[4857]: E1128 15:43:38.230666 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:43:51 crc kubenswrapper[4857]: I1128 15:43:51.229535 4857 scope.go:117] "RemoveContainer" containerID="2fb8ed319a4d63abaa543df3c6375c2d445c7842c0e60d5249ec84ade1868d55" Nov 28 15:43:51 crc kubenswrapper[4857]: E1128 15:43:51.230721 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:44:06 crc kubenswrapper[4857]: I1128 15:44:06.229986 4857 scope.go:117] "RemoveContainer" containerID="2fb8ed319a4d63abaa543df3c6375c2d445c7842c0e60d5249ec84ade1868d55" Nov 28 15:44:06 crc kubenswrapper[4857]: E1128 15:44:06.231219 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" 
podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:44:20 crc kubenswrapper[4857]: I1128 15:44:20.229312 4857 scope.go:117] "RemoveContainer" containerID="2fb8ed319a4d63abaa543df3c6375c2d445c7842c0e60d5249ec84ade1868d55" Nov 28 15:44:20 crc kubenswrapper[4857]: E1128 15:44:20.230176 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:44:31 crc kubenswrapper[4857]: I1128 15:44:31.229073 4857 scope.go:117] "RemoveContainer" containerID="2fb8ed319a4d63abaa543df3c6375c2d445c7842c0e60d5249ec84ade1868d55" Nov 28 15:44:31 crc kubenswrapper[4857]: E1128 15:44:31.230092 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:44:31 crc kubenswrapper[4857]: I1128 15:44:31.778079 4857 scope.go:117] "RemoveContainer" containerID="cddcb2cd1e1ee8abc8415148e18051ae32f9908ead6dcc093ebbb6fbe5e646fb" Nov 28 15:44:43 crc kubenswrapper[4857]: I1128 15:44:43.230498 4857 scope.go:117] "RemoveContainer" containerID="2fb8ed319a4d63abaa543df3c6375c2d445c7842c0e60d5249ec84ade1868d55" Nov 28 15:44:43 crc kubenswrapper[4857]: E1128 15:44:43.231279 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:44:57 crc kubenswrapper[4857]: I1128 15:44:57.229297 4857 scope.go:117] "RemoveContainer" containerID="2fb8ed319a4d63abaa543df3c6375c2d445c7842c0e60d5249ec84ade1868d55" Nov 28 15:44:57 crc kubenswrapper[4857]: E1128 15:44:57.230358 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:45:00 crc kubenswrapper[4857]: I1128 15:45:00.256585 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405745-rkz2z"] Nov 28 15:45:00 crc kubenswrapper[4857]: E1128 15:45:00.257577 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="afb78493-f170-48a6-a7cb-9f0fc88c10f4" containerName="extract-content" Nov 28 15:45:00 crc kubenswrapper[4857]: I1128 15:45:00.257593 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="afb78493-f170-48a6-a7cb-9f0fc88c10f4" containerName="extract-content" Nov 28 15:45:00 crc kubenswrapper[4857]: E1128 
15:45:00.257614 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="afb78493-f170-48a6-a7cb-9f0fc88c10f4" containerName="registry-server" Nov 28 15:45:00 crc kubenswrapper[4857]: I1128 15:45:00.257623 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="afb78493-f170-48a6-a7cb-9f0fc88c10f4" containerName="registry-server" Nov 28 15:45:00 crc kubenswrapper[4857]: E1128 15:45:00.257719 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="afb78493-f170-48a6-a7cb-9f0fc88c10f4" containerName="extract-utilities" Nov 28 15:45:00 crc kubenswrapper[4857]: I1128 15:45:00.257730 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="afb78493-f170-48a6-a7cb-9f0fc88c10f4" containerName="extract-utilities" Nov 28 15:45:00 crc kubenswrapper[4857]: I1128 15:45:00.257987 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="afb78493-f170-48a6-a7cb-9f0fc88c10f4" containerName="registry-server" Nov 28 15:45:00 crc kubenswrapper[4857]: I1128 15:45:00.258879 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405745-rkz2z"] Nov 28 15:45:00 crc kubenswrapper[4857]: I1128 15:45:00.258978 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-rkz2z" Nov 28 15:45:00 crc kubenswrapper[4857]: I1128 15:45:00.266460 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 15:45:00 crc kubenswrapper[4857]: I1128 15:45:00.266568 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 15:45:00 crc kubenswrapper[4857]: I1128 15:45:00.450149 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2nngd\" (UniqueName: \"kubernetes.io/projected/ec6315b8-2c41-4dce-8fa6-badb75453f9c-kube-api-access-2nngd\") pod \"collect-profiles-29405745-rkz2z\" (UID: \"ec6315b8-2c41-4dce-8fa6-badb75453f9c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-rkz2z" Nov 28 15:45:00 crc kubenswrapper[4857]: I1128 15:45:00.450449 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ec6315b8-2c41-4dce-8fa6-badb75453f9c-secret-volume\") pod \"collect-profiles-29405745-rkz2z\" (UID: \"ec6315b8-2c41-4dce-8fa6-badb75453f9c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-rkz2z" Nov 28 15:45:00 crc kubenswrapper[4857]: I1128 15:45:00.450669 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ec6315b8-2c41-4dce-8fa6-badb75453f9c-config-volume\") pod \"collect-profiles-29405745-rkz2z\" (UID: \"ec6315b8-2c41-4dce-8fa6-badb75453f9c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-rkz2z" Nov 28 15:45:00 crc kubenswrapper[4857]: I1128 15:45:00.552842 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ec6315b8-2c41-4dce-8fa6-badb75453f9c-secret-volume\") pod \"collect-profiles-29405745-rkz2z\" (UID: \"ec6315b8-2c41-4dce-8fa6-badb75453f9c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-rkz2z" Nov 28 15:45:00 crc kubenswrapper[4857]: 
I1128 15:45:00.552962 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ec6315b8-2c41-4dce-8fa6-badb75453f9c-config-volume\") pod \"collect-profiles-29405745-rkz2z\" (UID: \"ec6315b8-2c41-4dce-8fa6-badb75453f9c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-rkz2z" Nov 28 15:45:00 crc kubenswrapper[4857]: I1128 15:45:00.553064 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2nngd\" (UniqueName: \"kubernetes.io/projected/ec6315b8-2c41-4dce-8fa6-badb75453f9c-kube-api-access-2nngd\") pod \"collect-profiles-29405745-rkz2z\" (UID: \"ec6315b8-2c41-4dce-8fa6-badb75453f9c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-rkz2z" Nov 28 15:45:00 crc kubenswrapper[4857]: I1128 15:45:00.554051 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ec6315b8-2c41-4dce-8fa6-badb75453f9c-config-volume\") pod \"collect-profiles-29405745-rkz2z\" (UID: \"ec6315b8-2c41-4dce-8fa6-badb75453f9c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-rkz2z" Nov 28 15:45:00 crc kubenswrapper[4857]: I1128 15:45:00.569567 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ec6315b8-2c41-4dce-8fa6-badb75453f9c-secret-volume\") pod \"collect-profiles-29405745-rkz2z\" (UID: \"ec6315b8-2c41-4dce-8fa6-badb75453f9c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-rkz2z" Nov 28 15:45:00 crc kubenswrapper[4857]: I1128 15:45:00.569629 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2nngd\" (UniqueName: \"kubernetes.io/projected/ec6315b8-2c41-4dce-8fa6-badb75453f9c-kube-api-access-2nngd\") pod \"collect-profiles-29405745-rkz2z\" (UID: \"ec6315b8-2c41-4dce-8fa6-badb75453f9c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-rkz2z" Nov 28 15:45:00 crc kubenswrapper[4857]: I1128 15:45:00.599142 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-rkz2z" Nov 28 15:45:01 crc kubenswrapper[4857]: I1128 15:45:01.097840 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405745-rkz2z"] Nov 28 15:45:02 crc kubenswrapper[4857]: I1128 15:45:02.077072 4857 generic.go:334] "Generic (PLEG): container finished" podID="ec6315b8-2c41-4dce-8fa6-badb75453f9c" containerID="5544fbb6eff98ceb8c3cf47bba7de6439fdcc7bf0f09db2f46ad1559622b3883" exitCode=0 Nov 28 15:45:02 crc kubenswrapper[4857]: I1128 15:45:02.077635 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-rkz2z" event={"ID":"ec6315b8-2c41-4dce-8fa6-badb75453f9c","Type":"ContainerDied","Data":"5544fbb6eff98ceb8c3cf47bba7de6439fdcc7bf0f09db2f46ad1559622b3883"} Nov 28 15:45:02 crc kubenswrapper[4857]: I1128 15:45:02.077684 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-rkz2z" event={"ID":"ec6315b8-2c41-4dce-8fa6-badb75453f9c","Type":"ContainerStarted","Data":"8254786bcfbd5808f99f8e2d385c0437bb5c9fa739867231b3ef8f7e13903f10"} Nov 28 15:45:03 crc kubenswrapper[4857]: I1128 15:45:03.534182 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-rkz2z" Nov 28 15:45:03 crc kubenswrapper[4857]: I1128 15:45:03.714154 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ec6315b8-2c41-4dce-8fa6-badb75453f9c-config-volume\") pod \"ec6315b8-2c41-4dce-8fa6-badb75453f9c\" (UID: \"ec6315b8-2c41-4dce-8fa6-badb75453f9c\") " Nov 28 15:45:03 crc kubenswrapper[4857]: I1128 15:45:03.714228 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ec6315b8-2c41-4dce-8fa6-badb75453f9c-secret-volume\") pod \"ec6315b8-2c41-4dce-8fa6-badb75453f9c\" (UID: \"ec6315b8-2c41-4dce-8fa6-badb75453f9c\") " Nov 28 15:45:03 crc kubenswrapper[4857]: I1128 15:45:03.714296 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2nngd\" (UniqueName: \"kubernetes.io/projected/ec6315b8-2c41-4dce-8fa6-badb75453f9c-kube-api-access-2nngd\") pod \"ec6315b8-2c41-4dce-8fa6-badb75453f9c\" (UID: \"ec6315b8-2c41-4dce-8fa6-badb75453f9c\") " Nov 28 15:45:03 crc kubenswrapper[4857]: I1128 15:45:03.714735 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ec6315b8-2c41-4dce-8fa6-badb75453f9c-config-volume" (OuterVolumeSpecName: "config-volume") pod "ec6315b8-2c41-4dce-8fa6-badb75453f9c" (UID: "ec6315b8-2c41-4dce-8fa6-badb75453f9c"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:45:03 crc kubenswrapper[4857]: I1128 15:45:03.719806 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ec6315b8-2c41-4dce-8fa6-badb75453f9c-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "ec6315b8-2c41-4dce-8fa6-badb75453f9c" (UID: "ec6315b8-2c41-4dce-8fa6-badb75453f9c"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:45:03 crc kubenswrapper[4857]: I1128 15:45:03.719864 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec6315b8-2c41-4dce-8fa6-badb75453f9c-kube-api-access-2nngd" (OuterVolumeSpecName: "kube-api-access-2nngd") pod "ec6315b8-2c41-4dce-8fa6-badb75453f9c" (UID: "ec6315b8-2c41-4dce-8fa6-badb75453f9c"). InnerVolumeSpecName "kube-api-access-2nngd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:45:03 crc kubenswrapper[4857]: I1128 15:45:03.817363 4857 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ec6315b8-2c41-4dce-8fa6-badb75453f9c-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:03 crc kubenswrapper[4857]: I1128 15:45:03.817414 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2nngd\" (UniqueName: \"kubernetes.io/projected/ec6315b8-2c41-4dce-8fa6-badb75453f9c-kube-api-access-2nngd\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:03 crc kubenswrapper[4857]: I1128 15:45:03.817427 4857 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ec6315b8-2c41-4dce-8fa6-badb75453f9c-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 15:45:04 crc kubenswrapper[4857]: I1128 15:45:04.100077 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-rkz2z" event={"ID":"ec6315b8-2c41-4dce-8fa6-badb75453f9c","Type":"ContainerDied","Data":"8254786bcfbd5808f99f8e2d385c0437bb5c9fa739867231b3ef8f7e13903f10"} Nov 28 15:45:04 crc kubenswrapper[4857]: I1128 15:45:04.100131 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8254786bcfbd5808f99f8e2d385c0437bb5c9fa739867231b3ef8f7e13903f10" Nov 28 15:45:04 crc kubenswrapper[4857]: I1128 15:45:04.100135 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405745-rkz2z" Nov 28 15:45:04 crc kubenswrapper[4857]: I1128 15:45:04.606128 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405700-9mfxl"] Nov 28 15:45:04 crc kubenswrapper[4857]: I1128 15:45:04.614632 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405700-9mfxl"] Nov 28 15:45:06 crc kubenswrapper[4857]: I1128 15:45:06.251961 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ffebd404-bed5-47cb-b62c-2623c1b59568" path="/var/lib/kubelet/pods/ffebd404-bed5-47cb-b62c-2623c1b59568/volumes" Nov 28 15:45:10 crc kubenswrapper[4857]: I1128 15:45:10.260290 4857 scope.go:117] "RemoveContainer" containerID="2fb8ed319a4d63abaa543df3c6375c2d445c7842c0e60d5249ec84ade1868d55" Nov 28 15:45:10 crc kubenswrapper[4857]: E1128 15:45:10.263878 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:45:22 crc kubenswrapper[4857]: I1128 15:45:22.230218 4857 scope.go:117] "RemoveContainer" containerID="2fb8ed319a4d63abaa543df3c6375c2d445c7842c0e60d5249ec84ade1868d55" Nov 28 15:45:22 crc kubenswrapper[4857]: E1128 15:45:22.231112 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:45:31 crc kubenswrapper[4857]: I1128 15:45:31.827524 4857 scope.go:117] "RemoveContainer" containerID="106b2e90d47e7d80d423598d3a08726a893f506fb0b106f5174320ee76fa6161" Nov 28 15:45:36 crc kubenswrapper[4857]: I1128 15:45:36.230753 4857 scope.go:117] "RemoveContainer" containerID="2fb8ed319a4d63abaa543df3c6375c2d445c7842c0e60d5249ec84ade1868d55" Nov 28 15:45:36 crc kubenswrapper[4857]: E1128 15:45:36.231794 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:45:49 crc kubenswrapper[4857]: I1128 15:45:49.229889 4857 scope.go:117] "RemoveContainer" containerID="2fb8ed319a4d63abaa543df3c6375c2d445c7842c0e60d5249ec84ade1868d55" Nov 28 15:45:49 crc kubenswrapper[4857]: I1128 15:45:49.573662 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerStarted","Data":"96dea3fb6a548f2ab8de22dad0085e59debb7ac85a02180511c7f1c1fa4f68a4"} Nov 28 15:47:03 crc kubenswrapper[4857]: I1128 15:47:03.438629 4857 generic.go:334] "Generic (PLEG): container finished" podID="72fcedc3-8360-4cff-a0ec-a6e9adc9d54c" containerID="975e54ecfd5d4cab81715469711536ff3a0fe1c81f828ecf74c9dc57dbc4e0b6" exitCode=0 Nov 28 15:47:03 crc kubenswrapper[4857]: I1128 15:47:03.438720 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-openstack-openstack-cell1-6nnr8" event={"ID":"72fcedc3-8360-4cff-a0ec-a6e9adc9d54c","Type":"ContainerDied","Data":"975e54ecfd5d4cab81715469711536ff3a0fe1c81f828ecf74c9dc57dbc4e0b6"} Nov 28 15:47:04 crc kubenswrapper[4857]: I1128 15:47:04.948003 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-openstack-openstack-cell1-6nnr8" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.078791 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/72fcedc3-8360-4cff-a0ec-a6e9adc9d54c-ceph\") pod \"72fcedc3-8360-4cff-a0ec-a6e9adc9d54c\" (UID: \"72fcedc3-8360-4cff-a0ec-a6e9adc9d54c\") " Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.078939 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-txd86\" (UniqueName: \"kubernetes.io/projected/72fcedc3-8360-4cff-a0ec-a6e9adc9d54c-kube-api-access-txd86\") pod \"72fcedc3-8360-4cff-a0ec-a6e9adc9d54c\" (UID: \"72fcedc3-8360-4cff-a0ec-a6e9adc9d54c\") " Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.078987 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72fcedc3-8360-4cff-a0ec-a6e9adc9d54c-libvirt-combined-ca-bundle\") pod \"72fcedc3-8360-4cff-a0ec-a6e9adc9d54c\" (UID: \"72fcedc3-8360-4cff-a0ec-a6e9adc9d54c\") " Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.079043 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/72fcedc3-8360-4cff-a0ec-a6e9adc9d54c-ssh-key\") pod \"72fcedc3-8360-4cff-a0ec-a6e9adc9d54c\" (UID: \"72fcedc3-8360-4cff-a0ec-a6e9adc9d54c\") " Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.079151 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/72fcedc3-8360-4cff-a0ec-a6e9adc9d54c-libvirt-secret-0\") pod \"72fcedc3-8360-4cff-a0ec-a6e9adc9d54c\" (UID: \"72fcedc3-8360-4cff-a0ec-a6e9adc9d54c\") " Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.079231 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/72fcedc3-8360-4cff-a0ec-a6e9adc9d54c-inventory\") pod \"72fcedc3-8360-4cff-a0ec-a6e9adc9d54c\" (UID: \"72fcedc3-8360-4cff-a0ec-a6e9adc9d54c\") " Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.084553 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/72fcedc3-8360-4cff-a0ec-a6e9adc9d54c-ceph" (OuterVolumeSpecName: "ceph") pod "72fcedc3-8360-4cff-a0ec-a6e9adc9d54c" (UID: "72fcedc3-8360-4cff-a0ec-a6e9adc9d54c"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.086303 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/72fcedc3-8360-4cff-a0ec-a6e9adc9d54c-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "72fcedc3-8360-4cff-a0ec-a6e9adc9d54c" (UID: "72fcedc3-8360-4cff-a0ec-a6e9adc9d54c"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.087647 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/72fcedc3-8360-4cff-a0ec-a6e9adc9d54c-kube-api-access-txd86" (OuterVolumeSpecName: "kube-api-access-txd86") pod "72fcedc3-8360-4cff-a0ec-a6e9adc9d54c" (UID: "72fcedc3-8360-4cff-a0ec-a6e9adc9d54c"). InnerVolumeSpecName "kube-api-access-txd86". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.108209 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/72fcedc3-8360-4cff-a0ec-a6e9adc9d54c-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "72fcedc3-8360-4cff-a0ec-a6e9adc9d54c" (UID: "72fcedc3-8360-4cff-a0ec-a6e9adc9d54c"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.121425 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/72fcedc3-8360-4cff-a0ec-a6e9adc9d54c-inventory" (OuterVolumeSpecName: "inventory") pod "72fcedc3-8360-4cff-a0ec-a6e9adc9d54c" (UID: "72fcedc3-8360-4cff-a0ec-a6e9adc9d54c"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.123444 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/72fcedc3-8360-4cff-a0ec-a6e9adc9d54c-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "72fcedc3-8360-4cff-a0ec-a6e9adc9d54c" (UID: "72fcedc3-8360-4cff-a0ec-a6e9adc9d54c"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.182001 4857 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/72fcedc3-8360-4cff-a0ec-a6e9adc9d54c-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.182040 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-txd86\" (UniqueName: \"kubernetes.io/projected/72fcedc3-8360-4cff-a0ec-a6e9adc9d54c-kube-api-access-txd86\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.182051 4857 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72fcedc3-8360-4cff-a0ec-a6e9adc9d54c-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.182059 4857 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/72fcedc3-8360-4cff-a0ec-a6e9adc9d54c-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.182071 4857 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/72fcedc3-8360-4cff-a0ec-a6e9adc9d54c-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.182079 4857 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/72fcedc3-8360-4cff-a0ec-a6e9adc9d54c-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.468914 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-openstack-openstack-cell1-6nnr8" event={"ID":"72fcedc3-8360-4cff-a0ec-a6e9adc9d54c","Type":"ContainerDied","Data":"bc7346336b5c00de0cdd25dc3827859d9b167a84cec65869c859aefae1bd29a3"} Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.469007 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bc7346336b5c00de0cdd25dc3827859d9b167a84cec65869c859aefae1bd29a3" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.469085 4857 util.go:48] "No 
ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-openstack-openstack-cell1-6nnr8" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.633693 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-openstack-openstack-cell1-fqh98"] Nov 28 15:47:05 crc kubenswrapper[4857]: E1128 15:47:05.634745 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="72fcedc3-8360-4cff-a0ec-a6e9adc9d54c" containerName="libvirt-openstack-openstack-cell1" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.634763 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="72fcedc3-8360-4cff-a0ec-a6e9adc9d54c" containerName="libvirt-openstack-openstack-cell1" Nov 28 15:47:05 crc kubenswrapper[4857]: E1128 15:47:05.634832 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec6315b8-2c41-4dce-8fa6-badb75453f9c" containerName="collect-profiles" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.634840 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec6315b8-2c41-4dce-8fa6-badb75453f9c" containerName="collect-profiles" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.635372 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec6315b8-2c41-4dce-8fa6-badb75453f9c" containerName="collect-profiles" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.635415 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="72fcedc3-8360-4cff-a0ec-a6e9adc9d54c" containerName="libvirt-openstack-openstack-cell1" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.636899 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.643576 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.643872 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.644290 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.644632 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.645149 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-qbvn9" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.645557 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.645778 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-cells-global-config" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.651440 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-openstack-openstack-cell1-fqh98"] Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.796234 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-cells-global-config-1\") pod \"nova-cell1-openstack-openstack-cell1-fqh98\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " 
pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.796617 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-ceph\") pod \"nova-cell1-openstack-openstack-cell1-fqh98\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.796655 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-openstack-cell1-fqh98\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.796683 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-openstack-cell1-fqh98\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.796711 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-openstack-cell1-fqh98\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.797049 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-openstack-cell1-fqh98\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.797123 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-inventory\") pod \"nova-cell1-openstack-openstack-cell1-fqh98\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.797177 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-openstack-cell1-fqh98\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.797350 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-ssh-key\") pod \"nova-cell1-openstack-openstack-cell1-fqh98\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " 
pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.797420 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d99rf\" (UniqueName: \"kubernetes.io/projected/dbb3905a-b817-4261-88d3-9b8bd0d12548-kube-api-access-d99rf\") pod \"nova-cell1-openstack-openstack-cell1-fqh98\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.797532 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-cells-global-config-0\") pod \"nova-cell1-openstack-openstack-cell1-fqh98\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.899542 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-ssh-key\") pod \"nova-cell1-openstack-openstack-cell1-fqh98\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.899611 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d99rf\" (UniqueName: \"kubernetes.io/projected/dbb3905a-b817-4261-88d3-9b8bd0d12548-kube-api-access-d99rf\") pod \"nova-cell1-openstack-openstack-cell1-fqh98\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.899731 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-cells-global-config-0\") pod \"nova-cell1-openstack-openstack-cell1-fqh98\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.899799 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-cells-global-config-1\") pod \"nova-cell1-openstack-openstack-cell1-fqh98\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.899875 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-ceph\") pod \"nova-cell1-openstack-openstack-cell1-fqh98\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.899932 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-openstack-cell1-fqh98\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" Nov 28 15:47:05 crc kubenswrapper[4857]: 
I1128 15:47:05.900019 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-openstack-cell1-fqh98\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.900073 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-openstack-cell1-fqh98\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.900144 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-openstack-cell1-fqh98\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.900204 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-inventory\") pod \"nova-cell1-openstack-openstack-cell1-fqh98\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.900241 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-openstack-cell1-fqh98\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.901468 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-cells-global-config-0\") pod \"nova-cell1-openstack-openstack-cell1-fqh98\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.901600 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-cells-global-config-1\") pod \"nova-cell1-openstack-openstack-cell1-fqh98\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.906829 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-ceph\") pod \"nova-cell1-openstack-openstack-cell1-fqh98\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.907145 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: 
\"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-openstack-cell1-fqh98\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.906832 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-ssh-key\") pod \"nova-cell1-openstack-openstack-cell1-fqh98\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.908507 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-openstack-cell1-fqh98\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.908849 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-openstack-cell1-fqh98\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.908919 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-inventory\") pod \"nova-cell1-openstack-openstack-cell1-fqh98\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.909578 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-openstack-cell1-fqh98\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.911615 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-openstack-cell1-fqh98\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.932716 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d99rf\" (UniqueName: \"kubernetes.io/projected/dbb3905a-b817-4261-88d3-9b8bd0d12548-kube-api-access-d99rf\") pod \"nova-cell1-openstack-openstack-cell1-fqh98\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" Nov 28 15:47:05 crc kubenswrapper[4857]: I1128 15:47:05.968275 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" Nov 28 15:47:06 crc kubenswrapper[4857]: I1128 15:47:06.555806 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-openstack-openstack-cell1-fqh98"] Nov 28 15:47:06 crc kubenswrapper[4857]: W1128 15:47:06.564393 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddbb3905a_b817_4261_88d3_9b8bd0d12548.slice/crio-76eb14c5b775897c3abcc82a4feb118b3c94b162ec12b1b94d99257df2f20235 WatchSource:0}: Error finding container 76eb14c5b775897c3abcc82a4feb118b3c94b162ec12b1b94d99257df2f20235: Status 404 returned error can't find the container with id 76eb14c5b775897c3abcc82a4feb118b3c94b162ec12b1b94d99257df2f20235 Nov 28 15:47:07 crc kubenswrapper[4857]: I1128 15:47:07.499340 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" event={"ID":"dbb3905a-b817-4261-88d3-9b8bd0d12548","Type":"ContainerStarted","Data":"d8defd92b500f7065a8ed98f7ef716cd14c840a2d11c1027a495e2a2e779d513"} Nov 28 15:47:07 crc kubenswrapper[4857]: I1128 15:47:07.499643 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" event={"ID":"dbb3905a-b817-4261-88d3-9b8bd0d12548","Type":"ContainerStarted","Data":"76eb14c5b775897c3abcc82a4feb118b3c94b162ec12b1b94d99257df2f20235"} Nov 28 15:47:07 crc kubenswrapper[4857]: I1128 15:47:07.530536 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" podStartSLOduration=2.310496052 podStartE2EDuration="2.530521865s" podCreationTimestamp="2025-11-28 15:47:05 +0000 UTC" firstStartedPulling="2025-11-28 15:47:06.566697732 +0000 UTC m=+8276.690639169" lastFinishedPulling="2025-11-28 15:47:06.786723545 +0000 UTC m=+8276.910664982" observedRunningTime="2025-11-28 15:47:07.52733734 +0000 UTC m=+8277.651278817" watchObservedRunningTime="2025-11-28 15:47:07.530521865 +0000 UTC m=+8277.654463292" Nov 28 15:47:28 crc kubenswrapper[4857]: I1128 15:47:28.738196 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-8rkbv"] Nov 28 15:47:28 crc kubenswrapper[4857]: I1128 15:47:28.741098 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-8rkbv" Nov 28 15:47:28 crc kubenswrapper[4857]: I1128 15:47:28.755384 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8rkbv"] Nov 28 15:47:28 crc kubenswrapper[4857]: I1128 15:47:28.798439 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b2bpw\" (UniqueName: \"kubernetes.io/projected/77ff0e46-ac7c-480f-a38c-56f1a3ef4e24-kube-api-access-b2bpw\") pod \"redhat-operators-8rkbv\" (UID: \"77ff0e46-ac7c-480f-a38c-56f1a3ef4e24\") " pod="openshift-marketplace/redhat-operators-8rkbv" Nov 28 15:47:28 crc kubenswrapper[4857]: I1128 15:47:28.798654 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77ff0e46-ac7c-480f-a38c-56f1a3ef4e24-utilities\") pod \"redhat-operators-8rkbv\" (UID: \"77ff0e46-ac7c-480f-a38c-56f1a3ef4e24\") " pod="openshift-marketplace/redhat-operators-8rkbv" Nov 28 15:47:28 crc kubenswrapper[4857]: I1128 15:47:28.799034 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77ff0e46-ac7c-480f-a38c-56f1a3ef4e24-catalog-content\") pod \"redhat-operators-8rkbv\" (UID: \"77ff0e46-ac7c-480f-a38c-56f1a3ef4e24\") " pod="openshift-marketplace/redhat-operators-8rkbv" Nov 28 15:47:28 crc kubenswrapper[4857]: I1128 15:47:28.901459 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77ff0e46-ac7c-480f-a38c-56f1a3ef4e24-catalog-content\") pod \"redhat-operators-8rkbv\" (UID: \"77ff0e46-ac7c-480f-a38c-56f1a3ef4e24\") " pod="openshift-marketplace/redhat-operators-8rkbv" Nov 28 15:47:28 crc kubenswrapper[4857]: I1128 15:47:28.901928 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b2bpw\" (UniqueName: \"kubernetes.io/projected/77ff0e46-ac7c-480f-a38c-56f1a3ef4e24-kube-api-access-b2bpw\") pod \"redhat-operators-8rkbv\" (UID: \"77ff0e46-ac7c-480f-a38c-56f1a3ef4e24\") " pod="openshift-marketplace/redhat-operators-8rkbv" Nov 28 15:47:28 crc kubenswrapper[4857]: I1128 15:47:28.902052 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77ff0e46-ac7c-480f-a38c-56f1a3ef4e24-utilities\") pod \"redhat-operators-8rkbv\" (UID: \"77ff0e46-ac7c-480f-a38c-56f1a3ef4e24\") " pod="openshift-marketplace/redhat-operators-8rkbv" Nov 28 15:47:28 crc kubenswrapper[4857]: I1128 15:47:28.902334 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77ff0e46-ac7c-480f-a38c-56f1a3ef4e24-catalog-content\") pod \"redhat-operators-8rkbv\" (UID: \"77ff0e46-ac7c-480f-a38c-56f1a3ef4e24\") " pod="openshift-marketplace/redhat-operators-8rkbv" Nov 28 15:47:28 crc kubenswrapper[4857]: I1128 15:47:28.902392 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77ff0e46-ac7c-480f-a38c-56f1a3ef4e24-utilities\") pod \"redhat-operators-8rkbv\" (UID: \"77ff0e46-ac7c-480f-a38c-56f1a3ef4e24\") " pod="openshift-marketplace/redhat-operators-8rkbv" Nov 28 15:47:28 crc kubenswrapper[4857]: I1128 15:47:28.929773 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-b2bpw\" (UniqueName: \"kubernetes.io/projected/77ff0e46-ac7c-480f-a38c-56f1a3ef4e24-kube-api-access-b2bpw\") pod \"redhat-operators-8rkbv\" (UID: \"77ff0e46-ac7c-480f-a38c-56f1a3ef4e24\") " pod="openshift-marketplace/redhat-operators-8rkbv" Nov 28 15:47:29 crc kubenswrapper[4857]: I1128 15:47:29.061827 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8rkbv" Nov 28 15:47:29 crc kubenswrapper[4857]: I1128 15:47:29.747704 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8rkbv"] Nov 28 15:47:29 crc kubenswrapper[4857]: I1128 15:47:29.963391 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8rkbv" event={"ID":"77ff0e46-ac7c-480f-a38c-56f1a3ef4e24","Type":"ContainerStarted","Data":"491bd9009d403ceae0b5da2006c540dee2df1a226806e74798aca4702e41a26b"} Nov 28 15:47:29 crc kubenswrapper[4857]: I1128 15:47:29.963800 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8rkbv" event={"ID":"77ff0e46-ac7c-480f-a38c-56f1a3ef4e24","Type":"ContainerStarted","Data":"331e49bfefc0e3c1e379941ed4bd71ff3cbae2fc9b71007e0ea5b05d90386db5"} Nov 28 15:47:30 crc kubenswrapper[4857]: I1128 15:47:30.981785 4857 generic.go:334] "Generic (PLEG): container finished" podID="77ff0e46-ac7c-480f-a38c-56f1a3ef4e24" containerID="491bd9009d403ceae0b5da2006c540dee2df1a226806e74798aca4702e41a26b" exitCode=0 Nov 28 15:47:30 crc kubenswrapper[4857]: I1128 15:47:30.982040 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8rkbv" event={"ID":"77ff0e46-ac7c-480f-a38c-56f1a3ef4e24","Type":"ContainerDied","Data":"491bd9009d403ceae0b5da2006c540dee2df1a226806e74798aca4702e41a26b"} Nov 28 15:47:30 crc kubenswrapper[4857]: I1128 15:47:30.985898 4857 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 15:47:33 crc kubenswrapper[4857]: I1128 15:47:33.012773 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8rkbv" event={"ID":"77ff0e46-ac7c-480f-a38c-56f1a3ef4e24","Type":"ContainerStarted","Data":"1729e973a94033f2bb15aa75949c888eb4b20316b7973906b204ac8755662dff"} Nov 28 15:47:36 crc kubenswrapper[4857]: I1128 15:47:36.051747 4857 generic.go:334] "Generic (PLEG): container finished" podID="77ff0e46-ac7c-480f-a38c-56f1a3ef4e24" containerID="1729e973a94033f2bb15aa75949c888eb4b20316b7973906b204ac8755662dff" exitCode=0 Nov 28 15:47:36 crc kubenswrapper[4857]: I1128 15:47:36.051871 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8rkbv" event={"ID":"77ff0e46-ac7c-480f-a38c-56f1a3ef4e24","Type":"ContainerDied","Data":"1729e973a94033f2bb15aa75949c888eb4b20316b7973906b204ac8755662dff"} Nov 28 15:47:37 crc kubenswrapper[4857]: I1128 15:47:37.067591 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8rkbv" event={"ID":"77ff0e46-ac7c-480f-a38c-56f1a3ef4e24","Type":"ContainerStarted","Data":"8b610623b6dda34e6a254a50db8ac4b759dfb81aa9d483bd094c5a05a9e7c3b6"} Nov 28 15:47:37 crc kubenswrapper[4857]: I1128 15:47:37.100112 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-8rkbv" podStartSLOduration=3.585370568 podStartE2EDuration="9.100072898s" podCreationTimestamp="2025-11-28 15:47:28 +0000 UTC" firstStartedPulling="2025-11-28 
Nov 28 15:47:39 crc kubenswrapper[4857]: I1128 15:47:39.063157 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-8rkbv"
Nov 28 15:47:39 crc kubenswrapper[4857]: I1128 15:47:39.063217 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-8rkbv"
Nov 28 15:47:40 crc kubenswrapper[4857]: I1128 15:47:40.114536 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-8rkbv" podUID="77ff0e46-ac7c-480f-a38c-56f1a3ef4e24" containerName="registry-server" probeResult="failure" output=<
Nov 28 15:47:40 crc kubenswrapper[4857]: timeout: failed to connect service ":50051" within 1s
Nov 28 15:47:40 crc kubenswrapper[4857]: >
Nov 28 15:47:49 crc kubenswrapper[4857]: I1128 15:47:49.136883 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-8rkbv"
Nov 28 15:47:49 crc kubenswrapper[4857]: I1128 15:47:49.256398 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-8rkbv"
Nov 28 15:47:49 crc kubenswrapper[4857]: I1128 15:47:49.395939 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8rkbv"]
Nov 28 15:47:50 crc kubenswrapper[4857]: I1128 15:47:50.291570 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-8rkbv" podUID="77ff0e46-ac7c-480f-a38c-56f1a3ef4e24" containerName="registry-server" containerID="cri-o://8b610623b6dda34e6a254a50db8ac4b759dfb81aa9d483bd094c5a05a9e7c3b6" gracePeriod=2
Nov 28 15:47:50 crc kubenswrapper[4857]: I1128 15:47:50.872163 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8rkbv"
Nov 28 15:47:50 crc kubenswrapper[4857]: I1128 15:47:50.895842 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b2bpw\" (UniqueName: \"kubernetes.io/projected/77ff0e46-ac7c-480f-a38c-56f1a3ef4e24-kube-api-access-b2bpw\") pod \"77ff0e46-ac7c-480f-a38c-56f1a3ef4e24\" (UID: \"77ff0e46-ac7c-480f-a38c-56f1a3ef4e24\") "
Nov 28 15:47:50 crc kubenswrapper[4857]: I1128 15:47:50.896032 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77ff0e46-ac7c-480f-a38c-56f1a3ef4e24-utilities\") pod \"77ff0e46-ac7c-480f-a38c-56f1a3ef4e24\" (UID: \"77ff0e46-ac7c-480f-a38c-56f1a3ef4e24\") "
Nov 28 15:47:50 crc kubenswrapper[4857]: I1128 15:47:50.896937 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/77ff0e46-ac7c-480f-a38c-56f1a3ef4e24-utilities" (OuterVolumeSpecName: "utilities") pod "77ff0e46-ac7c-480f-a38c-56f1a3ef4e24" (UID: "77ff0e46-ac7c-480f-a38c-56f1a3ef4e24"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:47:50 crc kubenswrapper[4857]: I1128 15:47:50.897179 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77ff0e46-ac7c-480f-a38c-56f1a3ef4e24-catalog-content\") pod \"77ff0e46-ac7c-480f-a38c-56f1a3ef4e24\" (UID: \"77ff0e46-ac7c-480f-a38c-56f1a3ef4e24\") "
Nov 28 15:47:50 crc kubenswrapper[4857]: I1128 15:47:50.900705 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77ff0e46-ac7c-480f-a38c-56f1a3ef4e24-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 15:47:50 crc kubenswrapper[4857]: I1128 15:47:50.908241 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77ff0e46-ac7c-480f-a38c-56f1a3ef4e24-kube-api-access-b2bpw" (OuterVolumeSpecName: "kube-api-access-b2bpw") pod "77ff0e46-ac7c-480f-a38c-56f1a3ef4e24" (UID: "77ff0e46-ac7c-480f-a38c-56f1a3ef4e24"). InnerVolumeSpecName "kube-api-access-b2bpw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:47:51 crc kubenswrapper[4857]: I1128 15:47:51.004082 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b2bpw\" (UniqueName: \"kubernetes.io/projected/77ff0e46-ac7c-480f-a38c-56f1a3ef4e24-kube-api-access-b2bpw\") on node \"crc\" DevicePath \"\""
Nov 28 15:47:51 crc kubenswrapper[4857]: I1128 15:47:51.016317 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/77ff0e46-ac7c-480f-a38c-56f1a3ef4e24-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "77ff0e46-ac7c-480f-a38c-56f1a3ef4e24" (UID: "77ff0e46-ac7c-480f-a38c-56f1a3ef4e24"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:47:51 crc kubenswrapper[4857]: I1128 15:47:51.107894 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77ff0e46-ac7c-480f-a38c-56f1a3ef4e24-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 15:47:51 crc kubenswrapper[4857]: I1128 15:47:51.311379 4857 generic.go:334] "Generic (PLEG): container finished" podID="77ff0e46-ac7c-480f-a38c-56f1a3ef4e24" containerID="8b610623b6dda34e6a254a50db8ac4b759dfb81aa9d483bd094c5a05a9e7c3b6" exitCode=0
Nov 28 15:47:51 crc kubenswrapper[4857]: I1128 15:47:51.311478 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8rkbv"
Nov 28 15:47:51 crc kubenswrapper[4857]: I1128 15:47:51.311484 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8rkbv" event={"ID":"77ff0e46-ac7c-480f-a38c-56f1a3ef4e24","Type":"ContainerDied","Data":"8b610623b6dda34e6a254a50db8ac4b759dfb81aa9d483bd094c5a05a9e7c3b6"}
Nov 28 15:47:51 crc kubenswrapper[4857]: I1128 15:47:51.311700 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8rkbv" event={"ID":"77ff0e46-ac7c-480f-a38c-56f1a3ef4e24","Type":"ContainerDied","Data":"331e49bfefc0e3c1e379941ed4bd71ff3cbae2fc9b71007e0ea5b05d90386db5"}
Nov 28 15:47:51 crc kubenswrapper[4857]: I1128 15:47:51.311744 4857 scope.go:117] "RemoveContainer" containerID="8b610623b6dda34e6a254a50db8ac4b759dfb81aa9d483bd094c5a05a9e7c3b6"
Nov 28 15:47:51 crc kubenswrapper[4857]: I1128 15:47:51.377860 4857 scope.go:117] "RemoveContainer" containerID="1729e973a94033f2bb15aa75949c888eb4b20316b7973906b204ac8755662dff"
Nov 28 15:47:51 crc kubenswrapper[4857]: I1128 15:47:51.379873 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8rkbv"]
Nov 28 15:47:51 crc kubenswrapper[4857]: I1128 15:47:51.390801 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-8rkbv"]
Nov 28 15:47:51 crc kubenswrapper[4857]: I1128 15:47:51.435133 4857 scope.go:117] "RemoveContainer" containerID="491bd9009d403ceae0b5da2006c540dee2df1a226806e74798aca4702e41a26b"
Nov 28 15:47:51 crc kubenswrapper[4857]: I1128 15:47:51.473746 4857 scope.go:117] "RemoveContainer" containerID="8b610623b6dda34e6a254a50db8ac4b759dfb81aa9d483bd094c5a05a9e7c3b6"
Nov 28 15:47:51 crc kubenswrapper[4857]: E1128 15:47:51.474683 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8b610623b6dda34e6a254a50db8ac4b759dfb81aa9d483bd094c5a05a9e7c3b6\": container with ID starting with 8b610623b6dda34e6a254a50db8ac4b759dfb81aa9d483bd094c5a05a9e7c3b6 not found: ID does not exist" containerID="8b610623b6dda34e6a254a50db8ac4b759dfb81aa9d483bd094c5a05a9e7c3b6"
Nov 28 15:47:51 crc kubenswrapper[4857]: I1128 15:47:51.474723 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b610623b6dda34e6a254a50db8ac4b759dfb81aa9d483bd094c5a05a9e7c3b6"} err="failed to get container status \"8b610623b6dda34e6a254a50db8ac4b759dfb81aa9d483bd094c5a05a9e7c3b6\": rpc error: code = NotFound desc = could not find container \"8b610623b6dda34e6a254a50db8ac4b759dfb81aa9d483bd094c5a05a9e7c3b6\": container with ID starting with 8b610623b6dda34e6a254a50db8ac4b759dfb81aa9d483bd094c5a05a9e7c3b6 not found: ID does not exist"
Nov 28 15:47:51 crc kubenswrapper[4857]: I1128 15:47:51.474747 4857 scope.go:117] "RemoveContainer" containerID="1729e973a94033f2bb15aa75949c888eb4b20316b7973906b204ac8755662dff"
Nov 28 15:47:51 crc kubenswrapper[4857]: E1128 15:47:51.475455 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1729e973a94033f2bb15aa75949c888eb4b20316b7973906b204ac8755662dff\": container with ID starting with 1729e973a94033f2bb15aa75949c888eb4b20316b7973906b204ac8755662dff not found: ID does not exist" containerID="1729e973a94033f2bb15aa75949c888eb4b20316b7973906b204ac8755662dff"
Nov 28 15:47:51 crc kubenswrapper[4857]: I1128 15:47:51.475612 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1729e973a94033f2bb15aa75949c888eb4b20316b7973906b204ac8755662dff"} err="failed to get container status \"1729e973a94033f2bb15aa75949c888eb4b20316b7973906b204ac8755662dff\": rpc error: code = NotFound desc = could not find container \"1729e973a94033f2bb15aa75949c888eb4b20316b7973906b204ac8755662dff\": container with ID starting with 1729e973a94033f2bb15aa75949c888eb4b20316b7973906b204ac8755662dff not found: ID does not exist"
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1729e973a94033f2bb15aa75949c888eb4b20316b7973906b204ac8755662dff"} err="failed to get container status \"1729e973a94033f2bb15aa75949c888eb4b20316b7973906b204ac8755662dff\": rpc error: code = NotFound desc = could not find container \"1729e973a94033f2bb15aa75949c888eb4b20316b7973906b204ac8755662dff\": container with ID starting with 1729e973a94033f2bb15aa75949c888eb4b20316b7973906b204ac8755662dff not found: ID does not exist" Nov 28 15:47:51 crc kubenswrapper[4857]: I1128 15:47:51.475655 4857 scope.go:117] "RemoveContainer" containerID="491bd9009d403ceae0b5da2006c540dee2df1a226806e74798aca4702e41a26b" Nov 28 15:47:51 crc kubenswrapper[4857]: E1128 15:47:51.476310 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"491bd9009d403ceae0b5da2006c540dee2df1a226806e74798aca4702e41a26b\": container with ID starting with 491bd9009d403ceae0b5da2006c540dee2df1a226806e74798aca4702e41a26b not found: ID does not exist" containerID="491bd9009d403ceae0b5da2006c540dee2df1a226806e74798aca4702e41a26b" Nov 28 15:47:51 crc kubenswrapper[4857]: I1128 15:47:51.476354 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"491bd9009d403ceae0b5da2006c540dee2df1a226806e74798aca4702e41a26b"} err="failed to get container status \"491bd9009d403ceae0b5da2006c540dee2df1a226806e74798aca4702e41a26b\": rpc error: code = NotFound desc = could not find container \"491bd9009d403ceae0b5da2006c540dee2df1a226806e74798aca4702e41a26b\": container with ID starting with 491bd9009d403ceae0b5da2006c540dee2df1a226806e74798aca4702e41a26b not found: ID does not exist" Nov 28 15:47:52 crc kubenswrapper[4857]: I1128 15:47:52.247172 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="77ff0e46-ac7c-480f-a38c-56f1a3ef4e24" path="/var/lib/kubelet/pods/77ff0e46-ac7c-480f-a38c-56f1a3ef4e24/volumes" Nov 28 15:48:11 crc kubenswrapper[4857]: I1128 15:48:11.308708 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:48:11 crc kubenswrapper[4857]: I1128 15:48:11.309346 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:48:31 crc kubenswrapper[4857]: I1128 15:48:31.968715 4857 scope.go:117] "RemoveContainer" containerID="f816eb1a9abd1d25f86adddff9a37a4d814eeb19d726fb73263dae6d92002dd1" Nov 28 15:48:41 crc kubenswrapper[4857]: I1128 15:48:41.309132 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:48:41 crc kubenswrapper[4857]: I1128 15:48:41.310643 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" 
containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:49:11 crc kubenswrapper[4857]: I1128 15:49:11.308150 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:49:11 crc kubenswrapper[4857]: I1128 15:49:11.308707 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 15:49:11 crc kubenswrapper[4857]: I1128 15:49:11.308754 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 15:49:11 crc kubenswrapper[4857]: I1128 15:49:11.309624 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"96dea3fb6a548f2ab8de22dad0085e59debb7ac85a02180511c7f1c1fa4f68a4"} pod="openshift-machine-config-operator/machine-config-daemon-dshsf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 15:49:11 crc kubenswrapper[4857]: I1128 15:49:11.309695 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" containerID="cri-o://96dea3fb6a548f2ab8de22dad0085e59debb7ac85a02180511c7f1c1fa4f68a4" gracePeriod=600 Nov 28 15:49:12 crc kubenswrapper[4857]: I1128 15:49:12.353172 4857 generic.go:334] "Generic (PLEG): container finished" podID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerID="96dea3fb6a548f2ab8de22dad0085e59debb7ac85a02180511c7f1c1fa4f68a4" exitCode=0 Nov 28 15:49:12 crc kubenswrapper[4857]: I1128 15:49:12.353225 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerDied","Data":"96dea3fb6a548f2ab8de22dad0085e59debb7ac85a02180511c7f1c1fa4f68a4"} Nov 28 15:49:12 crc kubenswrapper[4857]: I1128 15:49:12.353768 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerStarted","Data":"344a437ad15b006fb0ad8fffd41a8343e36703951524ac5b5fe770932213a6ba"} Nov 28 15:49:12 crc kubenswrapper[4857]: I1128 15:49:12.353797 4857 scope.go:117] "RemoveContainer" containerID="2fb8ed319a4d63abaa543df3c6375c2d445c7842c0e60d5249ec84ade1868d55" Nov 28 15:49:32 crc kubenswrapper[4857]: I1128 15:49:32.059863 4857 scope.go:117] "RemoveContainer" containerID="eb4c53c1d76a8bccf3bb52a0d8cf55ced840e9e6e4c5f936dea7df21ad21bc08" Nov 28 15:49:32 crc kubenswrapper[4857]: I1128 15:49:32.104013 4857 scope.go:117] "RemoveContainer" containerID="a8a73ba3bf8c734c60eeacbd7fe3c034138ccec62b670c3b7c9eb9dff531809d" Nov 28 15:49:35 crc kubenswrapper[4857]: I1128 15:49:35.159502 4857 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/community-operators-qssgv"] Nov 28 15:49:35 crc kubenswrapper[4857]: E1128 15:49:35.160913 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77ff0e46-ac7c-480f-a38c-56f1a3ef4e24" containerName="registry-server" Nov 28 15:49:35 crc kubenswrapper[4857]: I1128 15:49:35.160935 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="77ff0e46-ac7c-480f-a38c-56f1a3ef4e24" containerName="registry-server" Nov 28 15:49:35 crc kubenswrapper[4857]: E1128 15:49:35.161050 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77ff0e46-ac7c-480f-a38c-56f1a3ef4e24" containerName="extract-utilities" Nov 28 15:49:35 crc kubenswrapper[4857]: I1128 15:49:35.161064 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="77ff0e46-ac7c-480f-a38c-56f1a3ef4e24" containerName="extract-utilities" Nov 28 15:49:35 crc kubenswrapper[4857]: E1128 15:49:35.161105 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77ff0e46-ac7c-480f-a38c-56f1a3ef4e24" containerName="extract-content" Nov 28 15:49:35 crc kubenswrapper[4857]: I1128 15:49:35.161118 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="77ff0e46-ac7c-480f-a38c-56f1a3ef4e24" containerName="extract-content" Nov 28 15:49:35 crc kubenswrapper[4857]: I1128 15:49:35.161550 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="77ff0e46-ac7c-480f-a38c-56f1a3ef4e24" containerName="registry-server" Nov 28 15:49:35 crc kubenswrapper[4857]: I1128 15:49:35.165125 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qssgv" Nov 28 15:49:35 crc kubenswrapper[4857]: I1128 15:49:35.191267 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qssgv"] Nov 28 15:49:35 crc kubenswrapper[4857]: I1128 15:49:35.191510 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1870287-c29c-4362-b229-874218beebec-catalog-content\") pod \"community-operators-qssgv\" (UID: \"c1870287-c29c-4362-b229-874218beebec\") " pod="openshift-marketplace/community-operators-qssgv" Nov 28 15:49:35 crc kubenswrapper[4857]: I1128 15:49:35.191624 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-27n88\" (UniqueName: \"kubernetes.io/projected/c1870287-c29c-4362-b229-874218beebec-kube-api-access-27n88\") pod \"community-operators-qssgv\" (UID: \"c1870287-c29c-4362-b229-874218beebec\") " pod="openshift-marketplace/community-operators-qssgv" Nov 28 15:49:35 crc kubenswrapper[4857]: I1128 15:49:35.192001 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c1870287-c29c-4362-b229-874218beebec-utilities\") pod \"community-operators-qssgv\" (UID: \"c1870287-c29c-4362-b229-874218beebec\") " pod="openshift-marketplace/community-operators-qssgv" Nov 28 15:49:35 crc kubenswrapper[4857]: I1128 15:49:35.293878 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c1870287-c29c-4362-b229-874218beebec-utilities\") pod \"community-operators-qssgv\" (UID: \"c1870287-c29c-4362-b229-874218beebec\") " pod="openshift-marketplace/community-operators-qssgv" Nov 28 15:49:35 crc kubenswrapper[4857]: I1128 15:49:35.294314 4857 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1870287-c29c-4362-b229-874218beebec-catalog-content\") pod \"community-operators-qssgv\" (UID: \"c1870287-c29c-4362-b229-874218beebec\") " pod="openshift-marketplace/community-operators-qssgv" Nov 28 15:49:35 crc kubenswrapper[4857]: I1128 15:49:35.294398 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-27n88\" (UniqueName: \"kubernetes.io/projected/c1870287-c29c-4362-b229-874218beebec-kube-api-access-27n88\") pod \"community-operators-qssgv\" (UID: \"c1870287-c29c-4362-b229-874218beebec\") " pod="openshift-marketplace/community-operators-qssgv" Nov 28 15:49:35 crc kubenswrapper[4857]: I1128 15:49:35.294429 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c1870287-c29c-4362-b229-874218beebec-utilities\") pod \"community-operators-qssgv\" (UID: \"c1870287-c29c-4362-b229-874218beebec\") " pod="openshift-marketplace/community-operators-qssgv" Nov 28 15:49:35 crc kubenswrapper[4857]: I1128 15:49:35.294656 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1870287-c29c-4362-b229-874218beebec-catalog-content\") pod \"community-operators-qssgv\" (UID: \"c1870287-c29c-4362-b229-874218beebec\") " pod="openshift-marketplace/community-operators-qssgv" Nov 28 15:49:35 crc kubenswrapper[4857]: I1128 15:49:35.325838 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-27n88\" (UniqueName: \"kubernetes.io/projected/c1870287-c29c-4362-b229-874218beebec-kube-api-access-27n88\") pod \"community-operators-qssgv\" (UID: \"c1870287-c29c-4362-b229-874218beebec\") " pod="openshift-marketplace/community-operators-qssgv" Nov 28 15:49:35 crc kubenswrapper[4857]: I1128 15:49:35.504940 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-qssgv" Nov 28 15:49:36 crc kubenswrapper[4857]: I1128 15:49:36.102337 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qssgv"] Nov 28 15:49:36 crc kubenswrapper[4857]: I1128 15:49:36.682478 4857 generic.go:334] "Generic (PLEG): container finished" podID="c1870287-c29c-4362-b229-874218beebec" containerID="3a1500803ecdc1a3e5c9011d513cfd97e9e176020286e73d23a50aa0855d70d6" exitCode=0 Nov 28 15:49:36 crc kubenswrapper[4857]: I1128 15:49:36.682581 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qssgv" event={"ID":"c1870287-c29c-4362-b229-874218beebec","Type":"ContainerDied","Data":"3a1500803ecdc1a3e5c9011d513cfd97e9e176020286e73d23a50aa0855d70d6"} Nov 28 15:49:36 crc kubenswrapper[4857]: I1128 15:49:36.682834 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qssgv" event={"ID":"c1870287-c29c-4362-b229-874218beebec","Type":"ContainerStarted","Data":"445ee8ea1fa9ef297b4066853213d491d51297dfb257b198d0fdd5d3af48c01d"} Nov 28 15:49:38 crc kubenswrapper[4857]: I1128 15:49:38.709468 4857 generic.go:334] "Generic (PLEG): container finished" podID="c1870287-c29c-4362-b229-874218beebec" containerID="856d28e25949f08cd876e985789b0ef0d39d23a4bc724acac5aa5ab923a4ca6c" exitCode=0 Nov 28 15:49:38 crc kubenswrapper[4857]: I1128 15:49:38.709618 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qssgv" event={"ID":"c1870287-c29c-4362-b229-874218beebec","Type":"ContainerDied","Data":"856d28e25949f08cd876e985789b0ef0d39d23a4bc724acac5aa5ab923a4ca6c"} Nov 28 15:49:39 crc kubenswrapper[4857]: I1128 15:49:39.723474 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qssgv" event={"ID":"c1870287-c29c-4362-b229-874218beebec","Type":"ContainerStarted","Data":"96c051f8fa5abebefa0067342c8dedfbe86be511bb08584c7185fb0e6475b12c"} Nov 28 15:49:39 crc kubenswrapper[4857]: I1128 15:49:39.750392 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-qssgv" podStartSLOduration=2.106687112 podStartE2EDuration="4.750366048s" podCreationTimestamp="2025-11-28 15:49:35 +0000 UTC" firstStartedPulling="2025-11-28 15:49:36.684464904 +0000 UTC m=+8426.808406361" lastFinishedPulling="2025-11-28 15:49:39.32814386 +0000 UTC m=+8429.452085297" observedRunningTime="2025-11-28 15:49:39.740260619 +0000 UTC m=+8429.864202076" watchObservedRunningTime="2025-11-28 15:49:39.750366048 +0000 UTC m=+8429.874307525" Nov 28 15:49:45 crc kubenswrapper[4857]: I1128 15:49:45.506015 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-qssgv" Nov 28 15:49:45 crc kubenswrapper[4857]: I1128 15:49:45.506682 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-qssgv" Nov 28 15:49:45 crc kubenswrapper[4857]: I1128 15:49:45.574725 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-qssgv" Nov 28 15:49:45 crc kubenswrapper[4857]: I1128 15:49:45.848715 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-qssgv" Nov 28 15:49:45 crc kubenswrapper[4857]: I1128 15:49:45.901405 4857 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openshift-marketplace/community-operators-qssgv"] Nov 28 15:49:47 crc kubenswrapper[4857]: I1128 15:49:47.825753 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-qssgv" podUID="c1870287-c29c-4362-b229-874218beebec" containerName="registry-server" containerID="cri-o://96c051f8fa5abebefa0067342c8dedfbe86be511bb08584c7185fb0e6475b12c" gracePeriod=2 Nov 28 15:49:48 crc kubenswrapper[4857]: I1128 15:49:48.460897 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qssgv" Nov 28 15:49:48 crc kubenswrapper[4857]: I1128 15:49:48.608735 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c1870287-c29c-4362-b229-874218beebec-utilities\") pod \"c1870287-c29c-4362-b229-874218beebec\" (UID: \"c1870287-c29c-4362-b229-874218beebec\") " Nov 28 15:49:48 crc kubenswrapper[4857]: I1128 15:49:48.609197 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-27n88\" (UniqueName: \"kubernetes.io/projected/c1870287-c29c-4362-b229-874218beebec-kube-api-access-27n88\") pod \"c1870287-c29c-4362-b229-874218beebec\" (UID: \"c1870287-c29c-4362-b229-874218beebec\") " Nov 28 15:49:48 crc kubenswrapper[4857]: I1128 15:49:48.609351 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1870287-c29c-4362-b229-874218beebec-catalog-content\") pod \"c1870287-c29c-4362-b229-874218beebec\" (UID: \"c1870287-c29c-4362-b229-874218beebec\") " Nov 28 15:49:48 crc kubenswrapper[4857]: I1128 15:49:48.610260 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c1870287-c29c-4362-b229-874218beebec-utilities" (OuterVolumeSpecName: "utilities") pod "c1870287-c29c-4362-b229-874218beebec" (UID: "c1870287-c29c-4362-b229-874218beebec"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:49:48 crc kubenswrapper[4857]: I1128 15:49:48.617700 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1870287-c29c-4362-b229-874218beebec-kube-api-access-27n88" (OuterVolumeSpecName: "kube-api-access-27n88") pod "c1870287-c29c-4362-b229-874218beebec" (UID: "c1870287-c29c-4362-b229-874218beebec"). InnerVolumeSpecName "kube-api-access-27n88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:49:48 crc kubenswrapper[4857]: I1128 15:49:48.664324 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c1870287-c29c-4362-b229-874218beebec-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c1870287-c29c-4362-b229-874218beebec" (UID: "c1870287-c29c-4362-b229-874218beebec"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:49:48 crc kubenswrapper[4857]: I1128 15:49:48.713020 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-27n88\" (UniqueName: \"kubernetes.io/projected/c1870287-c29c-4362-b229-874218beebec-kube-api-access-27n88\") on node \"crc\" DevicePath \"\"" Nov 28 15:49:48 crc kubenswrapper[4857]: I1128 15:49:48.713063 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1870287-c29c-4362-b229-874218beebec-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:49:48 crc kubenswrapper[4857]: I1128 15:49:48.713075 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c1870287-c29c-4362-b229-874218beebec-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:49:48 crc kubenswrapper[4857]: I1128 15:49:48.848901 4857 generic.go:334] "Generic (PLEG): container finished" podID="c1870287-c29c-4362-b229-874218beebec" containerID="96c051f8fa5abebefa0067342c8dedfbe86be511bb08584c7185fb0e6475b12c" exitCode=0 Nov 28 15:49:48 crc kubenswrapper[4857]: I1128 15:49:48.849013 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qssgv" event={"ID":"c1870287-c29c-4362-b229-874218beebec","Type":"ContainerDied","Data":"96c051f8fa5abebefa0067342c8dedfbe86be511bb08584c7185fb0e6475b12c"} Nov 28 15:49:48 crc kubenswrapper[4857]: I1128 15:49:48.849059 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qssgv" event={"ID":"c1870287-c29c-4362-b229-874218beebec","Type":"ContainerDied","Data":"445ee8ea1fa9ef297b4066853213d491d51297dfb257b198d0fdd5d3af48c01d"} Nov 28 15:49:48 crc kubenswrapper[4857]: I1128 15:49:48.849091 4857 scope.go:117] "RemoveContainer" containerID="96c051f8fa5abebefa0067342c8dedfbe86be511bb08584c7185fb0e6475b12c" Nov 28 15:49:48 crc kubenswrapper[4857]: I1128 15:49:48.849336 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-qssgv" Nov 28 15:49:48 crc kubenswrapper[4857]: I1128 15:49:48.887689 4857 scope.go:117] "RemoveContainer" containerID="856d28e25949f08cd876e985789b0ef0d39d23a4bc724acac5aa5ab923a4ca6c" Nov 28 15:49:48 crc kubenswrapper[4857]: I1128 15:49:48.906463 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qssgv"] Nov 28 15:49:48 crc kubenswrapper[4857]: I1128 15:49:48.920695 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-qssgv"] Nov 28 15:49:48 crc kubenswrapper[4857]: I1128 15:49:48.928788 4857 scope.go:117] "RemoveContainer" containerID="3a1500803ecdc1a3e5c9011d513cfd97e9e176020286e73d23a50aa0855d70d6" Nov 28 15:49:49 crc kubenswrapper[4857]: I1128 15:49:49.008257 4857 scope.go:117] "RemoveContainer" containerID="96c051f8fa5abebefa0067342c8dedfbe86be511bb08584c7185fb0e6475b12c" Nov 28 15:49:49 crc kubenswrapper[4857]: E1128 15:49:49.009199 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"96c051f8fa5abebefa0067342c8dedfbe86be511bb08584c7185fb0e6475b12c\": container with ID starting with 96c051f8fa5abebefa0067342c8dedfbe86be511bb08584c7185fb0e6475b12c not found: ID does not exist" containerID="96c051f8fa5abebefa0067342c8dedfbe86be511bb08584c7185fb0e6475b12c" Nov 28 15:49:49 crc kubenswrapper[4857]: I1128 15:49:49.009270 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"96c051f8fa5abebefa0067342c8dedfbe86be511bb08584c7185fb0e6475b12c"} err="failed to get container status \"96c051f8fa5abebefa0067342c8dedfbe86be511bb08584c7185fb0e6475b12c\": rpc error: code = NotFound desc = could not find container \"96c051f8fa5abebefa0067342c8dedfbe86be511bb08584c7185fb0e6475b12c\": container with ID starting with 96c051f8fa5abebefa0067342c8dedfbe86be511bb08584c7185fb0e6475b12c not found: ID does not exist" Nov 28 15:49:49 crc kubenswrapper[4857]: I1128 15:49:49.009303 4857 scope.go:117] "RemoveContainer" containerID="856d28e25949f08cd876e985789b0ef0d39d23a4bc724acac5aa5ab923a4ca6c" Nov 28 15:49:49 crc kubenswrapper[4857]: E1128 15:49:49.009992 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"856d28e25949f08cd876e985789b0ef0d39d23a4bc724acac5aa5ab923a4ca6c\": container with ID starting with 856d28e25949f08cd876e985789b0ef0d39d23a4bc724acac5aa5ab923a4ca6c not found: ID does not exist" containerID="856d28e25949f08cd876e985789b0ef0d39d23a4bc724acac5aa5ab923a4ca6c" Nov 28 15:49:49 crc kubenswrapper[4857]: I1128 15:49:49.010029 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"856d28e25949f08cd876e985789b0ef0d39d23a4bc724acac5aa5ab923a4ca6c"} err="failed to get container status \"856d28e25949f08cd876e985789b0ef0d39d23a4bc724acac5aa5ab923a4ca6c\": rpc error: code = NotFound desc = could not find container \"856d28e25949f08cd876e985789b0ef0d39d23a4bc724acac5aa5ab923a4ca6c\": container with ID starting with 856d28e25949f08cd876e985789b0ef0d39d23a4bc724acac5aa5ab923a4ca6c not found: ID does not exist" Nov 28 15:49:49 crc kubenswrapper[4857]: I1128 15:49:49.010048 4857 scope.go:117] "RemoveContainer" containerID="3a1500803ecdc1a3e5c9011d513cfd97e9e176020286e73d23a50aa0855d70d6" Nov 28 15:49:49 crc kubenswrapper[4857]: E1128 15:49:49.010749 4857 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"3a1500803ecdc1a3e5c9011d513cfd97e9e176020286e73d23a50aa0855d70d6\": container with ID starting with 3a1500803ecdc1a3e5c9011d513cfd97e9e176020286e73d23a50aa0855d70d6 not found: ID does not exist" containerID="3a1500803ecdc1a3e5c9011d513cfd97e9e176020286e73d23a50aa0855d70d6" Nov 28 15:49:49 crc kubenswrapper[4857]: I1128 15:49:49.010808 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3a1500803ecdc1a3e5c9011d513cfd97e9e176020286e73d23a50aa0855d70d6"} err="failed to get container status \"3a1500803ecdc1a3e5c9011d513cfd97e9e176020286e73d23a50aa0855d70d6\": rpc error: code = NotFound desc = could not find container \"3a1500803ecdc1a3e5c9011d513cfd97e9e176020286e73d23a50aa0855d70d6\": container with ID starting with 3a1500803ecdc1a3e5c9011d513cfd97e9e176020286e73d23a50aa0855d70d6 not found: ID does not exist" Nov 28 15:49:50 crc kubenswrapper[4857]: I1128 15:49:50.253355 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c1870287-c29c-4362-b229-874218beebec" path="/var/lib/kubelet/pods/c1870287-c29c-4362-b229-874218beebec/volumes" Nov 28 15:50:27 crc kubenswrapper[4857]: I1128 15:50:27.359119 4857 generic.go:334] "Generic (PLEG): container finished" podID="dbb3905a-b817-4261-88d3-9b8bd0d12548" containerID="d8defd92b500f7065a8ed98f7ef716cd14c840a2d11c1027a495e2a2e779d513" exitCode=0 Nov 28 15:50:27 crc kubenswrapper[4857]: I1128 15:50:27.359208 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" event={"ID":"dbb3905a-b817-4261-88d3-9b8bd0d12548","Type":"ContainerDied","Data":"d8defd92b500f7065a8ed98f7ef716cd14c840a2d11c1027a495e2a2e779d513"} Nov 28 15:50:28 crc kubenswrapper[4857]: I1128 15:50:28.960096 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.071739 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-ceph\") pod \"dbb3905a-b817-4261-88d3-9b8bd0d12548\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.071856 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d99rf\" (UniqueName: \"kubernetes.io/projected/dbb3905a-b817-4261-88d3-9b8bd0d12548-kube-api-access-d99rf\") pod \"dbb3905a-b817-4261-88d3-9b8bd0d12548\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.071903 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-inventory\") pod \"dbb3905a-b817-4261-88d3-9b8bd0d12548\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.072009 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-migration-ssh-key-0\") pod \"dbb3905a-b817-4261-88d3-9b8bd0d12548\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.072121 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-cells-global-config-1\") pod \"dbb3905a-b817-4261-88d3-9b8bd0d12548\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.072155 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-cell1-compute-config-1\") pod \"dbb3905a-b817-4261-88d3-9b8bd0d12548\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.072214 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-cell1-combined-ca-bundle\") pod \"dbb3905a-b817-4261-88d3-9b8bd0d12548\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.072253 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-ssh-key\") pod \"dbb3905a-b817-4261-88d3-9b8bd0d12548\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.072293 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-cell1-compute-config-0\") pod \"dbb3905a-b817-4261-88d3-9b8bd0d12548\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.072325 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: 
\"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-migration-ssh-key-1\") pod \"dbb3905a-b817-4261-88d3-9b8bd0d12548\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.072442 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-cells-global-config-0\") pod \"dbb3905a-b817-4261-88d3-9b8bd0d12548\" (UID: \"dbb3905a-b817-4261-88d3-9b8bd0d12548\") " Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.079250 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-ceph" (OuterVolumeSpecName: "ceph") pod "dbb3905a-b817-4261-88d3-9b8bd0d12548" (UID: "dbb3905a-b817-4261-88d3-9b8bd0d12548"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.080068 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dbb3905a-b817-4261-88d3-9b8bd0d12548-kube-api-access-d99rf" (OuterVolumeSpecName: "kube-api-access-d99rf") pod "dbb3905a-b817-4261-88d3-9b8bd0d12548" (UID: "dbb3905a-b817-4261-88d3-9b8bd0d12548"). InnerVolumeSpecName "kube-api-access-d99rf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.081899 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-cell1-combined-ca-bundle" (OuterVolumeSpecName: "nova-cell1-combined-ca-bundle") pod "dbb3905a-b817-4261-88d3-9b8bd0d12548" (UID: "dbb3905a-b817-4261-88d3-9b8bd0d12548"). InnerVolumeSpecName "nova-cell1-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.103135 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-cells-global-config-0" (OuterVolumeSpecName: "nova-cells-global-config-0") pod "dbb3905a-b817-4261-88d3-9b8bd0d12548" (UID: "dbb3905a-b817-4261-88d3-9b8bd0d12548"). InnerVolumeSpecName "nova-cells-global-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.109935 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "dbb3905a-b817-4261-88d3-9b8bd0d12548" (UID: "dbb3905a-b817-4261-88d3-9b8bd0d12548"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.113164 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "dbb3905a-b817-4261-88d3-9b8bd0d12548" (UID: "dbb3905a-b817-4261-88d3-9b8bd0d12548"). InnerVolumeSpecName "nova-cell1-compute-config-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.121420 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-inventory" (OuterVolumeSpecName: "inventory") pod "dbb3905a-b817-4261-88d3-9b8bd0d12548" (UID: "dbb3905a-b817-4261-88d3-9b8bd0d12548"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.124265 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "dbb3905a-b817-4261-88d3-9b8bd0d12548" (UID: "dbb3905a-b817-4261-88d3-9b8bd0d12548"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.132105 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "dbb3905a-b817-4261-88d3-9b8bd0d12548" (UID: "dbb3905a-b817-4261-88d3-9b8bd0d12548"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.139860 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "dbb3905a-b817-4261-88d3-9b8bd0d12548" (UID: "dbb3905a-b817-4261-88d3-9b8bd0d12548"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.140910 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-cells-global-config-1" (OuterVolumeSpecName: "nova-cells-global-config-1") pod "dbb3905a-b817-4261-88d3-9b8bd0d12548" (UID: "dbb3905a-b817-4261-88d3-9b8bd0d12548"). InnerVolumeSpecName "nova-cells-global-config-1". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.175102 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d99rf\" (UniqueName: \"kubernetes.io/projected/dbb3905a-b817-4261-88d3-9b8bd0d12548-kube-api-access-d99rf\") on node \"crc\" DevicePath \"\"" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.175268 4857 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.175324 4857 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.175403 4857 reconciler_common.go:293] "Volume detached for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-cells-global-config-1\") on node \"crc\" DevicePath \"\"" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.175466 4857 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.175519 4857 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-cell1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.175569 4857 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.175620 4857 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.175671 4857 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.175720 4857 reconciler_common.go:293] "Volume detached for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/dbb3905a-b817-4261-88d3-9b8bd0d12548-nova-cells-global-config-0\") on node \"crc\" DevicePath \"\"" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.175776 4857 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/dbb3905a-b817-4261-88d3-9b8bd0d12548-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.383691 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" event={"ID":"dbb3905a-b817-4261-88d3-9b8bd0d12548","Type":"ContainerDied","Data":"76eb14c5b775897c3abcc82a4feb118b3c94b162ec12b1b94d99257df2f20235"} Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.383734 4857 pod_container_deletor.go:80] "Container 
not found in pod's containers" containerID="76eb14c5b775897c3abcc82a4feb118b3c94b162ec12b1b94d99257df2f20235" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.383732 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-openstack-cell1-fqh98" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.497275 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-openstack-openstack-cell1-xdxr5"] Nov 28 15:50:29 crc kubenswrapper[4857]: E1128 15:50:29.497829 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1870287-c29c-4362-b229-874218beebec" containerName="extract-utilities" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.497850 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1870287-c29c-4362-b229-874218beebec" containerName="extract-utilities" Nov 28 15:50:29 crc kubenswrapper[4857]: E1128 15:50:29.497880 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbb3905a-b817-4261-88d3-9b8bd0d12548" containerName="nova-cell1-openstack-openstack-cell1" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.497889 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbb3905a-b817-4261-88d3-9b8bd0d12548" containerName="nova-cell1-openstack-openstack-cell1" Nov 28 15:50:29 crc kubenswrapper[4857]: E1128 15:50:29.497916 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1870287-c29c-4362-b229-874218beebec" containerName="extract-content" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.497924 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1870287-c29c-4362-b229-874218beebec" containerName="extract-content" Nov 28 15:50:29 crc kubenswrapper[4857]: E1128 15:50:29.497960 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1870287-c29c-4362-b229-874218beebec" containerName="registry-server" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.497968 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1870287-c29c-4362-b229-874218beebec" containerName="registry-server" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.498207 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1870287-c29c-4362-b229-874218beebec" containerName="registry-server" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.498239 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="dbb3905a-b817-4261-88d3-9b8bd0d12548" containerName="nova-cell1-openstack-openstack-cell1" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.499228 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-openstack-openstack-cell1-xdxr5" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.500990 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.501709 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.502011 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.502322 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-qbvn9" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.504699 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.521104 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-openstack-openstack-cell1-xdxr5"] Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.584168 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-telemetry-combined-ca-bundle\") pod \"telemetry-openstack-openstack-cell1-xdxr5\" (UID: \"afa81231-caa2-4a60-b28f-77409c0837e2\") " pod="openstack/telemetry-openstack-openstack-cell1-xdxr5" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.584533 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-ssh-key\") pod \"telemetry-openstack-openstack-cell1-xdxr5\" (UID: \"afa81231-caa2-4a60-b28f-77409c0837e2\") " pod="openstack/telemetry-openstack-openstack-cell1-xdxr5" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.584689 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-ceilometer-compute-config-data-1\") pod \"telemetry-openstack-openstack-cell1-xdxr5\" (UID: \"afa81231-caa2-4a60-b28f-77409c0837e2\") " pod="openstack/telemetry-openstack-openstack-cell1-xdxr5" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.584922 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-ceph\") pod \"telemetry-openstack-openstack-cell1-xdxr5\" (UID: \"afa81231-caa2-4a60-b28f-77409c0837e2\") " pod="openstack/telemetry-openstack-openstack-cell1-xdxr5" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.585142 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-ceilometer-compute-config-data-0\") pod \"telemetry-openstack-openstack-cell1-xdxr5\" (UID: \"afa81231-caa2-4a60-b28f-77409c0837e2\") " pod="openstack/telemetry-openstack-openstack-cell1-xdxr5" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.585230 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-inventory\") pod \"telemetry-openstack-openstack-cell1-xdxr5\" (UID: \"afa81231-caa2-4a60-b28f-77409c0837e2\") " pod="openstack/telemetry-openstack-openstack-cell1-xdxr5" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.585446 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mnvbp\" (UniqueName: \"kubernetes.io/projected/afa81231-caa2-4a60-b28f-77409c0837e2-kube-api-access-mnvbp\") pod \"telemetry-openstack-openstack-cell1-xdxr5\" (UID: \"afa81231-caa2-4a60-b28f-77409c0837e2\") " pod="openstack/telemetry-openstack-openstack-cell1-xdxr5" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.585536 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-ceilometer-compute-config-data-2\") pod \"telemetry-openstack-openstack-cell1-xdxr5\" (UID: \"afa81231-caa2-4a60-b28f-77409c0837e2\") " pod="openstack/telemetry-openstack-openstack-cell1-xdxr5" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.686970 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mnvbp\" (UniqueName: \"kubernetes.io/projected/afa81231-caa2-4a60-b28f-77409c0837e2-kube-api-access-mnvbp\") pod \"telemetry-openstack-openstack-cell1-xdxr5\" (UID: \"afa81231-caa2-4a60-b28f-77409c0837e2\") " pod="openstack/telemetry-openstack-openstack-cell1-xdxr5" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.687037 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-ceilometer-compute-config-data-2\") pod \"telemetry-openstack-openstack-cell1-xdxr5\" (UID: \"afa81231-caa2-4a60-b28f-77409c0837e2\") " pod="openstack/telemetry-openstack-openstack-cell1-xdxr5" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.687081 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-telemetry-combined-ca-bundle\") pod \"telemetry-openstack-openstack-cell1-xdxr5\" (UID: \"afa81231-caa2-4a60-b28f-77409c0837e2\") " pod="openstack/telemetry-openstack-openstack-cell1-xdxr5" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.687854 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-ssh-key\") pod \"telemetry-openstack-openstack-cell1-xdxr5\" (UID: \"afa81231-caa2-4a60-b28f-77409c0837e2\") " pod="openstack/telemetry-openstack-openstack-cell1-xdxr5" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.687915 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-ceilometer-compute-config-data-1\") pod \"telemetry-openstack-openstack-cell1-xdxr5\" (UID: \"afa81231-caa2-4a60-b28f-77409c0837e2\") " pod="openstack/telemetry-openstack-openstack-cell1-xdxr5" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.687977 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-ceph\") pod 
\"telemetry-openstack-openstack-cell1-xdxr5\" (UID: \"afa81231-caa2-4a60-b28f-77409c0837e2\") " pod="openstack/telemetry-openstack-openstack-cell1-xdxr5" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.688300 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-ceilometer-compute-config-data-0\") pod \"telemetry-openstack-openstack-cell1-xdxr5\" (UID: \"afa81231-caa2-4a60-b28f-77409c0837e2\") " pod="openstack/telemetry-openstack-openstack-cell1-xdxr5" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.688374 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-inventory\") pod \"telemetry-openstack-openstack-cell1-xdxr5\" (UID: \"afa81231-caa2-4a60-b28f-77409c0837e2\") " pod="openstack/telemetry-openstack-openstack-cell1-xdxr5" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.692820 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-ceilometer-compute-config-data-0\") pod \"telemetry-openstack-openstack-cell1-xdxr5\" (UID: \"afa81231-caa2-4a60-b28f-77409c0837e2\") " pod="openstack/telemetry-openstack-openstack-cell1-xdxr5" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.696797 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-ssh-key\") pod \"telemetry-openstack-openstack-cell1-xdxr5\" (UID: \"afa81231-caa2-4a60-b28f-77409c0837e2\") " pod="openstack/telemetry-openstack-openstack-cell1-xdxr5" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.697163 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-ceph\") pod \"telemetry-openstack-openstack-cell1-xdxr5\" (UID: \"afa81231-caa2-4a60-b28f-77409c0837e2\") " pod="openstack/telemetry-openstack-openstack-cell1-xdxr5" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.697296 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-inventory\") pod \"telemetry-openstack-openstack-cell1-xdxr5\" (UID: \"afa81231-caa2-4a60-b28f-77409c0837e2\") " pod="openstack/telemetry-openstack-openstack-cell1-xdxr5" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.697498 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-ceilometer-compute-config-data-2\") pod \"telemetry-openstack-openstack-cell1-xdxr5\" (UID: \"afa81231-caa2-4a60-b28f-77409c0837e2\") " pod="openstack/telemetry-openstack-openstack-cell1-xdxr5" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.697853 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-telemetry-combined-ca-bundle\") pod \"telemetry-openstack-openstack-cell1-xdxr5\" (UID: \"afa81231-caa2-4a60-b28f-77409c0837e2\") " pod="openstack/telemetry-openstack-openstack-cell1-xdxr5" Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.698070 4857 
Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.712060 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mnvbp\" (UniqueName: \"kubernetes.io/projected/afa81231-caa2-4a60-b28f-77409c0837e2-kube-api-access-mnvbp\") pod \"telemetry-openstack-openstack-cell1-xdxr5\" (UID: \"afa81231-caa2-4a60-b28f-77409c0837e2\") " pod="openstack/telemetry-openstack-openstack-cell1-xdxr5"
Nov 28 15:50:29 crc kubenswrapper[4857]: I1128 15:50:29.827884 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-openstack-openstack-cell1-xdxr5"
Nov 28 15:50:30 crc kubenswrapper[4857]: I1128 15:50:30.384463 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-openstack-openstack-cell1-xdxr5"]
Nov 28 15:50:31 crc kubenswrapper[4857]: I1128 15:50:31.426841 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-openstack-openstack-cell1-xdxr5" event={"ID":"afa81231-caa2-4a60-b28f-77409c0837e2","Type":"ContainerStarted","Data":"f5aabe87e62fbabe000cb8339fd06735600ad6d853ec23b0929137c1d565d10a"}
Nov 28 15:50:31 crc kubenswrapper[4857]: I1128 15:50:31.427694 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-openstack-openstack-cell1-xdxr5" event={"ID":"afa81231-caa2-4a60-b28f-77409c0837e2","Type":"ContainerStarted","Data":"b62385b1f645190f1b4b9e3cdcfc3dc4d0443208ad1fd34a7e2479bc0797d80c"}
Nov 28 15:50:31 crc kubenswrapper[4857]: I1128 15:50:31.457722 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-openstack-openstack-cell1-xdxr5" podStartSLOduration=2.273099502 podStartE2EDuration="2.457703351s" podCreationTimestamp="2025-11-28 15:50:29 +0000 UTC" firstStartedPulling="2025-11-28 15:50:30.393531861 +0000 UTC m=+8480.517473298" lastFinishedPulling="2025-11-28 15:50:30.5781357 +0000 UTC m=+8480.702077147" observedRunningTime="2025-11-28 15:50:31.451518497 +0000 UTC m=+8481.575459964" watchObservedRunningTime="2025-11-28 15:50:31.457703351 +0000 UTC m=+8481.581644778"
Nov 28 15:51:11 crc kubenswrapper[4857]: I1128 15:51:11.308831 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 15:51:11 crc kubenswrapper[4857]: I1128 15:51:11.309749 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 15:51:41 crc kubenswrapper[4857]: I1128 15:51:41.308591 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 15:51:41 crc kubenswrapper[4857]: I1128 15:51:41.309736 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 15:52:11 crc kubenswrapper[4857]: I1128 15:52:11.308752 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 28 15:52:11 crc kubenswrapper[4857]: I1128 15:52:11.309587 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 28 15:52:11 crc kubenswrapper[4857]: I1128 15:52:11.309649 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dshsf"
Nov 28 15:52:11 crc kubenswrapper[4857]: I1128 15:52:11.310773 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"344a437ad15b006fb0ad8fffd41a8343e36703951524ac5b5fe770932213a6ba"} pod="openshift-machine-config-operator/machine-config-daemon-dshsf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 28 15:52:11 crc kubenswrapper[4857]: I1128 15:52:11.310855 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" containerID="cri-o://344a437ad15b006fb0ad8fffd41a8343e36703951524ac5b5fe770932213a6ba" gracePeriod=600
Nov 28 15:52:11 crc kubenswrapper[4857]: I1128 15:52:11.677079 4857 generic.go:334] "Generic (PLEG): container finished" podID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerID="344a437ad15b006fb0ad8fffd41a8343e36703951524ac5b5fe770932213a6ba" exitCode=0
Nov 28 15:52:11 crc kubenswrapper[4857]: I1128 15:52:11.677159 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerDied","Data":"344a437ad15b006fb0ad8fffd41a8343e36703951524ac5b5fe770932213a6ba"}
Nov 28 15:52:11 crc kubenswrapper[4857]: I1128 15:52:11.677618 4857 scope.go:117] "RemoveContainer" containerID="96dea3fb6a548f2ab8de22dad0085e59debb7ac85a02180511c7f1c1fa4f68a4"
Nov 28 15:52:12 crc kubenswrapper[4857]: E1128 15:52:12.006135 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 15:52:12 crc kubenswrapper[4857]: I1128 15:52:12.692745 4857 scope.go:117] "RemoveContainer" containerID="344a437ad15b006fb0ad8fffd41a8343e36703951524ac5b5fe770932213a6ba"
Nov 28 15:52:12 crc kubenswrapper[4857]: E1128 15:52:12.695671 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 15:52:26 crc kubenswrapper[4857]: I1128 15:52:26.229570 4857 scope.go:117] "RemoveContainer" containerID="344a437ad15b006fb0ad8fffd41a8343e36703951524ac5b5fe770932213a6ba"
Nov 28 15:52:26 crc kubenswrapper[4857]: E1128 15:52:26.230898 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 15:52:39 crc kubenswrapper[4857]: I1128 15:52:39.229048 4857 scope.go:117] "RemoveContainer" containerID="344a437ad15b006fb0ad8fffd41a8343e36703951524ac5b5fe770932213a6ba"
Nov 28 15:52:39 crc kubenswrapper[4857]: E1128 15:52:39.230156 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 15:52:54 crc kubenswrapper[4857]: I1128 15:52:54.230397 4857 scope.go:117] "RemoveContainer" containerID="344a437ad15b006fb0ad8fffd41a8343e36703951524ac5b5fe770932213a6ba"
Nov 28 15:52:54 crc kubenswrapper[4857]: E1128 15:52:54.231314 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 15:53:07 crc kubenswrapper[4857]: I1128 15:53:07.229981 4857 scope.go:117] "RemoveContainer" containerID="344a437ad15b006fb0ad8fffd41a8343e36703951524ac5b5fe770932213a6ba"
Nov 28 15:53:07 crc kubenswrapper[4857]: E1128 15:53:07.230936 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 15:53:11 crc kubenswrapper[4857]: I1128 15:53:11.622603 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-7bnj5"]
Nov 28 15:53:11 crc kubenswrapper[4857]: I1128 15:53:11.625457 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7bnj5"
Nov 28 15:53:11 crc kubenswrapper[4857]: I1128 15:53:11.647036 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7bnj5"]
Nov 28 15:53:11 crc kubenswrapper[4857]: I1128 15:53:11.770071 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nxbrf\" (UniqueName: \"kubernetes.io/projected/f8804f55-e65f-4d2a-81d9-5238e5be1d0e-kube-api-access-nxbrf\") pod \"redhat-marketplace-7bnj5\" (UID: \"f8804f55-e65f-4d2a-81d9-5238e5be1d0e\") " pod="openshift-marketplace/redhat-marketplace-7bnj5"
Nov 28 15:53:11 crc kubenswrapper[4857]: I1128 15:53:11.770424 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8804f55-e65f-4d2a-81d9-5238e5be1d0e-utilities\") pod \"redhat-marketplace-7bnj5\" (UID: \"f8804f55-e65f-4d2a-81d9-5238e5be1d0e\") " pod="openshift-marketplace/redhat-marketplace-7bnj5"
Nov 28 15:53:11 crc kubenswrapper[4857]: I1128 15:53:11.770655 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8804f55-e65f-4d2a-81d9-5238e5be1d0e-catalog-content\") pod \"redhat-marketplace-7bnj5\" (UID: \"f8804f55-e65f-4d2a-81d9-5238e5be1d0e\") " pod="openshift-marketplace/redhat-marketplace-7bnj5"
Nov 28 15:53:11 crc kubenswrapper[4857]: I1128 15:53:11.872774 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8804f55-e65f-4d2a-81d9-5238e5be1d0e-catalog-content\") pod \"redhat-marketplace-7bnj5\" (UID: \"f8804f55-e65f-4d2a-81d9-5238e5be1d0e\") " pod="openshift-marketplace/redhat-marketplace-7bnj5"
Nov 28 15:53:11 crc kubenswrapper[4857]: I1128 15:53:11.873012 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nxbrf\" (UniqueName: \"kubernetes.io/projected/f8804f55-e65f-4d2a-81d9-5238e5be1d0e-kube-api-access-nxbrf\") pod \"redhat-marketplace-7bnj5\" (UID: \"f8804f55-e65f-4d2a-81d9-5238e5be1d0e\") " pod="openshift-marketplace/redhat-marketplace-7bnj5"
Nov 28 15:53:11 crc kubenswrapper[4857]: I1128 15:53:11.873062 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8804f55-e65f-4d2a-81d9-5238e5be1d0e-utilities\") pod \"redhat-marketplace-7bnj5\" (UID: \"f8804f55-e65f-4d2a-81d9-5238e5be1d0e\") " pod="openshift-marketplace/redhat-marketplace-7bnj5"
Nov 28 15:53:11 crc kubenswrapper[4857]: I1128 15:53:11.873831 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8804f55-e65f-4d2a-81d9-5238e5be1d0e-catalog-content\") pod \"redhat-marketplace-7bnj5\" (UID: \"f8804f55-e65f-4d2a-81d9-5238e5be1d0e\") " pod="openshift-marketplace/redhat-marketplace-7bnj5"
Nov 28 15:53:11 crc kubenswrapper[4857]: I1128 15:53:11.874165 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8804f55-e65f-4d2a-81d9-5238e5be1d0e-utilities\") pod \"redhat-marketplace-7bnj5\" (UID: \"f8804f55-e65f-4d2a-81d9-5238e5be1d0e\") " pod="openshift-marketplace/redhat-marketplace-7bnj5"
Nov 28 15:53:11 crc kubenswrapper[4857]: I1128 15:53:11.902295 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nxbrf\" (UniqueName: \"kubernetes.io/projected/f8804f55-e65f-4d2a-81d9-5238e5be1d0e-kube-api-access-nxbrf\") pod \"redhat-marketplace-7bnj5\" (UID: \"f8804f55-e65f-4d2a-81d9-5238e5be1d0e\") " pod="openshift-marketplace/redhat-marketplace-7bnj5"
Nov 28 15:53:11 crc kubenswrapper[4857]: I1128 15:53:11.977577 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7bnj5"
Nov 28 15:53:12 crc kubenswrapper[4857]: I1128 15:53:12.519864 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7bnj5"]
Nov 28 15:53:13 crc kubenswrapper[4857]: I1128 15:53:13.517496 4857 generic.go:334] "Generic (PLEG): container finished" podID="f8804f55-e65f-4d2a-81d9-5238e5be1d0e" containerID="a38dc06d3ffd19537567f31da77d9d74d02e5bd6d74e30ae15e4137898152f49" exitCode=0
Nov 28 15:53:13 crc kubenswrapper[4857]: I1128 15:53:13.518087 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7bnj5" event={"ID":"f8804f55-e65f-4d2a-81d9-5238e5be1d0e","Type":"ContainerDied","Data":"a38dc06d3ffd19537567f31da77d9d74d02e5bd6d74e30ae15e4137898152f49"}
Nov 28 15:53:13 crc kubenswrapper[4857]: I1128 15:53:13.518140 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7bnj5" event={"ID":"f8804f55-e65f-4d2a-81d9-5238e5be1d0e","Type":"ContainerStarted","Data":"ab33d58d0fcad4f42a6bba40c7813f9bb606826be3225397e6463111e7e2ac11"}
Nov 28 15:53:13 crc kubenswrapper[4857]: I1128 15:53:13.521807 4857 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 28 15:53:14 crc kubenswrapper[4857]: I1128 15:53:14.537585 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7bnj5" event={"ID":"f8804f55-e65f-4d2a-81d9-5238e5be1d0e","Type":"ContainerStarted","Data":"fcbaf52f40f4516b8362a10c17d2cbcab76d4855022608a46c2527f5f26327c3"}
Nov 28 15:53:15 crc kubenswrapper[4857]: I1128 15:53:15.551857 4857 generic.go:334] "Generic (PLEG): container finished" podID="f8804f55-e65f-4d2a-81d9-5238e5be1d0e" containerID="fcbaf52f40f4516b8362a10c17d2cbcab76d4855022608a46c2527f5f26327c3" exitCode=0
Nov 28 15:53:15 crc kubenswrapper[4857]: I1128 15:53:15.551975 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7bnj5" event={"ID":"f8804f55-e65f-4d2a-81d9-5238e5be1d0e","Type":"ContainerDied","Data":"fcbaf52f40f4516b8362a10c17d2cbcab76d4855022608a46c2527f5f26327c3"}
Nov 28 15:53:16 crc kubenswrapper[4857]: I1128 15:53:16.566065 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7bnj5" event={"ID":"f8804f55-e65f-4d2a-81d9-5238e5be1d0e","Type":"ContainerStarted","Data":"28b22e5d62f970e1d575bdc701912279da98517ee996d31ef80262e17109d466"}
Nov 28 15:53:16 crc kubenswrapper[4857]: I1128 15:53:16.596332 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-7bnj5" podStartSLOduration=2.861714557 podStartE2EDuration="5.59631583s" podCreationTimestamp="2025-11-28 15:53:11 +0000 UTC" firstStartedPulling="2025-11-28 15:53:13.521569241 +0000 UTC m=+8643.645510678" lastFinishedPulling="2025-11-28 15:53:16.256170484 +0000 UTC m=+8646.380111951" observedRunningTime="2025-11-28 15:53:16.593181337 +0000 UTC m=+8646.717122774" watchObservedRunningTime="2025-11-28 15:53:16.59631583 +0000 UTC m=+8646.720257267"
Nov 28 15:53:19 crc kubenswrapper[4857]: I1128 15:53:19.230024 4857 scope.go:117] "RemoveContainer" containerID="344a437ad15b006fb0ad8fffd41a8343e36703951524ac5b5fe770932213a6ba"
Nov 28 15:53:19 crc kubenswrapper[4857]: E1128 15:53:19.231178 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 15:53:21 crc kubenswrapper[4857]: I1128 15:53:21.978716 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-7bnj5"
Nov 28 15:53:21 crc kubenswrapper[4857]: I1128 15:53:21.981285 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-7bnj5"
Nov 28 15:53:22 crc kubenswrapper[4857]: I1128 15:53:22.497572 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-7bnj5"
Nov 28 15:53:22 crc kubenswrapper[4857]: I1128 15:53:22.680955 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-7bnj5"
Nov 28 15:53:22 crc kubenswrapper[4857]: I1128 15:53:22.739200 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7bnj5"]
Nov 28 15:53:24 crc kubenswrapper[4857]: I1128 15:53:24.651521 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-7bnj5" podUID="f8804f55-e65f-4d2a-81d9-5238e5be1d0e" containerName="registry-server" containerID="cri-o://28b22e5d62f970e1d575bdc701912279da98517ee996d31ef80262e17109d466" gracePeriod=2
Nov 28 15:53:25 crc kubenswrapper[4857]: I1128 15:53:25.222556 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7bnj5"
Nov 28 15:53:25 crc kubenswrapper[4857]: I1128 15:53:25.405189 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8804f55-e65f-4d2a-81d9-5238e5be1d0e-catalog-content\") pod \"f8804f55-e65f-4d2a-81d9-5238e5be1d0e\" (UID: \"f8804f55-e65f-4d2a-81d9-5238e5be1d0e\") "
Nov 28 15:53:25 crc kubenswrapper[4857]: I1128 15:53:25.405248 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nxbrf\" (UniqueName: \"kubernetes.io/projected/f8804f55-e65f-4d2a-81d9-5238e5be1d0e-kube-api-access-nxbrf\") pod \"f8804f55-e65f-4d2a-81d9-5238e5be1d0e\" (UID: \"f8804f55-e65f-4d2a-81d9-5238e5be1d0e\") "
Nov 28 15:53:25 crc kubenswrapper[4857]: I1128 15:53:25.405407 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8804f55-e65f-4d2a-81d9-5238e5be1d0e-utilities\") pod \"f8804f55-e65f-4d2a-81d9-5238e5be1d0e\" (UID: \"f8804f55-e65f-4d2a-81d9-5238e5be1d0e\") "
Nov 28 15:53:25 crc kubenswrapper[4857]: I1128 15:53:25.406764 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f8804f55-e65f-4d2a-81d9-5238e5be1d0e-utilities" (OuterVolumeSpecName: "utilities") pod "f8804f55-e65f-4d2a-81d9-5238e5be1d0e" (UID: "f8804f55-e65f-4d2a-81d9-5238e5be1d0e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:53:25 crc kubenswrapper[4857]: I1128 15:53:25.418355 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8804f55-e65f-4d2a-81d9-5238e5be1d0e-kube-api-access-nxbrf" (OuterVolumeSpecName: "kube-api-access-nxbrf") pod "f8804f55-e65f-4d2a-81d9-5238e5be1d0e" (UID: "f8804f55-e65f-4d2a-81d9-5238e5be1d0e"). InnerVolumeSpecName "kube-api-access-nxbrf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 15:53:25 crc kubenswrapper[4857]: I1128 15:53:25.437709 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f8804f55-e65f-4d2a-81d9-5238e5be1d0e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f8804f55-e65f-4d2a-81d9-5238e5be1d0e" (UID: "f8804f55-e65f-4d2a-81d9-5238e5be1d0e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 15:53:25 crc kubenswrapper[4857]: I1128 15:53:25.508825 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nxbrf\" (UniqueName: \"kubernetes.io/projected/f8804f55-e65f-4d2a-81d9-5238e5be1d0e-kube-api-access-nxbrf\") on node \"crc\" DevicePath \"\""
Nov 28 15:53:25 crc kubenswrapper[4857]: I1128 15:53:25.508861 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8804f55-e65f-4d2a-81d9-5238e5be1d0e-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 15:53:25 crc kubenswrapper[4857]: I1128 15:53:25.508875 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8804f55-e65f-4d2a-81d9-5238e5be1d0e-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 15:53:25 crc kubenswrapper[4857]: I1128 15:53:25.663630 4857 generic.go:334] "Generic (PLEG): container finished" podID="f8804f55-e65f-4d2a-81d9-5238e5be1d0e" containerID="28b22e5d62f970e1d575bdc701912279da98517ee996d31ef80262e17109d466" exitCode=0
Nov 28 15:53:25 crc kubenswrapper[4857]: I1128 15:53:25.663736 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7bnj5" event={"ID":"f8804f55-e65f-4d2a-81d9-5238e5be1d0e","Type":"ContainerDied","Data":"28b22e5d62f970e1d575bdc701912279da98517ee996d31ef80262e17109d466"}
Nov 28 15:53:25 crc kubenswrapper[4857]: I1128 15:53:25.664088 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7bnj5" event={"ID":"f8804f55-e65f-4d2a-81d9-5238e5be1d0e","Type":"ContainerDied","Data":"ab33d58d0fcad4f42a6bba40c7813f9bb606826be3225397e6463111e7e2ac11"}
Nov 28 15:53:25 crc kubenswrapper[4857]: I1128 15:53:25.664109 4857 scope.go:117] "RemoveContainer" containerID="28b22e5d62f970e1d575bdc701912279da98517ee996d31ef80262e17109d466"
Nov 28 15:53:25 crc kubenswrapper[4857]: I1128 15:53:25.663775 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7bnj5"
Nov 28 15:53:25 crc kubenswrapper[4857]: I1128 15:53:25.691668 4857 scope.go:117] "RemoveContainer" containerID="fcbaf52f40f4516b8362a10c17d2cbcab76d4855022608a46c2527f5f26327c3"
Nov 28 15:53:25 crc kubenswrapper[4857]: I1128 15:53:25.722579 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7bnj5"]
Nov 28 15:53:25 crc kubenswrapper[4857]: I1128 15:53:25.729913 4857 scope.go:117] "RemoveContainer" containerID="a38dc06d3ffd19537567f31da77d9d74d02e5bd6d74e30ae15e4137898152f49"
Nov 28 15:53:25 crc kubenswrapper[4857]: I1128 15:53:25.745626 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-7bnj5"]
Nov 28 15:53:25 crc kubenswrapper[4857]: I1128 15:53:25.775552 4857 scope.go:117] "RemoveContainer" containerID="28b22e5d62f970e1d575bdc701912279da98517ee996d31ef80262e17109d466"
Nov 28 15:53:25 crc kubenswrapper[4857]: E1128 15:53:25.775956 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"28b22e5d62f970e1d575bdc701912279da98517ee996d31ef80262e17109d466\": container with ID starting with 28b22e5d62f970e1d575bdc701912279da98517ee996d31ef80262e17109d466 not found: ID does not exist" containerID="28b22e5d62f970e1d575bdc701912279da98517ee996d31ef80262e17109d466"
Nov 28 15:53:25 crc kubenswrapper[4857]: I1128 15:53:25.775985 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28b22e5d62f970e1d575bdc701912279da98517ee996d31ef80262e17109d466"} err="failed to get container status \"28b22e5d62f970e1d575bdc701912279da98517ee996d31ef80262e17109d466\": rpc error: code = NotFound desc = could not find container \"28b22e5d62f970e1d575bdc701912279da98517ee996d31ef80262e17109d466\": container with ID starting with 28b22e5d62f970e1d575bdc701912279da98517ee996d31ef80262e17109d466 not found: ID does not exist"
Nov 28 15:53:25 crc kubenswrapper[4857]: I1128 15:53:25.776004 4857 scope.go:117] "RemoveContainer" containerID="fcbaf52f40f4516b8362a10c17d2cbcab76d4855022608a46c2527f5f26327c3"
Nov 28 15:53:25 crc kubenswrapper[4857]: E1128 15:53:25.776651 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fcbaf52f40f4516b8362a10c17d2cbcab76d4855022608a46c2527f5f26327c3\": container with ID starting with fcbaf52f40f4516b8362a10c17d2cbcab76d4855022608a46c2527f5f26327c3 not found: ID does not exist" containerID="fcbaf52f40f4516b8362a10c17d2cbcab76d4855022608a46c2527f5f26327c3"
Nov 28 15:53:25 crc kubenswrapper[4857]: I1128 15:53:25.776673 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fcbaf52f40f4516b8362a10c17d2cbcab76d4855022608a46c2527f5f26327c3"} err="failed to get container status \"fcbaf52f40f4516b8362a10c17d2cbcab76d4855022608a46c2527f5f26327c3\": rpc error: code = NotFound desc = could not find container \"fcbaf52f40f4516b8362a10c17d2cbcab76d4855022608a46c2527f5f26327c3\": container with ID starting with fcbaf52f40f4516b8362a10c17d2cbcab76d4855022608a46c2527f5f26327c3 not found: ID does not exist"
Nov 28 15:53:25 crc kubenswrapper[4857]: I1128 15:53:25.776686 4857 scope.go:117] "RemoveContainer" containerID="a38dc06d3ffd19537567f31da77d9d74d02e5bd6d74e30ae15e4137898152f49"
Nov 28 15:53:25 crc kubenswrapper[4857]: E1128 15:53:25.777422 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a38dc06d3ffd19537567f31da77d9d74d02e5bd6d74e30ae15e4137898152f49\": container with ID starting with a38dc06d3ffd19537567f31da77d9d74d02e5bd6d74e30ae15e4137898152f49 not found: ID does not exist" containerID="a38dc06d3ffd19537567f31da77d9d74d02e5bd6d74e30ae15e4137898152f49"
Nov 28 15:53:25 crc kubenswrapper[4857]: I1128 15:53:25.777441 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a38dc06d3ffd19537567f31da77d9d74d02e5bd6d74e30ae15e4137898152f49"} err="failed to get container status \"a38dc06d3ffd19537567f31da77d9d74d02e5bd6d74e30ae15e4137898152f49\": rpc error: code = NotFound desc = could not find container \"a38dc06d3ffd19537567f31da77d9d74d02e5bd6d74e30ae15e4137898152f49\": container with ID starting with a38dc06d3ffd19537567f31da77d9d74d02e5bd6d74e30ae15e4137898152f49 not found: ID does not exist"
Nov 28 15:53:26 crc kubenswrapper[4857]: I1128 15:53:26.240254 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f8804f55-e65f-4d2a-81d9-5238e5be1d0e" path="/var/lib/kubelet/pods/f8804f55-e65f-4d2a-81d9-5238e5be1d0e/volumes"
Nov 28 15:53:30 crc kubenswrapper[4857]: I1128 15:53:30.238400 4857 scope.go:117] "RemoveContainer" containerID="344a437ad15b006fb0ad8fffd41a8343e36703951524ac5b5fe770932213a6ba"
Nov 28 15:53:30 crc kubenswrapper[4857]: E1128 15:53:30.239511 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 15:53:32 crc kubenswrapper[4857]: I1128 15:53:32.454070 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qd6f5"]
Nov 28 15:53:32 crc kubenswrapper[4857]: E1128 15:53:32.454929 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8804f55-e65f-4d2a-81d9-5238e5be1d0e" containerName="extract-content"
Nov 28 15:53:32 crc kubenswrapper[4857]: I1128 15:53:32.454943 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8804f55-e65f-4d2a-81d9-5238e5be1d0e" containerName="extract-content"
Nov 28 15:53:32 crc kubenswrapper[4857]: E1128 15:53:32.455017 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8804f55-e65f-4d2a-81d9-5238e5be1d0e" containerName="registry-server"
Nov 28 15:53:32 crc kubenswrapper[4857]: I1128 15:53:32.455026 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8804f55-e65f-4d2a-81d9-5238e5be1d0e" containerName="registry-server"
Nov 28 15:53:32 crc kubenswrapper[4857]: E1128 15:53:32.455039 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8804f55-e65f-4d2a-81d9-5238e5be1d0e" containerName="extract-utilities"
Nov 28 15:53:32 crc kubenswrapper[4857]: I1128 15:53:32.455047 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8804f55-e65f-4d2a-81d9-5238e5be1d0e" containerName="extract-utilities"
Nov 28 15:53:32 crc kubenswrapper[4857]: I1128 15:53:32.455351 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8804f55-e65f-4d2a-81d9-5238e5be1d0e" containerName="registry-server"
Nov 28 15:53:32 crc kubenswrapper[4857]: I1128 15:53:32.457511 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qd6f5"
Need to start a new one" pod="openshift-marketplace/certified-operators-qd6f5" Nov 28 15:53:32 crc kubenswrapper[4857]: I1128 15:53:32.477083 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qd6f5"] Nov 28 15:53:32 crc kubenswrapper[4857]: I1128 15:53:32.580145 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6jgc\" (UniqueName: \"kubernetes.io/projected/3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1-kube-api-access-g6jgc\") pod \"certified-operators-qd6f5\" (UID: \"3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1\") " pod="openshift-marketplace/certified-operators-qd6f5" Nov 28 15:53:32 crc kubenswrapper[4857]: I1128 15:53:32.580296 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1-catalog-content\") pod \"certified-operators-qd6f5\" (UID: \"3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1\") " pod="openshift-marketplace/certified-operators-qd6f5" Nov 28 15:53:32 crc kubenswrapper[4857]: I1128 15:53:32.580368 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1-utilities\") pod \"certified-operators-qd6f5\" (UID: \"3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1\") " pod="openshift-marketplace/certified-operators-qd6f5" Nov 28 15:53:32 crc kubenswrapper[4857]: I1128 15:53:32.682867 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1-catalog-content\") pod \"certified-operators-qd6f5\" (UID: \"3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1\") " pod="openshift-marketplace/certified-operators-qd6f5" Nov 28 15:53:32 crc kubenswrapper[4857]: I1128 15:53:32.682991 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1-utilities\") pod \"certified-operators-qd6f5\" (UID: \"3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1\") " pod="openshift-marketplace/certified-operators-qd6f5" Nov 28 15:53:32 crc kubenswrapper[4857]: I1128 15:53:32.683128 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g6jgc\" (UniqueName: \"kubernetes.io/projected/3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1-kube-api-access-g6jgc\") pod \"certified-operators-qd6f5\" (UID: \"3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1\") " pod="openshift-marketplace/certified-operators-qd6f5" Nov 28 15:53:32 crc kubenswrapper[4857]: I1128 15:53:32.683984 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1-catalog-content\") pod \"certified-operators-qd6f5\" (UID: \"3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1\") " pod="openshift-marketplace/certified-operators-qd6f5" Nov 28 15:53:32 crc kubenswrapper[4857]: I1128 15:53:32.684270 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1-utilities\") pod \"certified-operators-qd6f5\" (UID: \"3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1\") " pod="openshift-marketplace/certified-operators-qd6f5" Nov 28 15:53:32 crc kubenswrapper[4857]: I1128 15:53:32.704766 4857 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-g6jgc\" (UniqueName: \"kubernetes.io/projected/3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1-kube-api-access-g6jgc\") pod \"certified-operators-qd6f5\" (UID: \"3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1\") " pod="openshift-marketplace/certified-operators-qd6f5" Nov 28 15:53:32 crc kubenswrapper[4857]: I1128 15:53:32.790517 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qd6f5" Nov 28 15:53:33 crc kubenswrapper[4857]: I1128 15:53:33.264312 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qd6f5"] Nov 28 15:53:33 crc kubenswrapper[4857]: I1128 15:53:33.773617 4857 generic.go:334] "Generic (PLEG): container finished" podID="3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1" containerID="80d6e5a9425af777e56d8c6c888f3e76e0df743947f1c962f17b75676f805a2e" exitCode=0 Nov 28 15:53:33 crc kubenswrapper[4857]: I1128 15:53:33.773698 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qd6f5" event={"ID":"3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1","Type":"ContainerDied","Data":"80d6e5a9425af777e56d8c6c888f3e76e0df743947f1c962f17b75676f805a2e"} Nov 28 15:53:33 crc kubenswrapper[4857]: I1128 15:53:33.775535 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qd6f5" event={"ID":"3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1","Type":"ContainerStarted","Data":"830c68f1b8c64874a3146db02c248b5002a4c82a17456dbf44d79c4291364a31"} Nov 28 15:53:35 crc kubenswrapper[4857]: I1128 15:53:35.815808 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qd6f5" event={"ID":"3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1","Type":"ContainerStarted","Data":"4a5c15b0c90aa434289bab40c8dd8d80a1fa813ed3a1fe8e2244ce862583da8c"} Nov 28 15:53:36 crc kubenswrapper[4857]: I1128 15:53:36.827154 4857 generic.go:334] "Generic (PLEG): container finished" podID="3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1" containerID="4a5c15b0c90aa434289bab40c8dd8d80a1fa813ed3a1fe8e2244ce862583da8c" exitCode=0 Nov 28 15:53:36 crc kubenswrapper[4857]: I1128 15:53:36.827311 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qd6f5" event={"ID":"3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1","Type":"ContainerDied","Data":"4a5c15b0c90aa434289bab40c8dd8d80a1fa813ed3a1fe8e2244ce862583da8c"} Nov 28 15:53:37 crc kubenswrapper[4857]: I1128 15:53:37.843622 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qd6f5" event={"ID":"3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1","Type":"ContainerStarted","Data":"54498ea801665a99840cd5fe38765712ae1880afe55c05c29762c00a24c148fc"} Nov 28 15:53:37 crc kubenswrapper[4857]: I1128 15:53:37.874568 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qd6f5" podStartSLOduration=2.259087361 podStartE2EDuration="5.87454364s" podCreationTimestamp="2025-11-28 15:53:32 +0000 UTC" firstStartedPulling="2025-11-28 15:53:33.776735334 +0000 UTC m=+8663.900676801" lastFinishedPulling="2025-11-28 15:53:37.392191643 +0000 UTC m=+8667.516133080" observedRunningTime="2025-11-28 15:53:37.870184884 +0000 UTC m=+8667.994126331" watchObservedRunningTime="2025-11-28 15:53:37.87454364 +0000 UTC m=+8667.998485097" Nov 28 15:53:42 crc kubenswrapper[4857]: I1128 15:53:42.791276 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/certified-operators-qd6f5" Nov 28 15:53:42 crc kubenswrapper[4857]: I1128 15:53:42.792191 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-qd6f5" Nov 28 15:53:42 crc kubenswrapper[4857]: I1128 15:53:42.886900 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-qd6f5" Nov 28 15:53:43 crc kubenswrapper[4857]: I1128 15:53:43.012792 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-qd6f5" Nov 28 15:53:43 crc kubenswrapper[4857]: I1128 15:53:43.136970 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qd6f5"] Nov 28 15:53:43 crc kubenswrapper[4857]: I1128 15:53:43.230251 4857 scope.go:117] "RemoveContainer" containerID="344a437ad15b006fb0ad8fffd41a8343e36703951524ac5b5fe770932213a6ba" Nov 28 15:53:43 crc kubenswrapper[4857]: E1128 15:53:43.230760 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:53:44 crc kubenswrapper[4857]: I1128 15:53:44.950041 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-qd6f5" podUID="3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1" containerName="registry-server" containerID="cri-o://54498ea801665a99840cd5fe38765712ae1880afe55c05c29762c00a24c148fc" gracePeriod=2 Nov 28 15:53:45 crc kubenswrapper[4857]: I1128 15:53:45.505175 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qd6f5" Nov 28 15:53:45 crc kubenswrapper[4857]: I1128 15:53:45.621004 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g6jgc\" (UniqueName: \"kubernetes.io/projected/3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1-kube-api-access-g6jgc\") pod \"3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1\" (UID: \"3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1\") " Nov 28 15:53:45 crc kubenswrapper[4857]: I1128 15:53:45.621428 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1-utilities\") pod \"3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1\" (UID: \"3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1\") " Nov 28 15:53:45 crc kubenswrapper[4857]: I1128 15:53:45.621556 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1-catalog-content\") pod \"3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1\" (UID: \"3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1\") " Nov 28 15:53:45 crc kubenswrapper[4857]: I1128 15:53:45.622902 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1-utilities" (OuterVolumeSpecName: "utilities") pod "3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1" (UID: "3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:53:45 crc kubenswrapper[4857]: I1128 15:53:45.628261 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1-kube-api-access-g6jgc" (OuterVolumeSpecName: "kube-api-access-g6jgc") pod "3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1" (UID: "3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1"). InnerVolumeSpecName "kube-api-access-g6jgc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:53:45 crc kubenswrapper[4857]: I1128 15:53:45.683247 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1" (UID: "3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:53:45 crc kubenswrapper[4857]: I1128 15:53:45.724363 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g6jgc\" (UniqueName: \"kubernetes.io/projected/3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1-kube-api-access-g6jgc\") on node \"crc\" DevicePath \"\"" Nov 28 15:53:45 crc kubenswrapper[4857]: I1128 15:53:45.724398 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:53:45 crc kubenswrapper[4857]: I1128 15:53:45.724412 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:53:45 crc kubenswrapper[4857]: I1128 15:53:45.964399 4857 generic.go:334] "Generic (PLEG): container finished" podID="3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1" containerID="54498ea801665a99840cd5fe38765712ae1880afe55c05c29762c00a24c148fc" exitCode=0 Nov 28 15:53:45 crc kubenswrapper[4857]: I1128 15:53:45.964439 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qd6f5" event={"ID":"3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1","Type":"ContainerDied","Data":"54498ea801665a99840cd5fe38765712ae1880afe55c05c29762c00a24c148fc"} Nov 28 15:53:45 crc kubenswrapper[4857]: I1128 15:53:45.964462 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qd6f5" event={"ID":"3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1","Type":"ContainerDied","Data":"830c68f1b8c64874a3146db02c248b5002a4c82a17456dbf44d79c4291364a31"} Nov 28 15:53:45 crc kubenswrapper[4857]: I1128 15:53:45.964478 4857 scope.go:117] "RemoveContainer" containerID="54498ea801665a99840cd5fe38765712ae1880afe55c05c29762c00a24c148fc" Nov 28 15:53:45 crc kubenswrapper[4857]: I1128 15:53:45.964632 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qd6f5" Nov 28 15:53:45 crc kubenswrapper[4857]: I1128 15:53:45.994579 4857 scope.go:117] "RemoveContainer" containerID="4a5c15b0c90aa434289bab40c8dd8d80a1fa813ed3a1fe8e2244ce862583da8c" Nov 28 15:53:46 crc kubenswrapper[4857]: I1128 15:53:46.013474 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qd6f5"] Nov 28 15:53:46 crc kubenswrapper[4857]: I1128 15:53:46.022473 4857 scope.go:117] "RemoveContainer" containerID="80d6e5a9425af777e56d8c6c888f3e76e0df743947f1c962f17b75676f805a2e" Nov 28 15:53:46 crc kubenswrapper[4857]: I1128 15:53:46.025160 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-qd6f5"] Nov 28 15:53:46 crc kubenswrapper[4857]: I1128 15:53:46.085341 4857 scope.go:117] "RemoveContainer" containerID="54498ea801665a99840cd5fe38765712ae1880afe55c05c29762c00a24c148fc" Nov 28 15:53:46 crc kubenswrapper[4857]: E1128 15:53:46.085961 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"54498ea801665a99840cd5fe38765712ae1880afe55c05c29762c00a24c148fc\": container with ID starting with 54498ea801665a99840cd5fe38765712ae1880afe55c05c29762c00a24c148fc not found: ID does not exist" containerID="54498ea801665a99840cd5fe38765712ae1880afe55c05c29762c00a24c148fc" Nov 28 15:53:46 crc kubenswrapper[4857]: I1128 15:53:46.086003 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54498ea801665a99840cd5fe38765712ae1880afe55c05c29762c00a24c148fc"} err="failed to get container status \"54498ea801665a99840cd5fe38765712ae1880afe55c05c29762c00a24c148fc\": rpc error: code = NotFound desc = could not find container \"54498ea801665a99840cd5fe38765712ae1880afe55c05c29762c00a24c148fc\": container with ID starting with 54498ea801665a99840cd5fe38765712ae1880afe55c05c29762c00a24c148fc not found: ID does not exist" Nov 28 15:53:46 crc kubenswrapper[4857]: I1128 15:53:46.086030 4857 scope.go:117] "RemoveContainer" containerID="4a5c15b0c90aa434289bab40c8dd8d80a1fa813ed3a1fe8e2244ce862583da8c" Nov 28 15:53:46 crc kubenswrapper[4857]: E1128 15:53:46.086487 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4a5c15b0c90aa434289bab40c8dd8d80a1fa813ed3a1fe8e2244ce862583da8c\": container with ID starting with 4a5c15b0c90aa434289bab40c8dd8d80a1fa813ed3a1fe8e2244ce862583da8c not found: ID does not exist" containerID="4a5c15b0c90aa434289bab40c8dd8d80a1fa813ed3a1fe8e2244ce862583da8c" Nov 28 15:53:46 crc kubenswrapper[4857]: I1128 15:53:46.086526 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a5c15b0c90aa434289bab40c8dd8d80a1fa813ed3a1fe8e2244ce862583da8c"} err="failed to get container status \"4a5c15b0c90aa434289bab40c8dd8d80a1fa813ed3a1fe8e2244ce862583da8c\": rpc error: code = NotFound desc = could not find container \"4a5c15b0c90aa434289bab40c8dd8d80a1fa813ed3a1fe8e2244ce862583da8c\": container with ID starting with 4a5c15b0c90aa434289bab40c8dd8d80a1fa813ed3a1fe8e2244ce862583da8c not found: ID does not exist" Nov 28 15:53:46 crc kubenswrapper[4857]: I1128 15:53:46.086552 4857 scope.go:117] "RemoveContainer" containerID="80d6e5a9425af777e56d8c6c888f3e76e0df743947f1c962f17b75676f805a2e" Nov 28 15:53:46 crc kubenswrapper[4857]: E1128 15:53:46.087107 4857 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"80d6e5a9425af777e56d8c6c888f3e76e0df743947f1c962f17b75676f805a2e\": container with ID starting with 80d6e5a9425af777e56d8c6c888f3e76e0df743947f1c962f17b75676f805a2e not found: ID does not exist" containerID="80d6e5a9425af777e56d8c6c888f3e76e0df743947f1c962f17b75676f805a2e" Nov 28 15:53:46 crc kubenswrapper[4857]: I1128 15:53:46.087139 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"80d6e5a9425af777e56d8c6c888f3e76e0df743947f1c962f17b75676f805a2e"} err="failed to get container status \"80d6e5a9425af777e56d8c6c888f3e76e0df743947f1c962f17b75676f805a2e\": rpc error: code = NotFound desc = could not find container \"80d6e5a9425af777e56d8c6c888f3e76e0df743947f1c962f17b75676f805a2e\": container with ID starting with 80d6e5a9425af777e56d8c6c888f3e76e0df743947f1c962f17b75676f805a2e not found: ID does not exist" Nov 28 15:53:46 crc kubenswrapper[4857]: I1128 15:53:46.242541 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1" path="/var/lib/kubelet/pods/3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1/volumes" Nov 28 15:53:55 crc kubenswrapper[4857]: I1128 15:53:55.229366 4857 scope.go:117] "RemoveContainer" containerID="344a437ad15b006fb0ad8fffd41a8343e36703951524ac5b5fe770932213a6ba" Nov 28 15:53:55 crc kubenswrapper[4857]: E1128 15:53:55.231631 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:54:09 crc kubenswrapper[4857]: I1128 15:54:09.229340 4857 scope.go:117] "RemoveContainer" containerID="344a437ad15b006fb0ad8fffd41a8343e36703951524ac5b5fe770932213a6ba" Nov 28 15:54:09 crc kubenswrapper[4857]: E1128 15:54:09.231704 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:54:24 crc kubenswrapper[4857]: I1128 15:54:24.229003 4857 scope.go:117] "RemoveContainer" containerID="344a437ad15b006fb0ad8fffd41a8343e36703951524ac5b5fe770932213a6ba" Nov 28 15:54:24 crc kubenswrapper[4857]: E1128 15:54:24.229884 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:54:36 crc kubenswrapper[4857]: I1128 15:54:36.639473 4857 generic.go:334] "Generic (PLEG): container finished" podID="afa81231-caa2-4a60-b28f-77409c0837e2" containerID="f5aabe87e62fbabe000cb8339fd06735600ad6d853ec23b0929137c1d565d10a" exitCode=0 Nov 28 15:54:36 crc kubenswrapper[4857]: I1128 15:54:36.639570 4857 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/telemetry-openstack-openstack-cell1-xdxr5" event={"ID":"afa81231-caa2-4a60-b28f-77409c0837e2","Type":"ContainerDied","Data":"f5aabe87e62fbabe000cb8339fd06735600ad6d853ec23b0929137c1d565d10a"} Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.295223 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-openstack-openstack-cell1-xdxr5" Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.392456 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-ceilometer-compute-config-data-0\") pod \"afa81231-caa2-4a60-b28f-77409c0837e2\" (UID: \"afa81231-caa2-4a60-b28f-77409c0837e2\") " Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.392510 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-telemetry-combined-ca-bundle\") pod \"afa81231-caa2-4a60-b28f-77409c0837e2\" (UID: \"afa81231-caa2-4a60-b28f-77409c0837e2\") " Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.392583 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-ceilometer-compute-config-data-1\") pod \"afa81231-caa2-4a60-b28f-77409c0837e2\" (UID: \"afa81231-caa2-4a60-b28f-77409c0837e2\") " Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.392692 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-ssh-key\") pod \"afa81231-caa2-4a60-b28f-77409c0837e2\" (UID: \"afa81231-caa2-4a60-b28f-77409c0837e2\") " Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.392759 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-ceilometer-compute-config-data-2\") pod \"afa81231-caa2-4a60-b28f-77409c0837e2\" (UID: \"afa81231-caa2-4a60-b28f-77409c0837e2\") " Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.392868 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-inventory\") pod \"afa81231-caa2-4a60-b28f-77409c0837e2\" (UID: \"afa81231-caa2-4a60-b28f-77409c0837e2\") " Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.392935 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-ceph\") pod \"afa81231-caa2-4a60-b28f-77409c0837e2\" (UID: \"afa81231-caa2-4a60-b28f-77409c0837e2\") " Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.392971 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnvbp\" (UniqueName: \"kubernetes.io/projected/afa81231-caa2-4a60-b28f-77409c0837e2-kube-api-access-mnvbp\") pod \"afa81231-caa2-4a60-b28f-77409c0837e2\" (UID: \"afa81231-caa2-4a60-b28f-77409c0837e2\") " Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.400091 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/afa81231-caa2-4a60-b28f-77409c0837e2-kube-api-access-mnvbp" (OuterVolumeSpecName: "kube-api-access-mnvbp") pod "afa81231-caa2-4a60-b28f-77409c0837e2" (UID: "afa81231-caa2-4a60-b28f-77409c0837e2"). InnerVolumeSpecName "kube-api-access-mnvbp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.400605 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-ceph" (OuterVolumeSpecName: "ceph") pod "afa81231-caa2-4a60-b28f-77409c0837e2" (UID: "afa81231-caa2-4a60-b28f-77409c0837e2"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.406115 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "afa81231-caa2-4a60-b28f-77409c0837e2" (UID: "afa81231-caa2-4a60-b28f-77409c0837e2"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.426506 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "afa81231-caa2-4a60-b28f-77409c0837e2" (UID: "afa81231-caa2-4a60-b28f-77409c0837e2"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.435964 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "afa81231-caa2-4a60-b28f-77409c0837e2" (UID: "afa81231-caa2-4a60-b28f-77409c0837e2"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.444726 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "afa81231-caa2-4a60-b28f-77409c0837e2" (UID: "afa81231-caa2-4a60-b28f-77409c0837e2"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.447191 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "afa81231-caa2-4a60-b28f-77409c0837e2" (UID: "afa81231-caa2-4a60-b28f-77409c0837e2"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.455628 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-inventory" (OuterVolumeSpecName: "inventory") pod "afa81231-caa2-4a60-b28f-77409c0837e2" (UID: "afa81231-caa2-4a60-b28f-77409c0837e2"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.497179 4857 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.497244 4857 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.497265 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnvbp\" (UniqueName: \"kubernetes.io/projected/afa81231-caa2-4a60-b28f-77409c0837e2-kube-api-access-mnvbp\") on node \"crc\" DevicePath \"\"" Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.497281 4857 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.497296 4857 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.497311 4857 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.497324 4857 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.497338 4857 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/afa81231-caa2-4a60-b28f-77409c0837e2-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.667741 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-openstack-openstack-cell1-xdxr5" event={"ID":"afa81231-caa2-4a60-b28f-77409c0837e2","Type":"ContainerDied","Data":"b62385b1f645190f1b4b9e3cdcfc3dc4d0443208ad1fd34a7e2479bc0797d80c"} Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.667832 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b62385b1f645190f1b4b9e3cdcfc3dc4d0443208ad1fd34a7e2479bc0797d80c" Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.667943 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-openstack-openstack-cell1-xdxr5" Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.816470 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-sriov-openstack-openstack-cell1-lwbfw"] Nov 28 15:54:38 crc kubenswrapper[4857]: E1128 15:54:38.818026 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1" containerName="extract-content" Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.818056 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1" containerName="extract-content" Nov 28 15:54:38 crc kubenswrapper[4857]: E1128 15:54:38.818087 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1" containerName="extract-utilities" Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.818101 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1" containerName="extract-utilities" Nov 28 15:54:38 crc kubenswrapper[4857]: E1128 15:54:38.818147 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1" containerName="registry-server" Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.818159 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1" containerName="registry-server" Nov 28 15:54:38 crc kubenswrapper[4857]: E1128 15:54:38.818181 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="afa81231-caa2-4a60-b28f-77409c0837e2" containerName="telemetry-openstack-openstack-cell1" Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.818190 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="afa81231-caa2-4a60-b28f-77409c0837e2" containerName="telemetry-openstack-openstack-cell1" Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.818522 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="afa81231-caa2-4a60-b28f-77409c0837e2" containerName="telemetry-openstack-openstack-cell1" Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.818546 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ee7ba8f-82d9-4057-88f5-cc3b8f72e8a1" containerName="registry-server" Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.820183 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-sriov-openstack-openstack-cell1-lwbfw" Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.825147 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.825314 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-sriov-agent-neutron-config" Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.825662 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.826129 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-qbvn9" Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.826271 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 15:54:38 crc kubenswrapper[4857]: I1128 15:54:38.850241 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-sriov-openstack-openstack-cell1-lwbfw"] Nov 28 15:54:39 crc kubenswrapper[4857]: I1128 15:54:39.016410 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1143dca0-289a-4a62-9437-07ee877305c2-ssh-key\") pod \"neutron-sriov-openstack-openstack-cell1-lwbfw\" (UID: \"1143dca0-289a-4a62-9437-07ee877305c2\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-lwbfw" Nov 28 15:54:39 crc kubenswrapper[4857]: I1128 15:54:39.016581 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1143dca0-289a-4a62-9437-07ee877305c2-inventory\") pod \"neutron-sriov-openstack-openstack-cell1-lwbfw\" (UID: \"1143dca0-289a-4a62-9437-07ee877305c2\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-lwbfw" Nov 28 15:54:39 crc kubenswrapper[4857]: I1128 15:54:39.016684 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wtkq4\" (UniqueName: \"kubernetes.io/projected/1143dca0-289a-4a62-9437-07ee877305c2-kube-api-access-wtkq4\") pod \"neutron-sriov-openstack-openstack-cell1-lwbfw\" (UID: \"1143dca0-289a-4a62-9437-07ee877305c2\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-lwbfw" Nov 28 15:54:39 crc kubenswrapper[4857]: I1128 15:54:39.018035 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1143dca0-289a-4a62-9437-07ee877305c2-neutron-sriov-agent-neutron-config-0\") pod \"neutron-sriov-openstack-openstack-cell1-lwbfw\" (UID: \"1143dca0-289a-4a62-9437-07ee877305c2\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-lwbfw" Nov 28 15:54:39 crc kubenswrapper[4857]: I1128 15:54:39.018484 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1143dca0-289a-4a62-9437-07ee877305c2-neutron-sriov-combined-ca-bundle\") pod \"neutron-sriov-openstack-openstack-cell1-lwbfw\" (UID: \"1143dca0-289a-4a62-9437-07ee877305c2\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-lwbfw" Nov 28 15:54:39 crc kubenswrapper[4857]: I1128 15:54:39.020010 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1143dca0-289a-4a62-9437-07ee877305c2-ceph\") pod \"neutron-sriov-openstack-openstack-cell1-lwbfw\" (UID: \"1143dca0-289a-4a62-9437-07ee877305c2\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-lwbfw" Nov 28 15:54:39 crc kubenswrapper[4857]: I1128 15:54:39.123158 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1143dca0-289a-4a62-9437-07ee877305c2-neutron-sriov-agent-neutron-config-0\") pod \"neutron-sriov-openstack-openstack-cell1-lwbfw\" (UID: \"1143dca0-289a-4a62-9437-07ee877305c2\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-lwbfw" Nov 28 15:54:39 crc kubenswrapper[4857]: I1128 15:54:39.123314 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1143dca0-289a-4a62-9437-07ee877305c2-neutron-sriov-combined-ca-bundle\") pod \"neutron-sriov-openstack-openstack-cell1-lwbfw\" (UID: \"1143dca0-289a-4a62-9437-07ee877305c2\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-lwbfw" Nov 28 15:54:39 crc kubenswrapper[4857]: I1128 15:54:39.123348 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1143dca0-289a-4a62-9437-07ee877305c2-ceph\") pod \"neutron-sriov-openstack-openstack-cell1-lwbfw\" (UID: \"1143dca0-289a-4a62-9437-07ee877305c2\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-lwbfw" Nov 28 15:54:39 crc kubenswrapper[4857]: I1128 15:54:39.123440 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1143dca0-289a-4a62-9437-07ee877305c2-ssh-key\") pod \"neutron-sriov-openstack-openstack-cell1-lwbfw\" (UID: \"1143dca0-289a-4a62-9437-07ee877305c2\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-lwbfw" Nov 28 15:54:39 crc kubenswrapper[4857]: I1128 15:54:39.123474 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1143dca0-289a-4a62-9437-07ee877305c2-inventory\") pod \"neutron-sriov-openstack-openstack-cell1-lwbfw\" (UID: \"1143dca0-289a-4a62-9437-07ee877305c2\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-lwbfw" Nov 28 15:54:39 crc kubenswrapper[4857]: I1128 15:54:39.123508 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wtkq4\" (UniqueName: \"kubernetes.io/projected/1143dca0-289a-4a62-9437-07ee877305c2-kube-api-access-wtkq4\") pod \"neutron-sriov-openstack-openstack-cell1-lwbfw\" (UID: \"1143dca0-289a-4a62-9437-07ee877305c2\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-lwbfw" Nov 28 15:54:39 crc kubenswrapper[4857]: I1128 15:54:39.130581 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1143dca0-289a-4a62-9437-07ee877305c2-neutron-sriov-agent-neutron-config-0\") pod \"neutron-sriov-openstack-openstack-cell1-lwbfw\" (UID: \"1143dca0-289a-4a62-9437-07ee877305c2\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-lwbfw" Nov 28 15:54:39 crc kubenswrapper[4857]: I1128 15:54:39.130872 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1143dca0-289a-4a62-9437-07ee877305c2-ceph\") pod 
\"neutron-sriov-openstack-openstack-cell1-lwbfw\" (UID: \"1143dca0-289a-4a62-9437-07ee877305c2\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-lwbfw" Nov 28 15:54:39 crc kubenswrapper[4857]: I1128 15:54:39.131427 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1143dca0-289a-4a62-9437-07ee877305c2-ssh-key\") pod \"neutron-sriov-openstack-openstack-cell1-lwbfw\" (UID: \"1143dca0-289a-4a62-9437-07ee877305c2\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-lwbfw" Nov 28 15:54:39 crc kubenswrapper[4857]: I1128 15:54:39.137606 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1143dca0-289a-4a62-9437-07ee877305c2-neutron-sriov-combined-ca-bundle\") pod \"neutron-sriov-openstack-openstack-cell1-lwbfw\" (UID: \"1143dca0-289a-4a62-9437-07ee877305c2\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-lwbfw" Nov 28 15:54:39 crc kubenswrapper[4857]: I1128 15:54:39.138557 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1143dca0-289a-4a62-9437-07ee877305c2-inventory\") pod \"neutron-sriov-openstack-openstack-cell1-lwbfw\" (UID: \"1143dca0-289a-4a62-9437-07ee877305c2\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-lwbfw" Nov 28 15:54:39 crc kubenswrapper[4857]: I1128 15:54:39.145354 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wtkq4\" (UniqueName: \"kubernetes.io/projected/1143dca0-289a-4a62-9437-07ee877305c2-kube-api-access-wtkq4\") pod \"neutron-sriov-openstack-openstack-cell1-lwbfw\" (UID: \"1143dca0-289a-4a62-9437-07ee877305c2\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-lwbfw" Nov 28 15:54:39 crc kubenswrapper[4857]: I1128 15:54:39.153061 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-sriov-openstack-openstack-cell1-lwbfw" Nov 28 15:54:39 crc kubenswrapper[4857]: I1128 15:54:39.229384 4857 scope.go:117] "RemoveContainer" containerID="344a437ad15b006fb0ad8fffd41a8343e36703951524ac5b5fe770932213a6ba" Nov 28 15:54:39 crc kubenswrapper[4857]: E1128 15:54:39.229622 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:54:39 crc kubenswrapper[4857]: I1128 15:54:39.770073 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-sriov-openstack-openstack-cell1-lwbfw"] Nov 28 15:54:40 crc kubenswrapper[4857]: I1128 15:54:40.687805 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-sriov-openstack-openstack-cell1-lwbfw" event={"ID":"1143dca0-289a-4a62-9437-07ee877305c2","Type":"ContainerStarted","Data":"bf39f0e7e2a48a9fff99f6ab1da0843e37c1cdd662608a66fa53872a9376c051"} Nov 28 15:54:40 crc kubenswrapper[4857]: I1128 15:54:40.688182 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-sriov-openstack-openstack-cell1-lwbfw" event={"ID":"1143dca0-289a-4a62-9437-07ee877305c2","Type":"ContainerStarted","Data":"c5dcc8f89f2e8cfeecfb7afd21fe8ce58fbf4ecc8d08743b9a005ea754cfdbdb"} Nov 28 15:54:40 crc kubenswrapper[4857]: I1128 15:54:40.719939 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-sriov-openstack-openstack-cell1-lwbfw" podStartSLOduration=2.530722684 podStartE2EDuration="2.719918066s" podCreationTimestamp="2025-11-28 15:54:38 +0000 UTC" firstStartedPulling="2025-11-28 15:54:39.774579227 +0000 UTC m=+8729.898520664" lastFinishedPulling="2025-11-28 15:54:39.963774619 +0000 UTC m=+8730.087716046" observedRunningTime="2025-11-28 15:54:40.708094262 +0000 UTC m=+8730.832035699" watchObservedRunningTime="2025-11-28 15:54:40.719918066 +0000 UTC m=+8730.843859503" Nov 28 15:54:52 crc kubenswrapper[4857]: I1128 15:54:52.228695 4857 scope.go:117] "RemoveContainer" containerID="344a437ad15b006fb0ad8fffd41a8343e36703951524ac5b5fe770932213a6ba" Nov 28 15:54:52 crc kubenswrapper[4857]: E1128 15:54:52.229524 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:55:07 crc kubenswrapper[4857]: I1128 15:55:07.228916 4857 scope.go:117] "RemoveContainer" containerID="344a437ad15b006fb0ad8fffd41a8343e36703951524ac5b5fe770932213a6ba" Nov 28 15:55:07 crc kubenswrapper[4857]: E1128 15:55:07.229641 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" 
podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:55:22 crc kubenswrapper[4857]: I1128 15:55:22.229166 4857 scope.go:117] "RemoveContainer" containerID="344a437ad15b006fb0ad8fffd41a8343e36703951524ac5b5fe770932213a6ba" Nov 28 15:55:22 crc kubenswrapper[4857]: E1128 15:55:22.231361 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:55:35 crc kubenswrapper[4857]: I1128 15:55:35.229957 4857 scope.go:117] "RemoveContainer" containerID="344a437ad15b006fb0ad8fffd41a8343e36703951524ac5b5fe770932213a6ba" Nov 28 15:55:35 crc kubenswrapper[4857]: E1128 15:55:35.232072 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:55:47 crc kubenswrapper[4857]: I1128 15:55:47.229163 4857 scope.go:117] "RemoveContainer" containerID="344a437ad15b006fb0ad8fffd41a8343e36703951524ac5b5fe770932213a6ba" Nov 28 15:55:47 crc kubenswrapper[4857]: E1128 15:55:47.230018 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:56:02 crc kubenswrapper[4857]: I1128 15:56:02.229683 4857 scope.go:117] "RemoveContainer" containerID="344a437ad15b006fb0ad8fffd41a8343e36703951524ac5b5fe770932213a6ba" Nov 28 15:56:02 crc kubenswrapper[4857]: E1128 15:56:02.230905 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:56:13 crc kubenswrapper[4857]: I1128 15:56:13.229039 4857 scope.go:117] "RemoveContainer" containerID="344a437ad15b006fb0ad8fffd41a8343e36703951524ac5b5fe770932213a6ba" Nov 28 15:56:13 crc kubenswrapper[4857]: E1128 15:56:13.229806 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:56:24 crc kubenswrapper[4857]: I1128 15:56:24.229493 4857 scope.go:117] "RemoveContainer" 
containerID="344a437ad15b006fb0ad8fffd41a8343e36703951524ac5b5fe770932213a6ba" Nov 28 15:56:24 crc kubenswrapper[4857]: E1128 15:56:24.231590 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:56:39 crc kubenswrapper[4857]: I1128 15:56:39.229594 4857 scope.go:117] "RemoveContainer" containerID="344a437ad15b006fb0ad8fffd41a8343e36703951524ac5b5fe770932213a6ba" Nov 28 15:56:39 crc kubenswrapper[4857]: E1128 15:56:39.230601 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:56:51 crc kubenswrapper[4857]: I1128 15:56:51.230189 4857 scope.go:117] "RemoveContainer" containerID="344a437ad15b006fb0ad8fffd41a8343e36703951524ac5b5fe770932213a6ba" Nov 28 15:56:51 crc kubenswrapper[4857]: E1128 15:56:51.231047 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:57:03 crc kubenswrapper[4857]: I1128 15:57:03.229450 4857 scope.go:117] "RemoveContainer" containerID="344a437ad15b006fb0ad8fffd41a8343e36703951524ac5b5fe770932213a6ba" Nov 28 15:57:03 crc kubenswrapper[4857]: E1128 15:57:03.230275 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 15:57:07 crc kubenswrapper[4857]: I1128 15:57:07.902833 4857 trace.go:236] Trace[1506812388]: "Calculate volume metrics of persistence for pod openstack/rabbitmq-cell1-server-0" (28-Nov-2025 15:57:06.451) (total time: 1451ms): Nov 28 15:57:07 crc kubenswrapper[4857]: Trace[1506812388]: [1.451026088s] [1.451026088s] END Nov 28 15:57:14 crc kubenswrapper[4857]: I1128 15:57:14.230485 4857 scope.go:117] "RemoveContainer" containerID="344a437ad15b006fb0ad8fffd41a8343e36703951524ac5b5fe770932213a6ba" Nov 28 15:57:14 crc kubenswrapper[4857]: I1128 15:57:14.536522 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerStarted","Data":"8f0b4f43b3c2b1312f25d3cf29da6c85d5039cb97cabf585a54e035436faab77"} Nov 28 15:57:35 crc kubenswrapper[4857]: I1128 15:57:35.329179 4857 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/redhat-operators-78d47"] Nov 28 15:57:35 crc kubenswrapper[4857]: I1128 15:57:35.335874 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-78d47" Nov 28 15:57:35 crc kubenswrapper[4857]: I1128 15:57:35.382302 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-78d47"] Nov 28 15:57:35 crc kubenswrapper[4857]: I1128 15:57:35.450966 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8c59n\" (UniqueName: \"kubernetes.io/projected/8194a4b8-19fd-4978-872c-86ad1658200c-kube-api-access-8c59n\") pod \"redhat-operators-78d47\" (UID: \"8194a4b8-19fd-4978-872c-86ad1658200c\") " pod="openshift-marketplace/redhat-operators-78d47" Nov 28 15:57:35 crc kubenswrapper[4857]: I1128 15:57:35.452121 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8194a4b8-19fd-4978-872c-86ad1658200c-utilities\") pod \"redhat-operators-78d47\" (UID: \"8194a4b8-19fd-4978-872c-86ad1658200c\") " pod="openshift-marketplace/redhat-operators-78d47" Nov 28 15:57:35 crc kubenswrapper[4857]: I1128 15:57:35.452413 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8194a4b8-19fd-4978-872c-86ad1658200c-catalog-content\") pod \"redhat-operators-78d47\" (UID: \"8194a4b8-19fd-4978-872c-86ad1658200c\") " pod="openshift-marketplace/redhat-operators-78d47" Nov 28 15:57:35 crc kubenswrapper[4857]: I1128 15:57:35.554741 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8c59n\" (UniqueName: \"kubernetes.io/projected/8194a4b8-19fd-4978-872c-86ad1658200c-kube-api-access-8c59n\") pod \"redhat-operators-78d47\" (UID: \"8194a4b8-19fd-4978-872c-86ad1658200c\") " pod="openshift-marketplace/redhat-operators-78d47" Nov 28 15:57:35 crc kubenswrapper[4857]: I1128 15:57:35.554884 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8194a4b8-19fd-4978-872c-86ad1658200c-utilities\") pod \"redhat-operators-78d47\" (UID: \"8194a4b8-19fd-4978-872c-86ad1658200c\") " pod="openshift-marketplace/redhat-operators-78d47" Nov 28 15:57:35 crc kubenswrapper[4857]: I1128 15:57:35.554966 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8194a4b8-19fd-4978-872c-86ad1658200c-catalog-content\") pod \"redhat-operators-78d47\" (UID: \"8194a4b8-19fd-4978-872c-86ad1658200c\") " pod="openshift-marketplace/redhat-operators-78d47" Nov 28 15:57:35 crc kubenswrapper[4857]: I1128 15:57:35.555593 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8194a4b8-19fd-4978-872c-86ad1658200c-utilities\") pod \"redhat-operators-78d47\" (UID: \"8194a4b8-19fd-4978-872c-86ad1658200c\") " pod="openshift-marketplace/redhat-operators-78d47" Nov 28 15:57:35 crc kubenswrapper[4857]: I1128 15:57:35.555602 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8194a4b8-19fd-4978-872c-86ad1658200c-catalog-content\") pod \"redhat-operators-78d47\" (UID: \"8194a4b8-19fd-4978-872c-86ad1658200c\") " 
pod="openshift-marketplace/redhat-operators-78d47" Nov 28 15:57:35 crc kubenswrapper[4857]: I1128 15:57:35.595213 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8c59n\" (UniqueName: \"kubernetes.io/projected/8194a4b8-19fd-4978-872c-86ad1658200c-kube-api-access-8c59n\") pod \"redhat-operators-78d47\" (UID: \"8194a4b8-19fd-4978-872c-86ad1658200c\") " pod="openshift-marketplace/redhat-operators-78d47" Nov 28 15:57:35 crc kubenswrapper[4857]: I1128 15:57:35.685576 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-78d47" Nov 28 15:57:36 crc kubenswrapper[4857]: I1128 15:57:36.222117 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-78d47"] Nov 28 15:57:36 crc kubenswrapper[4857]: I1128 15:57:36.815818 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-78d47" event={"ID":"8194a4b8-19fd-4978-872c-86ad1658200c","Type":"ContainerStarted","Data":"10df05887edbf6c71f4b8dccac072f229e947054ab5ac9c1c178b7aac41d000a"} Nov 28 15:57:37 crc kubenswrapper[4857]: I1128 15:57:37.840529 4857 generic.go:334] "Generic (PLEG): container finished" podID="8194a4b8-19fd-4978-872c-86ad1658200c" containerID="07a1c1e5850758f3c30e5862cddeb34f47823a9fe3e6e5522056a47fcbf634dd" exitCode=0 Nov 28 15:57:37 crc kubenswrapper[4857]: I1128 15:57:37.840635 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-78d47" event={"ID":"8194a4b8-19fd-4978-872c-86ad1658200c","Type":"ContainerDied","Data":"07a1c1e5850758f3c30e5862cddeb34f47823a9fe3e6e5522056a47fcbf634dd"} Nov 28 15:57:39 crc kubenswrapper[4857]: I1128 15:57:39.886091 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-78d47" event={"ID":"8194a4b8-19fd-4978-872c-86ad1658200c","Type":"ContainerStarted","Data":"2afa0eb1e2768e29ba597e5f426e502ff4c1feb694a1d809a6c86ce37654a618"} Nov 28 15:57:42 crc kubenswrapper[4857]: I1128 15:57:42.925856 4857 generic.go:334] "Generic (PLEG): container finished" podID="8194a4b8-19fd-4978-872c-86ad1658200c" containerID="2afa0eb1e2768e29ba597e5f426e502ff4c1feb694a1d809a6c86ce37654a618" exitCode=0 Nov 28 15:57:42 crc kubenswrapper[4857]: I1128 15:57:42.925934 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-78d47" event={"ID":"8194a4b8-19fd-4978-872c-86ad1658200c","Type":"ContainerDied","Data":"2afa0eb1e2768e29ba597e5f426e502ff4c1feb694a1d809a6c86ce37654a618"} Nov 28 15:57:44 crc kubenswrapper[4857]: I1128 15:57:44.963325 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-78d47" event={"ID":"8194a4b8-19fd-4978-872c-86ad1658200c","Type":"ContainerStarted","Data":"ba936a0bf18de8ecd216146ddfa60b0f5aa9f5c17549d1df2449d97982edcaae"} Nov 28 15:57:44 crc kubenswrapper[4857]: I1128 15:57:44.987647 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-78d47" podStartSLOduration=3.82855116 podStartE2EDuration="9.987625923s" podCreationTimestamp="2025-11-28 15:57:35 +0000 UTC" firstStartedPulling="2025-11-28 15:57:37.843293218 +0000 UTC m=+8907.967234675" lastFinishedPulling="2025-11-28 15:57:44.002368001 +0000 UTC m=+8914.126309438" observedRunningTime="2025-11-28 15:57:44.982437725 +0000 UTC m=+8915.106379162" watchObservedRunningTime="2025-11-28 15:57:44.987625923 +0000 UTC m=+8915.111567360" Nov 28 
Nov 28 15:57:45 crc kubenswrapper[4857]: I1128 15:57:45.686267 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-78d47"
Nov 28 15:57:45 crc kubenswrapper[4857]: I1128 15:57:45.686700 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-78d47"
Nov 28 15:57:46 crc kubenswrapper[4857]: I1128 15:57:46.733377 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-78d47" podUID="8194a4b8-19fd-4978-872c-86ad1658200c" containerName="registry-server" probeResult="failure" output=<
Nov 28 15:57:46 crc kubenswrapper[4857]: timeout: failed to connect service ":50051" within 1s
Nov 28 15:57:46 crc kubenswrapper[4857]: >
Nov 28 15:57:55 crc kubenswrapper[4857]: I1128 15:57:55.754745 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-78d47"
Nov 28 15:57:55 crc kubenswrapper[4857]: I1128 15:57:55.809774 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-78d47"
Nov 28 15:57:58 crc kubenswrapper[4857]: I1128 15:57:58.444335 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-78d47"]
Nov 28 15:57:58 crc kubenswrapper[4857]: I1128 15:57:58.445138 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-78d47" podUID="8194a4b8-19fd-4978-872c-86ad1658200c" containerName="registry-server" containerID="cri-o://ba936a0bf18de8ecd216146ddfa60b0f5aa9f5c17549d1df2449d97982edcaae" gracePeriod=2
Nov 28 15:57:58 crc kubenswrapper[4857]: I1128 15:57:58.943300 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-78d47"
Nov 28 15:57:59 crc kubenswrapper[4857]: I1128 15:57:59.077974 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8194a4b8-19fd-4978-872c-86ad1658200c-catalog-content\") pod \"8194a4b8-19fd-4978-872c-86ad1658200c\" (UID: \"8194a4b8-19fd-4978-872c-86ad1658200c\") "
Nov 28 15:57:59 crc kubenswrapper[4857]: I1128 15:57:59.078357 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8194a4b8-19fd-4978-872c-86ad1658200c-utilities\") pod \"8194a4b8-19fd-4978-872c-86ad1658200c\" (UID: \"8194a4b8-19fd-4978-872c-86ad1658200c\") "
Nov 28 15:57:59 crc kubenswrapper[4857]: I1128 15:57:59.078414 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8c59n\" (UniqueName: \"kubernetes.io/projected/8194a4b8-19fd-4978-872c-86ad1658200c-kube-api-access-8c59n\") pod \"8194a4b8-19fd-4978-872c-86ad1658200c\" (UID: \"8194a4b8-19fd-4978-872c-86ad1658200c\") "
Nov 28 15:57:59 crc kubenswrapper[4857]: I1128 15:57:59.080690 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8194a4b8-19fd-4978-872c-86ad1658200c-utilities" (OuterVolumeSpecName: "utilities") pod "8194a4b8-19fd-4978-872c-86ad1658200c" (UID: "8194a4b8-19fd-4978-872c-86ad1658200c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
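[annotation] The "Probe failed" entry above shows the registry-server startup probe timing out against ":50051" within 1s. A rough Go stand-in for that check, testing TCP reachability only; the real probe speaks the gRPC health-check protocol, and localhost:50051 is an assumption about where the service would be reachable:

package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// Same 1s budget as the probe output in the log entry above.
	conn, err := net.DialTimeout("tcp", "localhost:50051", time.Second)
	if err != nil {
		fmt.Println("probe would fail:", err) // analogous to the 15:57:46 failure
		return
	}
	conn.Close()
	fmt.Println("port open; the gRPC health check could then run")
}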
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:57:59 crc kubenswrapper[4857]: I1128 15:57:59.080917 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8194a4b8-19fd-4978-872c-86ad1658200c-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 15:57:59 crc kubenswrapper[4857]: I1128 15:57:59.090256 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8194a4b8-19fd-4978-872c-86ad1658200c-kube-api-access-8c59n" (OuterVolumeSpecName: "kube-api-access-8c59n") pod "8194a4b8-19fd-4978-872c-86ad1658200c" (UID: "8194a4b8-19fd-4978-872c-86ad1658200c"). InnerVolumeSpecName "kube-api-access-8c59n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:57:59 crc kubenswrapper[4857]: I1128 15:57:59.142350 4857 generic.go:334] "Generic (PLEG): container finished" podID="8194a4b8-19fd-4978-872c-86ad1658200c" containerID="ba936a0bf18de8ecd216146ddfa60b0f5aa9f5c17549d1df2449d97982edcaae" exitCode=0 Nov 28 15:57:59 crc kubenswrapper[4857]: I1128 15:57:59.142505 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-78d47" event={"ID":"8194a4b8-19fd-4978-872c-86ad1658200c","Type":"ContainerDied","Data":"ba936a0bf18de8ecd216146ddfa60b0f5aa9f5c17549d1df2449d97982edcaae"} Nov 28 15:57:59 crc kubenswrapper[4857]: I1128 15:57:59.142588 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-78d47" event={"ID":"8194a4b8-19fd-4978-872c-86ad1658200c","Type":"ContainerDied","Data":"10df05887edbf6c71f4b8dccac072f229e947054ab5ac9c1c178b7aac41d000a"} Nov 28 15:57:59 crc kubenswrapper[4857]: I1128 15:57:59.142657 4857 scope.go:117] "RemoveContainer" containerID="ba936a0bf18de8ecd216146ddfa60b0f5aa9f5c17549d1df2449d97982edcaae" Nov 28 15:57:59 crc kubenswrapper[4857]: I1128 15:57:59.142835 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-78d47" Nov 28 15:57:59 crc kubenswrapper[4857]: I1128 15:57:59.165089 4857 scope.go:117] "RemoveContainer" containerID="2afa0eb1e2768e29ba597e5f426e502ff4c1feb694a1d809a6c86ce37654a618" Nov 28 15:57:59 crc kubenswrapper[4857]: I1128 15:57:59.181806 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8194a4b8-19fd-4978-872c-86ad1658200c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8194a4b8-19fd-4978-872c-86ad1658200c" (UID: "8194a4b8-19fd-4978-872c-86ad1658200c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 15:57:59 crc kubenswrapper[4857]: I1128 15:57:59.184266 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8194a4b8-19fd-4978-872c-86ad1658200c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 15:57:59 crc kubenswrapper[4857]: I1128 15:57:59.184294 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8c59n\" (UniqueName: \"kubernetes.io/projected/8194a4b8-19fd-4978-872c-86ad1658200c-kube-api-access-8c59n\") on node \"crc\" DevicePath \"\"" Nov 28 15:57:59 crc kubenswrapper[4857]: I1128 15:57:59.187447 4857 scope.go:117] "RemoveContainer" containerID="07a1c1e5850758f3c30e5862cddeb34f47823a9fe3e6e5522056a47fcbf634dd" Nov 28 15:57:59 crc kubenswrapper[4857]: I1128 15:57:59.227676 4857 scope.go:117] "RemoveContainer" containerID="ba936a0bf18de8ecd216146ddfa60b0f5aa9f5c17549d1df2449d97982edcaae" Nov 28 15:57:59 crc kubenswrapper[4857]: E1128 15:57:59.229309 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba936a0bf18de8ecd216146ddfa60b0f5aa9f5c17549d1df2449d97982edcaae\": container with ID starting with ba936a0bf18de8ecd216146ddfa60b0f5aa9f5c17549d1df2449d97982edcaae not found: ID does not exist" containerID="ba936a0bf18de8ecd216146ddfa60b0f5aa9f5c17549d1df2449d97982edcaae" Nov 28 15:57:59 crc kubenswrapper[4857]: I1128 15:57:59.229344 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba936a0bf18de8ecd216146ddfa60b0f5aa9f5c17549d1df2449d97982edcaae"} err="failed to get container status \"ba936a0bf18de8ecd216146ddfa60b0f5aa9f5c17549d1df2449d97982edcaae\": rpc error: code = NotFound desc = could not find container \"ba936a0bf18de8ecd216146ddfa60b0f5aa9f5c17549d1df2449d97982edcaae\": container with ID starting with ba936a0bf18de8ecd216146ddfa60b0f5aa9f5c17549d1df2449d97982edcaae not found: ID does not exist" Nov 28 15:57:59 crc kubenswrapper[4857]: I1128 15:57:59.229366 4857 scope.go:117] "RemoveContainer" containerID="2afa0eb1e2768e29ba597e5f426e502ff4c1feb694a1d809a6c86ce37654a618" Nov 28 15:57:59 crc kubenswrapper[4857]: E1128 15:57:59.229710 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2afa0eb1e2768e29ba597e5f426e502ff4c1feb694a1d809a6c86ce37654a618\": container with ID starting with 2afa0eb1e2768e29ba597e5f426e502ff4c1feb694a1d809a6c86ce37654a618 not found: ID does not exist" containerID="2afa0eb1e2768e29ba597e5f426e502ff4c1feb694a1d809a6c86ce37654a618" Nov 28 15:57:59 crc kubenswrapper[4857]: I1128 15:57:59.229730 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2afa0eb1e2768e29ba597e5f426e502ff4c1feb694a1d809a6c86ce37654a618"} err="failed to get container status \"2afa0eb1e2768e29ba597e5f426e502ff4c1feb694a1d809a6c86ce37654a618\": rpc error: code = NotFound desc = could not find container \"2afa0eb1e2768e29ba597e5f426e502ff4c1feb694a1d809a6c86ce37654a618\": container with ID starting with 2afa0eb1e2768e29ba597e5f426e502ff4c1feb694a1d809a6c86ce37654a618 not found: ID does not exist" Nov 28 15:57:59 crc kubenswrapper[4857]: I1128 15:57:59.229768 4857 scope.go:117] "RemoveContainer" containerID="07a1c1e5850758f3c30e5862cddeb34f47823a9fe3e6e5522056a47fcbf634dd" Nov 28 15:57:59 crc kubenswrapper[4857]: E1128 15:57:59.230071 4857 log.go:32] "ContainerStatus from runtime 
service failed" err="rpc error: code = NotFound desc = could not find container \"07a1c1e5850758f3c30e5862cddeb34f47823a9fe3e6e5522056a47fcbf634dd\": container with ID starting with 07a1c1e5850758f3c30e5862cddeb34f47823a9fe3e6e5522056a47fcbf634dd not found: ID does not exist" containerID="07a1c1e5850758f3c30e5862cddeb34f47823a9fe3e6e5522056a47fcbf634dd" Nov 28 15:57:59 crc kubenswrapper[4857]: I1128 15:57:59.230099 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"07a1c1e5850758f3c30e5862cddeb34f47823a9fe3e6e5522056a47fcbf634dd"} err="failed to get container status \"07a1c1e5850758f3c30e5862cddeb34f47823a9fe3e6e5522056a47fcbf634dd\": rpc error: code = NotFound desc = could not find container \"07a1c1e5850758f3c30e5862cddeb34f47823a9fe3e6e5522056a47fcbf634dd\": container with ID starting with 07a1c1e5850758f3c30e5862cddeb34f47823a9fe3e6e5522056a47fcbf634dd not found: ID does not exist" Nov 28 15:57:59 crc kubenswrapper[4857]: I1128 15:57:59.492113 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-78d47"] Nov 28 15:57:59 crc kubenswrapper[4857]: I1128 15:57:59.503147 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-78d47"] Nov 28 15:58:00 crc kubenswrapper[4857]: I1128 15:58:00.243915 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8194a4b8-19fd-4978-872c-86ad1658200c" path="/var/lib/kubelet/pods/8194a4b8-19fd-4978-872c-86ad1658200c/volumes" Nov 28 15:58:44 crc kubenswrapper[4857]: I1128 15:58:44.287017 4857 trace.go:236] Trace[1293514393]: "Calculate volume metrics of persistence for pod openstack/rabbitmq-cell1-server-0" (28-Nov-2025 15:58:43.270) (total time: 1016ms): Nov 28 15:58:44 crc kubenswrapper[4857]: Trace[1293514393]: [1.016635235s] [1.016635235s] END Nov 28 15:58:47 crc kubenswrapper[4857]: I1128 15:58:47.710384 4857 generic.go:334] "Generic (PLEG): container finished" podID="1143dca0-289a-4a62-9437-07ee877305c2" containerID="bf39f0e7e2a48a9fff99f6ab1da0843e37c1cdd662608a66fa53872a9376c051" exitCode=0 Nov 28 15:58:47 crc kubenswrapper[4857]: I1128 15:58:47.710462 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-sriov-openstack-openstack-cell1-lwbfw" event={"ID":"1143dca0-289a-4a62-9437-07ee877305c2","Type":"ContainerDied","Data":"bf39f0e7e2a48a9fff99f6ab1da0843e37c1cdd662608a66fa53872a9376c051"} Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.230866 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-sriov-openstack-openstack-cell1-lwbfw" Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.323017 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1143dca0-289a-4a62-9437-07ee877305c2-neutron-sriov-combined-ca-bundle\") pod \"1143dca0-289a-4a62-9437-07ee877305c2\" (UID: \"1143dca0-289a-4a62-9437-07ee877305c2\") " Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.323130 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1143dca0-289a-4a62-9437-07ee877305c2-ceph\") pod \"1143dca0-289a-4a62-9437-07ee877305c2\" (UID: \"1143dca0-289a-4a62-9437-07ee877305c2\") " Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.323265 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1143dca0-289a-4a62-9437-07ee877305c2-ssh-key\") pod \"1143dca0-289a-4a62-9437-07ee877305c2\" (UID: \"1143dca0-289a-4a62-9437-07ee877305c2\") " Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.323341 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1143dca0-289a-4a62-9437-07ee877305c2-inventory\") pod \"1143dca0-289a-4a62-9437-07ee877305c2\" (UID: \"1143dca0-289a-4a62-9437-07ee877305c2\") " Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.323468 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wtkq4\" (UniqueName: \"kubernetes.io/projected/1143dca0-289a-4a62-9437-07ee877305c2-kube-api-access-wtkq4\") pod \"1143dca0-289a-4a62-9437-07ee877305c2\" (UID: \"1143dca0-289a-4a62-9437-07ee877305c2\") " Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.323513 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1143dca0-289a-4a62-9437-07ee877305c2-neutron-sriov-agent-neutron-config-0\") pod \"1143dca0-289a-4a62-9437-07ee877305c2\" (UID: \"1143dca0-289a-4a62-9437-07ee877305c2\") " Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.330208 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1143dca0-289a-4a62-9437-07ee877305c2-kube-api-access-wtkq4" (OuterVolumeSpecName: "kube-api-access-wtkq4") pod "1143dca0-289a-4a62-9437-07ee877305c2" (UID: "1143dca0-289a-4a62-9437-07ee877305c2"). InnerVolumeSpecName "kube-api-access-wtkq4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.330805 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1143dca0-289a-4a62-9437-07ee877305c2-ceph" (OuterVolumeSpecName: "ceph") pod "1143dca0-289a-4a62-9437-07ee877305c2" (UID: "1143dca0-289a-4a62-9437-07ee877305c2"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.333088 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1143dca0-289a-4a62-9437-07ee877305c2-neutron-sriov-combined-ca-bundle" (OuterVolumeSpecName: "neutron-sriov-combined-ca-bundle") pod "1143dca0-289a-4a62-9437-07ee877305c2" (UID: "1143dca0-289a-4a62-9437-07ee877305c2"). InnerVolumeSpecName "neutron-sriov-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.362644 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1143dca0-289a-4a62-9437-07ee877305c2-inventory" (OuterVolumeSpecName: "inventory") pod "1143dca0-289a-4a62-9437-07ee877305c2" (UID: "1143dca0-289a-4a62-9437-07ee877305c2"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.366123 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1143dca0-289a-4a62-9437-07ee877305c2-neutron-sriov-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-sriov-agent-neutron-config-0") pod "1143dca0-289a-4a62-9437-07ee877305c2" (UID: "1143dca0-289a-4a62-9437-07ee877305c2"). InnerVolumeSpecName "neutron-sriov-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.369182 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1143dca0-289a-4a62-9437-07ee877305c2-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "1143dca0-289a-4a62-9437-07ee877305c2" (UID: "1143dca0-289a-4a62-9437-07ee877305c2"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.426653 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wtkq4\" (UniqueName: \"kubernetes.io/projected/1143dca0-289a-4a62-9437-07ee877305c2-kube-api-access-wtkq4\") on node \"crc\" DevicePath \"\"" Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.426710 4857 reconciler_common.go:293] "Volume detached for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/1143dca0-289a-4a62-9437-07ee877305c2-neutron-sriov-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.426725 4857 reconciler_common.go:293] "Volume detached for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1143dca0-289a-4a62-9437-07ee877305c2-neutron-sriov-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.426740 4857 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/1143dca0-289a-4a62-9437-07ee877305c2-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.426753 4857 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1143dca0-289a-4a62-9437-07ee877305c2-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.426789 4857 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1143dca0-289a-4a62-9437-07ee877305c2-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.737716 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-sriov-openstack-openstack-cell1-lwbfw" event={"ID":"1143dca0-289a-4a62-9437-07ee877305c2","Type":"ContainerDied","Data":"c5dcc8f89f2e8cfeecfb7afd21fe8ce58fbf4ecc8d08743b9a005ea754cfdbdb"} Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.738005 4857 pod_container_deletor.go:80] "Container not found in pod's containers" 
containerID="c5dcc8f89f2e8cfeecfb7afd21fe8ce58fbf4ecc8d08743b9a005ea754cfdbdb" Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.737751 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-sriov-openstack-openstack-cell1-lwbfw" Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.852933 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-dhcp-openstack-openstack-cell1-hwdxh"] Nov 28 15:58:49 crc kubenswrapper[4857]: E1128 15:58:49.853434 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8194a4b8-19fd-4978-872c-86ad1658200c" containerName="extract-utilities" Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.853453 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="8194a4b8-19fd-4978-872c-86ad1658200c" containerName="extract-utilities" Nov 28 15:58:49 crc kubenswrapper[4857]: E1128 15:58:49.853465 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1143dca0-289a-4a62-9437-07ee877305c2" containerName="neutron-sriov-openstack-openstack-cell1" Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.853471 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="1143dca0-289a-4a62-9437-07ee877305c2" containerName="neutron-sriov-openstack-openstack-cell1" Nov 28 15:58:49 crc kubenswrapper[4857]: E1128 15:58:49.853480 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8194a4b8-19fd-4978-872c-86ad1658200c" containerName="extract-content" Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.853486 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="8194a4b8-19fd-4978-872c-86ad1658200c" containerName="extract-content" Nov 28 15:58:49 crc kubenswrapper[4857]: E1128 15:58:49.853514 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8194a4b8-19fd-4978-872c-86ad1658200c" containerName="registry-server" Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.853520 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="8194a4b8-19fd-4978-872c-86ad1658200c" containerName="registry-server" Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.853725 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="1143dca0-289a-4a62-9437-07ee877305c2" containerName="neutron-sriov-openstack-openstack-cell1" Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.853748 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="8194a4b8-19fd-4978-872c-86ad1658200c" containerName="registry-server" Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.854554 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-dhcp-openstack-openstack-cell1-hwdxh" Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.858519 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.858905 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-dhcp-agent-neutron-config" Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.859072 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.858915 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.859529 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-qbvn9" Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.868074 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-dhcp-openstack-openstack-cell1-hwdxh"] Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.941231 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6c8c6\" (UniqueName: \"kubernetes.io/projected/d8c62c98-9304-412b-9738-99fb9dad59a6-kube-api-access-6c8c6\") pod \"neutron-dhcp-openstack-openstack-cell1-hwdxh\" (UID: \"d8c62c98-9304-412b-9738-99fb9dad59a6\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-hwdxh" Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.941301 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d8c62c98-9304-412b-9738-99fb9dad59a6-ssh-key\") pod \"neutron-dhcp-openstack-openstack-cell1-hwdxh\" (UID: \"d8c62c98-9304-412b-9738-99fb9dad59a6\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-hwdxh" Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.941378 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d8c62c98-9304-412b-9738-99fb9dad59a6-inventory\") pod \"neutron-dhcp-openstack-openstack-cell1-hwdxh\" (UID: \"d8c62c98-9304-412b-9738-99fb9dad59a6\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-hwdxh" Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.941607 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d8c62c98-9304-412b-9738-99fb9dad59a6-ceph\") pod \"neutron-dhcp-openstack-openstack-cell1-hwdxh\" (UID: \"d8c62c98-9304-412b-9738-99fb9dad59a6\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-hwdxh" Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.941719 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8c62c98-9304-412b-9738-99fb9dad59a6-neutron-dhcp-combined-ca-bundle\") pod \"neutron-dhcp-openstack-openstack-cell1-hwdxh\" (UID: \"d8c62c98-9304-412b-9738-99fb9dad59a6\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-hwdxh" Nov 28 15:58:49 crc kubenswrapper[4857]: I1128 15:58:49.941854 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: 
\"kubernetes.io/secret/d8c62c98-9304-412b-9738-99fb9dad59a6-neutron-dhcp-agent-neutron-config-0\") pod \"neutron-dhcp-openstack-openstack-cell1-hwdxh\" (UID: \"d8c62c98-9304-412b-9738-99fb9dad59a6\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-hwdxh" Nov 28 15:58:50 crc kubenswrapper[4857]: I1128 15:58:50.043553 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/d8c62c98-9304-412b-9738-99fb9dad59a6-neutron-dhcp-agent-neutron-config-0\") pod \"neutron-dhcp-openstack-openstack-cell1-hwdxh\" (UID: \"d8c62c98-9304-412b-9738-99fb9dad59a6\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-hwdxh" Nov 28 15:58:50 crc kubenswrapper[4857]: I1128 15:58:50.043723 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6c8c6\" (UniqueName: \"kubernetes.io/projected/d8c62c98-9304-412b-9738-99fb9dad59a6-kube-api-access-6c8c6\") pod \"neutron-dhcp-openstack-openstack-cell1-hwdxh\" (UID: \"d8c62c98-9304-412b-9738-99fb9dad59a6\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-hwdxh" Nov 28 15:58:50 crc kubenswrapper[4857]: I1128 15:58:50.043753 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d8c62c98-9304-412b-9738-99fb9dad59a6-ssh-key\") pod \"neutron-dhcp-openstack-openstack-cell1-hwdxh\" (UID: \"d8c62c98-9304-412b-9738-99fb9dad59a6\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-hwdxh" Nov 28 15:58:50 crc kubenswrapper[4857]: I1128 15:58:50.043788 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d8c62c98-9304-412b-9738-99fb9dad59a6-inventory\") pod \"neutron-dhcp-openstack-openstack-cell1-hwdxh\" (UID: \"d8c62c98-9304-412b-9738-99fb9dad59a6\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-hwdxh" Nov 28 15:58:50 crc kubenswrapper[4857]: I1128 15:58:50.043824 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d8c62c98-9304-412b-9738-99fb9dad59a6-ceph\") pod \"neutron-dhcp-openstack-openstack-cell1-hwdxh\" (UID: \"d8c62c98-9304-412b-9738-99fb9dad59a6\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-hwdxh" Nov 28 15:58:50 crc kubenswrapper[4857]: I1128 15:58:50.043872 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8c62c98-9304-412b-9738-99fb9dad59a6-neutron-dhcp-combined-ca-bundle\") pod \"neutron-dhcp-openstack-openstack-cell1-hwdxh\" (UID: \"d8c62c98-9304-412b-9738-99fb9dad59a6\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-hwdxh" Nov 28 15:58:50 crc kubenswrapper[4857]: I1128 15:58:50.048424 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d8c62c98-9304-412b-9738-99fb9dad59a6-ssh-key\") pod \"neutron-dhcp-openstack-openstack-cell1-hwdxh\" (UID: \"d8c62c98-9304-412b-9738-99fb9dad59a6\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-hwdxh" Nov 28 15:58:50 crc kubenswrapper[4857]: I1128 15:58:50.048858 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d8c62c98-9304-412b-9738-99fb9dad59a6-ceph\") pod \"neutron-dhcp-openstack-openstack-cell1-hwdxh\" (UID: \"d8c62c98-9304-412b-9738-99fb9dad59a6\") " 
pod="openstack/neutron-dhcp-openstack-openstack-cell1-hwdxh" Nov 28 15:58:50 crc kubenswrapper[4857]: I1128 15:58:50.050245 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d8c62c98-9304-412b-9738-99fb9dad59a6-inventory\") pod \"neutron-dhcp-openstack-openstack-cell1-hwdxh\" (UID: \"d8c62c98-9304-412b-9738-99fb9dad59a6\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-hwdxh" Nov 28 15:58:50 crc kubenswrapper[4857]: I1128 15:58:50.055259 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/d8c62c98-9304-412b-9738-99fb9dad59a6-neutron-dhcp-agent-neutron-config-0\") pod \"neutron-dhcp-openstack-openstack-cell1-hwdxh\" (UID: \"d8c62c98-9304-412b-9738-99fb9dad59a6\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-hwdxh" Nov 28 15:58:50 crc kubenswrapper[4857]: I1128 15:58:50.057789 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8c62c98-9304-412b-9738-99fb9dad59a6-neutron-dhcp-combined-ca-bundle\") pod \"neutron-dhcp-openstack-openstack-cell1-hwdxh\" (UID: \"d8c62c98-9304-412b-9738-99fb9dad59a6\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-hwdxh" Nov 28 15:58:50 crc kubenswrapper[4857]: I1128 15:58:50.059237 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6c8c6\" (UniqueName: \"kubernetes.io/projected/d8c62c98-9304-412b-9738-99fb9dad59a6-kube-api-access-6c8c6\") pod \"neutron-dhcp-openstack-openstack-cell1-hwdxh\" (UID: \"d8c62c98-9304-412b-9738-99fb9dad59a6\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-hwdxh" Nov 28 15:58:50 crc kubenswrapper[4857]: I1128 15:58:50.188920 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-dhcp-openstack-openstack-cell1-hwdxh" Nov 28 15:58:50 crc kubenswrapper[4857]: I1128 15:58:50.733917 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-dhcp-openstack-openstack-cell1-hwdxh"] Nov 28 15:58:50 crc kubenswrapper[4857]: W1128 15:58:50.738020 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd8c62c98_9304_412b_9738_99fb9dad59a6.slice/crio-25fca2b69e9b299f9801e38a86602078214cde7e8a846e1e5bced8919d965efa WatchSource:0}: Error finding container 25fca2b69e9b299f9801e38a86602078214cde7e8a846e1e5bced8919d965efa: Status 404 returned error can't find the container with id 25fca2b69e9b299f9801e38a86602078214cde7e8a846e1e5bced8919d965efa Nov 28 15:58:50 crc kubenswrapper[4857]: I1128 15:58:50.743757 4857 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 15:58:51 crc kubenswrapper[4857]: I1128 15:58:51.770875 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dhcp-openstack-openstack-cell1-hwdxh" event={"ID":"d8c62c98-9304-412b-9738-99fb9dad59a6","Type":"ContainerStarted","Data":"75ddae5522292d4898a026f3c65330438de491f69d6d3417fb97247ffd53a31a"} Nov 28 15:58:51 crc kubenswrapper[4857]: I1128 15:58:51.771560 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dhcp-openstack-openstack-cell1-hwdxh" event={"ID":"d8c62c98-9304-412b-9738-99fb9dad59a6","Type":"ContainerStarted","Data":"25fca2b69e9b299f9801e38a86602078214cde7e8a846e1e5bced8919d965efa"} Nov 28 15:58:51 crc kubenswrapper[4857]: I1128 15:58:51.801744 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-dhcp-openstack-openstack-cell1-hwdxh" podStartSLOduration=2.61920132 podStartE2EDuration="2.801728124s" podCreationTimestamp="2025-11-28 15:58:49 +0000 UTC" firstStartedPulling="2025-11-28 15:58:50.743463811 +0000 UTC m=+8980.867405248" lastFinishedPulling="2025-11-28 15:58:50.925990615 +0000 UTC m=+8981.049932052" observedRunningTime="2025-11-28 15:58:51.792223051 +0000 UTC m=+8981.916164478" watchObservedRunningTime="2025-11-28 15:58:51.801728124 +0000 UTC m=+8981.925669561" Nov 28 15:59:41 crc kubenswrapper[4857]: I1128 15:59:41.308851 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 15:59:41 crc kubenswrapper[4857]: I1128 15:59:41.309493 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:00:00 crc kubenswrapper[4857]: I1128 16:00:00.169020 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405760-mlrf4"] Nov 28 16:00:00 crc kubenswrapper[4857]: I1128 16:00:00.172832 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-mlrf4" Nov 28 16:00:00 crc kubenswrapper[4857]: I1128 16:00:00.182571 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 16:00:00 crc kubenswrapper[4857]: I1128 16:00:00.182835 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 16:00:00 crc kubenswrapper[4857]: I1128 16:00:00.186166 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405760-mlrf4"] Nov 28 16:00:00 crc kubenswrapper[4857]: I1128 16:00:00.234787 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dnthv\" (UniqueName: \"kubernetes.io/projected/034cf81c-21e4-42d5-808a-c84627b98c4f-kube-api-access-dnthv\") pod \"collect-profiles-29405760-mlrf4\" (UID: \"034cf81c-21e4-42d5-808a-c84627b98c4f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-mlrf4" Nov 28 16:00:00 crc kubenswrapper[4857]: I1128 16:00:00.234869 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/034cf81c-21e4-42d5-808a-c84627b98c4f-config-volume\") pod \"collect-profiles-29405760-mlrf4\" (UID: \"034cf81c-21e4-42d5-808a-c84627b98c4f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-mlrf4" Nov 28 16:00:00 crc kubenswrapper[4857]: I1128 16:00:00.234978 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/034cf81c-21e4-42d5-808a-c84627b98c4f-secret-volume\") pod \"collect-profiles-29405760-mlrf4\" (UID: \"034cf81c-21e4-42d5-808a-c84627b98c4f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-mlrf4" Nov 28 16:00:00 crc kubenswrapper[4857]: I1128 16:00:00.339008 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dnthv\" (UniqueName: \"kubernetes.io/projected/034cf81c-21e4-42d5-808a-c84627b98c4f-kube-api-access-dnthv\") pod \"collect-profiles-29405760-mlrf4\" (UID: \"034cf81c-21e4-42d5-808a-c84627b98c4f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-mlrf4" Nov 28 16:00:00 crc kubenswrapper[4857]: I1128 16:00:00.339552 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/034cf81c-21e4-42d5-808a-c84627b98c4f-config-volume\") pod \"collect-profiles-29405760-mlrf4\" (UID: \"034cf81c-21e4-42d5-808a-c84627b98c4f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-mlrf4" Nov 28 16:00:00 crc kubenswrapper[4857]: I1128 16:00:00.339810 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/034cf81c-21e4-42d5-808a-c84627b98c4f-secret-volume\") pod \"collect-profiles-29405760-mlrf4\" (UID: \"034cf81c-21e4-42d5-808a-c84627b98c4f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-mlrf4" Nov 28 16:00:00 crc kubenswrapper[4857]: I1128 16:00:00.341155 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/034cf81c-21e4-42d5-808a-c84627b98c4f-config-volume\") pod 
\"collect-profiles-29405760-mlrf4\" (UID: \"034cf81c-21e4-42d5-808a-c84627b98c4f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-mlrf4" Nov 28 16:00:00 crc kubenswrapper[4857]: I1128 16:00:00.345800 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/034cf81c-21e4-42d5-808a-c84627b98c4f-secret-volume\") pod \"collect-profiles-29405760-mlrf4\" (UID: \"034cf81c-21e4-42d5-808a-c84627b98c4f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-mlrf4" Nov 28 16:00:00 crc kubenswrapper[4857]: I1128 16:00:00.358462 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dnthv\" (UniqueName: \"kubernetes.io/projected/034cf81c-21e4-42d5-808a-c84627b98c4f-kube-api-access-dnthv\") pod \"collect-profiles-29405760-mlrf4\" (UID: \"034cf81c-21e4-42d5-808a-c84627b98c4f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-mlrf4" Nov 28 16:00:00 crc kubenswrapper[4857]: I1128 16:00:00.528786 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-mlrf4" Nov 28 16:00:01 crc kubenswrapper[4857]: I1128 16:00:01.036987 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405760-mlrf4"] Nov 28 16:00:01 crc kubenswrapper[4857]: I1128 16:00:01.591736 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-mlrf4" event={"ID":"034cf81c-21e4-42d5-808a-c84627b98c4f","Type":"ContainerStarted","Data":"e3a145006daeb9c41c75f9f4014fa31c95be3f2f5e7e36baf46446fc71ee9809"} Nov 28 16:00:02 crc kubenswrapper[4857]: I1128 16:00:02.620782 4857 generic.go:334] "Generic (PLEG): container finished" podID="034cf81c-21e4-42d5-808a-c84627b98c4f" containerID="019b5a3fcba292fdbb37592bc67635926634a0123457a73df2c2a344f75cc291" exitCode=0 Nov 28 16:00:02 crc kubenswrapper[4857]: I1128 16:00:02.620869 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-mlrf4" event={"ID":"034cf81c-21e4-42d5-808a-c84627b98c4f","Type":"ContainerDied","Data":"019b5a3fcba292fdbb37592bc67635926634a0123457a73df2c2a344f75cc291"} Nov 28 16:00:04 crc kubenswrapper[4857]: I1128 16:00:04.023355 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-mlrf4" Nov 28 16:00:04 crc kubenswrapper[4857]: I1128 16:00:04.125008 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dnthv\" (UniqueName: \"kubernetes.io/projected/034cf81c-21e4-42d5-808a-c84627b98c4f-kube-api-access-dnthv\") pod \"034cf81c-21e4-42d5-808a-c84627b98c4f\" (UID: \"034cf81c-21e4-42d5-808a-c84627b98c4f\") " Nov 28 16:00:04 crc kubenswrapper[4857]: I1128 16:00:04.125420 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/034cf81c-21e4-42d5-808a-c84627b98c4f-config-volume\") pod \"034cf81c-21e4-42d5-808a-c84627b98c4f\" (UID: \"034cf81c-21e4-42d5-808a-c84627b98c4f\") " Nov 28 16:00:04 crc kubenswrapper[4857]: I1128 16:00:04.125496 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/034cf81c-21e4-42d5-808a-c84627b98c4f-secret-volume\") pod \"034cf81c-21e4-42d5-808a-c84627b98c4f\" (UID: \"034cf81c-21e4-42d5-808a-c84627b98c4f\") " Nov 28 16:00:04 crc kubenswrapper[4857]: I1128 16:00:04.126746 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/034cf81c-21e4-42d5-808a-c84627b98c4f-config-volume" (OuterVolumeSpecName: "config-volume") pod "034cf81c-21e4-42d5-808a-c84627b98c4f" (UID: "034cf81c-21e4-42d5-808a-c84627b98c4f"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:00:04 crc kubenswrapper[4857]: I1128 16:00:04.133222 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/034cf81c-21e4-42d5-808a-c84627b98c4f-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "034cf81c-21e4-42d5-808a-c84627b98c4f" (UID: "034cf81c-21e4-42d5-808a-c84627b98c4f"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:00:04 crc kubenswrapper[4857]: I1128 16:00:04.133862 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/034cf81c-21e4-42d5-808a-c84627b98c4f-kube-api-access-dnthv" (OuterVolumeSpecName: "kube-api-access-dnthv") pod "034cf81c-21e4-42d5-808a-c84627b98c4f" (UID: "034cf81c-21e4-42d5-808a-c84627b98c4f"). InnerVolumeSpecName "kube-api-access-dnthv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:00:04 crc kubenswrapper[4857]: I1128 16:00:04.229450 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dnthv\" (UniqueName: \"kubernetes.io/projected/034cf81c-21e4-42d5-808a-c84627b98c4f-kube-api-access-dnthv\") on node \"crc\" DevicePath \"\"" Nov 28 16:00:04 crc kubenswrapper[4857]: I1128 16:00:04.229501 4857 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/034cf81c-21e4-42d5-808a-c84627b98c4f-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 16:00:04 crc kubenswrapper[4857]: I1128 16:00:04.229513 4857 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/034cf81c-21e4-42d5-808a-c84627b98c4f-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 16:00:04 crc kubenswrapper[4857]: I1128 16:00:04.645636 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-mlrf4" event={"ID":"034cf81c-21e4-42d5-808a-c84627b98c4f","Type":"ContainerDied","Data":"e3a145006daeb9c41c75f9f4014fa31c95be3f2f5e7e36baf46446fc71ee9809"} Nov 28 16:00:04 crc kubenswrapper[4857]: I1128 16:00:04.645686 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e3a145006daeb9c41c75f9f4014fa31c95be3f2f5e7e36baf46446fc71ee9809" Nov 28 16:00:04 crc kubenswrapper[4857]: I1128 16:00:04.645684 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405760-mlrf4" Nov 28 16:00:05 crc kubenswrapper[4857]: I1128 16:00:05.114111 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405715-mvlx9"] Nov 28 16:00:05 crc kubenswrapper[4857]: I1128 16:00:05.126366 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405715-mvlx9"] Nov 28 16:00:06 crc kubenswrapper[4857]: I1128 16:00:06.246317 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f30be136-8e3c-486b-af61-239b0ade4181" path="/var/lib/kubelet/pods/f30be136-8e3c-486b-af61-239b0ade4181/volumes" Nov 28 16:00:11 crc kubenswrapper[4857]: I1128 16:00:11.309248 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:00:11 crc kubenswrapper[4857]: I1128 16:00:11.310209 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:00:12 crc kubenswrapper[4857]: I1128 16:00:12.015887 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-jzwtb"] Nov 28 16:00:12 crc kubenswrapper[4857]: E1128 16:00:12.017550 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="034cf81c-21e4-42d5-808a-c84627b98c4f" containerName="collect-profiles" Nov 28 16:00:12 crc kubenswrapper[4857]: I1128 16:00:12.017588 4857 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="034cf81c-21e4-42d5-808a-c84627b98c4f" containerName="collect-profiles" Nov 28 16:00:12 crc kubenswrapper[4857]: I1128 16:00:12.018128 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="034cf81c-21e4-42d5-808a-c84627b98c4f" containerName="collect-profiles" Nov 28 16:00:12 crc kubenswrapper[4857]: I1128 16:00:12.021629 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jzwtb" Nov 28 16:00:12 crc kubenswrapper[4857]: I1128 16:00:12.031212 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jzwtb"] Nov 28 16:00:12 crc kubenswrapper[4857]: I1128 16:00:12.045386 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd30cc6b-2e55-48dd-886a-a99269745675-utilities\") pod \"community-operators-jzwtb\" (UID: \"cd30cc6b-2e55-48dd-886a-a99269745675\") " pod="openshift-marketplace/community-operators-jzwtb" Nov 28 16:00:12 crc kubenswrapper[4857]: I1128 16:00:12.045720 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd30cc6b-2e55-48dd-886a-a99269745675-catalog-content\") pod \"community-operators-jzwtb\" (UID: \"cd30cc6b-2e55-48dd-886a-a99269745675\") " pod="openshift-marketplace/community-operators-jzwtb" Nov 28 16:00:12 crc kubenswrapper[4857]: I1128 16:00:12.045913 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-94t2f\" (UniqueName: \"kubernetes.io/projected/cd30cc6b-2e55-48dd-886a-a99269745675-kube-api-access-94t2f\") pod \"community-operators-jzwtb\" (UID: \"cd30cc6b-2e55-48dd-886a-a99269745675\") " pod="openshift-marketplace/community-operators-jzwtb" Nov 28 16:00:12 crc kubenswrapper[4857]: I1128 16:00:12.149057 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-94t2f\" (UniqueName: \"kubernetes.io/projected/cd30cc6b-2e55-48dd-886a-a99269745675-kube-api-access-94t2f\") pod \"community-operators-jzwtb\" (UID: \"cd30cc6b-2e55-48dd-886a-a99269745675\") " pod="openshift-marketplace/community-operators-jzwtb" Nov 28 16:00:12 crc kubenswrapper[4857]: I1128 16:00:12.149211 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd30cc6b-2e55-48dd-886a-a99269745675-utilities\") pod \"community-operators-jzwtb\" (UID: \"cd30cc6b-2e55-48dd-886a-a99269745675\") " pod="openshift-marketplace/community-operators-jzwtb" Nov 28 16:00:12 crc kubenswrapper[4857]: I1128 16:00:12.149308 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd30cc6b-2e55-48dd-886a-a99269745675-catalog-content\") pod \"community-operators-jzwtb\" (UID: \"cd30cc6b-2e55-48dd-886a-a99269745675\") " pod="openshift-marketplace/community-operators-jzwtb" Nov 28 16:00:12 crc kubenswrapper[4857]: I1128 16:00:12.149926 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd30cc6b-2e55-48dd-886a-a99269745675-utilities\") pod \"community-operators-jzwtb\" (UID: \"cd30cc6b-2e55-48dd-886a-a99269745675\") " pod="openshift-marketplace/community-operators-jzwtb" Nov 28 16:00:12 crc kubenswrapper[4857]: I1128 16:00:12.149982 4857 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd30cc6b-2e55-48dd-886a-a99269745675-catalog-content\") pod \"community-operators-jzwtb\" (UID: \"cd30cc6b-2e55-48dd-886a-a99269745675\") " pod="openshift-marketplace/community-operators-jzwtb" Nov 28 16:00:12 crc kubenswrapper[4857]: I1128 16:00:12.174761 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-94t2f\" (UniqueName: \"kubernetes.io/projected/cd30cc6b-2e55-48dd-886a-a99269745675-kube-api-access-94t2f\") pod \"community-operators-jzwtb\" (UID: \"cd30cc6b-2e55-48dd-886a-a99269745675\") " pod="openshift-marketplace/community-operators-jzwtb" Nov 28 16:00:12 crc kubenswrapper[4857]: I1128 16:00:12.371388 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jzwtb" Nov 28 16:00:12 crc kubenswrapper[4857]: I1128 16:00:12.973119 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jzwtb"] Nov 28 16:00:13 crc kubenswrapper[4857]: I1128 16:00:13.771286 4857 generic.go:334] "Generic (PLEG): container finished" podID="cd30cc6b-2e55-48dd-886a-a99269745675" containerID="64b9bbd38fcb8c3f6361976e9582eee8fc5c398137f38113c19a19f6e781125f" exitCode=0 Nov 28 16:00:13 crc kubenswrapper[4857]: I1128 16:00:13.771666 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jzwtb" event={"ID":"cd30cc6b-2e55-48dd-886a-a99269745675","Type":"ContainerDied","Data":"64b9bbd38fcb8c3f6361976e9582eee8fc5c398137f38113c19a19f6e781125f"} Nov 28 16:00:13 crc kubenswrapper[4857]: I1128 16:00:13.771713 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jzwtb" event={"ID":"cd30cc6b-2e55-48dd-886a-a99269745675","Type":"ContainerStarted","Data":"354ac9484261fe48db4c105565538332e91a8043585ca022e689c91cbce2fb1f"} Nov 28 16:00:19 crc kubenswrapper[4857]: I1128 16:00:19.857384 4857 generic.go:334] "Generic (PLEG): container finished" podID="cd30cc6b-2e55-48dd-886a-a99269745675" containerID="67efa9d06d24df3891bd583d76421eb4e5b6fe4e97a2d8313976de9bb090d526" exitCode=0 Nov 28 16:00:19 crc kubenswrapper[4857]: I1128 16:00:19.857553 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jzwtb" event={"ID":"cd30cc6b-2e55-48dd-886a-a99269745675","Type":"ContainerDied","Data":"67efa9d06d24df3891bd583d76421eb4e5b6fe4e97a2d8313976de9bb090d526"} Nov 28 16:00:23 crc kubenswrapper[4857]: I1128 16:00:23.906472 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jzwtb" event={"ID":"cd30cc6b-2e55-48dd-886a-a99269745675","Type":"ContainerStarted","Data":"d0db06c942296993a6601c1d8731d75b727f688b4a8c21b9b103fc5d5a0ccb68"} Nov 28 16:00:23 crc kubenswrapper[4857]: I1128 16:00:23.932513 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-jzwtb" podStartSLOduration=3.889815451 podStartE2EDuration="12.932489432s" podCreationTimestamp="2025-11-28 16:00:11 +0000 UTC" firstStartedPulling="2025-11-28 16:00:13.774387107 +0000 UTC m=+9063.898328534" lastFinishedPulling="2025-11-28 16:00:22.817061058 +0000 UTC m=+9072.941002515" observedRunningTime="2025-11-28 16:00:23.930518179 +0000 UTC m=+9074.054459656" watchObservedRunningTime="2025-11-28 16:00:23.932489432 +0000 UTC m=+9074.056430879" Nov 28 16:00:32 crc 
kubenswrapper[4857]: I1128 16:00:32.372458 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-jzwtb" Nov 28 16:00:32 crc kubenswrapper[4857]: I1128 16:00:32.373986 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-jzwtb" Nov 28 16:00:32 crc kubenswrapper[4857]: I1128 16:00:32.451916 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-jzwtb" Nov 28 16:00:32 crc kubenswrapper[4857]: I1128 16:00:32.509824 4857 scope.go:117] "RemoveContainer" containerID="7958381c3e9a0163e98916a4fb0ea5e926ec06a0a7de1d59eaf717df431354e6" Nov 28 16:00:33 crc kubenswrapper[4857]: I1128 16:00:33.080283 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-jzwtb" Nov 28 16:00:33 crc kubenswrapper[4857]: I1128 16:00:33.129850 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jzwtb"] Nov 28 16:00:35 crc kubenswrapper[4857]: I1128 16:00:35.044987 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-jzwtb" podUID="cd30cc6b-2e55-48dd-886a-a99269745675" containerName="registry-server" containerID="cri-o://d0db06c942296993a6601c1d8731d75b727f688b4a8c21b9b103fc5d5a0ccb68" gracePeriod=2 Nov 28 16:00:35 crc kubenswrapper[4857]: I1128 16:00:35.579460 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jzwtb" Nov 28 16:00:35 crc kubenswrapper[4857]: I1128 16:00:35.640927 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd30cc6b-2e55-48dd-886a-a99269745675-utilities\") pod \"cd30cc6b-2e55-48dd-886a-a99269745675\" (UID: \"cd30cc6b-2e55-48dd-886a-a99269745675\") " Nov 28 16:00:35 crc kubenswrapper[4857]: I1128 16:00:35.641052 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-94t2f\" (UniqueName: \"kubernetes.io/projected/cd30cc6b-2e55-48dd-886a-a99269745675-kube-api-access-94t2f\") pod \"cd30cc6b-2e55-48dd-886a-a99269745675\" (UID: \"cd30cc6b-2e55-48dd-886a-a99269745675\") " Nov 28 16:00:35 crc kubenswrapper[4857]: I1128 16:00:35.641151 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd30cc6b-2e55-48dd-886a-a99269745675-catalog-content\") pod \"cd30cc6b-2e55-48dd-886a-a99269745675\" (UID: \"cd30cc6b-2e55-48dd-886a-a99269745675\") " Nov 28 16:00:35 crc kubenswrapper[4857]: I1128 16:00:35.645903 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cd30cc6b-2e55-48dd-886a-a99269745675-utilities" (OuterVolumeSpecName: "utilities") pod "cd30cc6b-2e55-48dd-886a-a99269745675" (UID: "cd30cc6b-2e55-48dd-886a-a99269745675"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:00:35 crc kubenswrapper[4857]: I1128 16:00:35.695014 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cd30cc6b-2e55-48dd-886a-a99269745675-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cd30cc6b-2e55-48dd-886a-a99269745675" (UID: "cd30cc6b-2e55-48dd-886a-a99269745675"). InnerVolumeSpecName "catalog-content". 
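"Killing container with a grace period ... gracePeriod=2" above is the standard graceful-termination sequence: the runtime sends SIGTERM, waits up to the grace period for the process to exit, and only then sends SIGKILL. A sketch of that sequence on an ordinary process (the runtime does this per container; names and the sleep stand-in are illustrative):

package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

// killWithGrace sends SIGTERM, waits up to grace for exit, then SIGKILLs.
func killWithGrace(cmd *exec.Cmd, grace time.Duration) {
	_ = cmd.Process.Signal(syscall.SIGTERM)
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()
	select {
	case <-done:
		fmt.Println("exited within grace period")
	case <-time.After(grace):
		_ = cmd.Process.Kill() // SIGKILL once the grace period expires
		<-done
		fmt.Println("killed after grace period")
	}
}

func main() {
	cmd := exec.Command("sleep", "30") // stands in for the container process
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	killWithGrace(cmd, 2*time.Second) // gracePeriod=2, as in the log above
}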
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:00:35 crc kubenswrapper[4857]: I1128 16:00:35.743985 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd30cc6b-2e55-48dd-886a-a99269745675-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:00:35 crc kubenswrapper[4857]: I1128 16:00:35.744014 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd30cc6b-2e55-48dd-886a-a99269745675-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:00:36 crc kubenswrapper[4857]: I1128 16:00:36.063056 4857 generic.go:334] "Generic (PLEG): container finished" podID="cd30cc6b-2e55-48dd-886a-a99269745675" containerID="d0db06c942296993a6601c1d8731d75b727f688b4a8c21b9b103fc5d5a0ccb68" exitCode=0 Nov 28 16:00:36 crc kubenswrapper[4857]: I1128 16:00:36.063126 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jzwtb" Nov 28 16:00:36 crc kubenswrapper[4857]: I1128 16:00:36.063170 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jzwtb" event={"ID":"cd30cc6b-2e55-48dd-886a-a99269745675","Type":"ContainerDied","Data":"d0db06c942296993a6601c1d8731d75b727f688b4a8c21b9b103fc5d5a0ccb68"} Nov 28 16:00:36 crc kubenswrapper[4857]: I1128 16:00:36.065844 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jzwtb" event={"ID":"cd30cc6b-2e55-48dd-886a-a99269745675","Type":"ContainerDied","Data":"354ac9484261fe48db4c105565538332e91a8043585ca022e689c91cbce2fb1f"} Nov 28 16:00:36 crc kubenswrapper[4857]: I1128 16:00:36.066055 4857 scope.go:117] "RemoveContainer" containerID="d0db06c942296993a6601c1d8731d75b727f688b4a8c21b9b103fc5d5a0ccb68" Nov 28 16:00:36 crc kubenswrapper[4857]: I1128 16:00:36.107109 4857 scope.go:117] "RemoveContainer" containerID="67efa9d06d24df3891bd583d76421eb4e5b6fe4e97a2d8313976de9bb090d526" Nov 28 16:00:36 crc kubenswrapper[4857]: I1128 16:00:36.276066 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd30cc6b-2e55-48dd-886a-a99269745675-kube-api-access-94t2f" (OuterVolumeSpecName: "kube-api-access-94t2f") pod "cd30cc6b-2e55-48dd-886a-a99269745675" (UID: "cd30cc6b-2e55-48dd-886a-a99269745675"). InnerVolumeSpecName "kube-api-access-94t2f". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:00:36 crc kubenswrapper[4857]: I1128 16:00:36.318121 4857 scope.go:117] "RemoveContainer" containerID="64b9bbd38fcb8c3f6361976e9582eee8fc5c398137f38113c19a19f6e781125f" Nov 28 16:00:36 crc kubenswrapper[4857]: I1128 16:00:36.358363 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-94t2f\" (UniqueName: \"kubernetes.io/projected/cd30cc6b-2e55-48dd-886a-a99269745675-kube-api-access-94t2f\") on node \"crc\" DevicePath \"\"" Nov 28 16:00:36 crc kubenswrapper[4857]: I1128 16:00:36.468186 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jzwtb"] Nov 28 16:00:36 crc kubenswrapper[4857]: I1128 16:00:36.484345 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-jzwtb"] Nov 28 16:00:36 crc kubenswrapper[4857]: I1128 16:00:36.492106 4857 scope.go:117] "RemoveContainer" containerID="d0db06c942296993a6601c1d8731d75b727f688b4a8c21b9b103fc5d5a0ccb68" Nov 28 16:00:36 crc kubenswrapper[4857]: E1128 16:00:36.493044 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d0db06c942296993a6601c1d8731d75b727f688b4a8c21b9b103fc5d5a0ccb68\": container with ID starting with d0db06c942296993a6601c1d8731d75b727f688b4a8c21b9b103fc5d5a0ccb68 not found: ID does not exist" containerID="d0db06c942296993a6601c1d8731d75b727f688b4a8c21b9b103fc5d5a0ccb68" Nov 28 16:00:36 crc kubenswrapper[4857]: I1128 16:00:36.493081 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d0db06c942296993a6601c1d8731d75b727f688b4a8c21b9b103fc5d5a0ccb68"} err="failed to get container status \"d0db06c942296993a6601c1d8731d75b727f688b4a8c21b9b103fc5d5a0ccb68\": rpc error: code = NotFound desc = could not find container \"d0db06c942296993a6601c1d8731d75b727f688b4a8c21b9b103fc5d5a0ccb68\": container with ID starting with d0db06c942296993a6601c1d8731d75b727f688b4a8c21b9b103fc5d5a0ccb68 not found: ID does not exist" Nov 28 16:00:36 crc kubenswrapper[4857]: I1128 16:00:36.493102 4857 scope.go:117] "RemoveContainer" containerID="67efa9d06d24df3891bd583d76421eb4e5b6fe4e97a2d8313976de9bb090d526" Nov 28 16:00:36 crc kubenswrapper[4857]: E1128 16:00:36.493922 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"67efa9d06d24df3891bd583d76421eb4e5b6fe4e97a2d8313976de9bb090d526\": container with ID starting with 67efa9d06d24df3891bd583d76421eb4e5b6fe4e97a2d8313976de9bb090d526 not found: ID does not exist" containerID="67efa9d06d24df3891bd583d76421eb4e5b6fe4e97a2d8313976de9bb090d526" Nov 28 16:00:36 crc kubenswrapper[4857]: I1128 16:00:36.494071 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67efa9d06d24df3891bd583d76421eb4e5b6fe4e97a2d8313976de9bb090d526"} err="failed to get container status \"67efa9d06d24df3891bd583d76421eb4e5b6fe4e97a2d8313976de9bb090d526\": rpc error: code = NotFound desc = could not find container \"67efa9d06d24df3891bd583d76421eb4e5b6fe4e97a2d8313976de9bb090d526\": container with ID starting with 67efa9d06d24df3891bd583d76421eb4e5b6fe4e97a2d8313976de9bb090d526 not found: ID does not exist" Nov 28 16:00:36 crc kubenswrapper[4857]: I1128 16:00:36.494171 4857 scope.go:117] "RemoveContainer" containerID="64b9bbd38fcb8c3f6361976e9582eee8fc5c398137f38113c19a19f6e781125f" Nov 28 16:00:36 crc 
kubenswrapper[4857]: E1128 16:00:36.494719 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"64b9bbd38fcb8c3f6361976e9582eee8fc5c398137f38113c19a19f6e781125f\": container with ID starting with 64b9bbd38fcb8c3f6361976e9582eee8fc5c398137f38113c19a19f6e781125f not found: ID does not exist" containerID="64b9bbd38fcb8c3f6361976e9582eee8fc5c398137f38113c19a19f6e781125f" Nov 28 16:00:36 crc kubenswrapper[4857]: I1128 16:00:36.494772 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"64b9bbd38fcb8c3f6361976e9582eee8fc5c398137f38113c19a19f6e781125f"} err="failed to get container status \"64b9bbd38fcb8c3f6361976e9582eee8fc5c398137f38113c19a19f6e781125f\": rpc error: code = NotFound desc = could not find container \"64b9bbd38fcb8c3f6361976e9582eee8fc5c398137f38113c19a19f6e781125f\": container with ID starting with 64b9bbd38fcb8c3f6361976e9582eee8fc5c398137f38113c19a19f6e781125f not found: ID does not exist" Nov 28 16:00:38 crc kubenswrapper[4857]: I1128 16:00:38.242125 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd30cc6b-2e55-48dd-886a-a99269745675" path="/var/lib/kubelet/pods/cd30cc6b-2e55-48dd-886a-a99269745675/volumes" Nov 28 16:00:41 crc kubenswrapper[4857]: I1128 16:00:41.308657 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:00:41 crc kubenswrapper[4857]: I1128 16:00:41.310165 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:00:41 crc kubenswrapper[4857]: I1128 16:00:41.310310 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 16:00:41 crc kubenswrapper[4857]: I1128 16:00:41.312005 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8f0b4f43b3c2b1312f25d3cf29da6c85d5039cb97cabf585a54e035436faab77"} pod="openshift-machine-config-operator/machine-config-daemon-dshsf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:00:41 crc kubenswrapper[4857]: I1128 16:00:41.312143 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" containerID="cri-o://8f0b4f43b3c2b1312f25d3cf29da6c85d5039cb97cabf585a54e035436faab77" gracePeriod=600 Nov 28 16:00:42 crc kubenswrapper[4857]: I1128 16:00:42.124823 4857 generic.go:334] "Generic (PLEG): container finished" podID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerID="8f0b4f43b3c2b1312f25d3cf29da6c85d5039cb97cabf585a54e035436faab77" exitCode=0 Nov 28 16:00:42 crc kubenswrapper[4857]: I1128 16:00:42.124873 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" 
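The machine-config-daemon records above show the full liveness cycle: the probe is an HTTP GET against http://127.0.0.1:8798/health, a transport error such as "connection refused" counts as a failure, and once the failure threshold is crossed the kubelet kills the container (here with gracePeriod=600) and restarts it. A minimal sketch of such a check, matching the HTTP-probe convention that 2xx-3xx is success and anything else fails (thresholds and restart policy live in the pod spec, not here):

package main

import (
	"fmt"
	"net/http"
	"time"
)

// probe performs one HTTP liveness check: any transport error or a status
// outside 200-399 counts as a failure.
func probe(url string, timeout time.Duration) error {
	client := &http.Client{Timeout: timeout}
	resp, err := client.Get(url)
	if err != nil {
		return err // e.g. "connect: connection refused", as in the log above
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return fmt.Errorf("unexpected status %d", resp.StatusCode)
	}
	return nil
}

func main() {
	if err := probe("http://127.0.0.1:8798/health", time.Second); err != nil {
		fmt.Println("Probe failed:", err) // repeated failures trigger a restart
	}
}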
event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerDied","Data":"8f0b4f43b3c2b1312f25d3cf29da6c85d5039cb97cabf585a54e035436faab77"} Nov 28 16:00:42 crc kubenswrapper[4857]: I1128 16:00:42.125361 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerStarted","Data":"eae255c7ca23e085c377e42c983c0700a2ad67405174b16d098d016c18acf626"} Nov 28 16:00:42 crc kubenswrapper[4857]: I1128 16:00:42.125383 4857 scope.go:117] "RemoveContainer" containerID="344a437ad15b006fb0ad8fffd41a8343e36703951524ac5b5fe770932213a6ba" Nov 28 16:01:00 crc kubenswrapper[4857]: I1128 16:01:00.159066 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29405761-9p2mh"] Nov 28 16:01:00 crc kubenswrapper[4857]: E1128 16:01:00.160716 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd30cc6b-2e55-48dd-886a-a99269745675" containerName="registry-server" Nov 28 16:01:00 crc kubenswrapper[4857]: I1128 16:01:00.160755 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd30cc6b-2e55-48dd-886a-a99269745675" containerName="registry-server" Nov 28 16:01:00 crc kubenswrapper[4857]: E1128 16:01:00.160808 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd30cc6b-2e55-48dd-886a-a99269745675" containerName="extract-content" Nov 28 16:01:00 crc kubenswrapper[4857]: I1128 16:01:00.160822 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd30cc6b-2e55-48dd-886a-a99269745675" containerName="extract-content" Nov 28 16:01:00 crc kubenswrapper[4857]: E1128 16:01:00.160865 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd30cc6b-2e55-48dd-886a-a99269745675" containerName="extract-utilities" Nov 28 16:01:00 crc kubenswrapper[4857]: I1128 16:01:00.160881 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd30cc6b-2e55-48dd-886a-a99269745675" containerName="extract-utilities" Nov 28 16:01:00 crc kubenswrapper[4857]: I1128 16:01:00.161397 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd30cc6b-2e55-48dd-886a-a99269745675" containerName="registry-server" Nov 28 16:01:00 crc kubenswrapper[4857]: I1128 16:01:00.162811 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29405761-9p2mh" Nov 28 16:01:00 crc kubenswrapper[4857]: I1128 16:01:00.190063 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29405761-9p2mh"] Nov 28 16:01:00 crc kubenswrapper[4857]: I1128 16:01:00.252026 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/783defcf-e557-4a27-8e0f-e19bccf6bdf9-fernet-keys\") pod \"keystone-cron-29405761-9p2mh\" (UID: \"783defcf-e557-4a27-8e0f-e19bccf6bdf9\") " pod="openstack/keystone-cron-29405761-9p2mh" Nov 28 16:01:00 crc kubenswrapper[4857]: I1128 16:01:00.252091 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/783defcf-e557-4a27-8e0f-e19bccf6bdf9-config-data\") pod \"keystone-cron-29405761-9p2mh\" (UID: \"783defcf-e557-4a27-8e0f-e19bccf6bdf9\") " pod="openstack/keystone-cron-29405761-9p2mh" Nov 28 16:01:00 crc kubenswrapper[4857]: I1128 16:01:00.252161 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h64xl\" (UniqueName: \"kubernetes.io/projected/783defcf-e557-4a27-8e0f-e19bccf6bdf9-kube-api-access-h64xl\") pod \"keystone-cron-29405761-9p2mh\" (UID: \"783defcf-e557-4a27-8e0f-e19bccf6bdf9\") " pod="openstack/keystone-cron-29405761-9p2mh" Nov 28 16:01:00 crc kubenswrapper[4857]: I1128 16:01:00.252228 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/783defcf-e557-4a27-8e0f-e19bccf6bdf9-combined-ca-bundle\") pod \"keystone-cron-29405761-9p2mh\" (UID: \"783defcf-e557-4a27-8e0f-e19bccf6bdf9\") " pod="openstack/keystone-cron-29405761-9p2mh" Nov 28 16:01:00 crc kubenswrapper[4857]: I1128 16:01:00.354492 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/783defcf-e557-4a27-8e0f-e19bccf6bdf9-combined-ca-bundle\") pod \"keystone-cron-29405761-9p2mh\" (UID: \"783defcf-e557-4a27-8e0f-e19bccf6bdf9\") " pod="openstack/keystone-cron-29405761-9p2mh" Nov 28 16:01:00 crc kubenswrapper[4857]: I1128 16:01:00.354659 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/783defcf-e557-4a27-8e0f-e19bccf6bdf9-fernet-keys\") pod \"keystone-cron-29405761-9p2mh\" (UID: \"783defcf-e557-4a27-8e0f-e19bccf6bdf9\") " pod="openstack/keystone-cron-29405761-9p2mh" Nov 28 16:01:00 crc kubenswrapper[4857]: I1128 16:01:00.354704 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/783defcf-e557-4a27-8e0f-e19bccf6bdf9-config-data\") pod \"keystone-cron-29405761-9p2mh\" (UID: \"783defcf-e557-4a27-8e0f-e19bccf6bdf9\") " pod="openstack/keystone-cron-29405761-9p2mh" Nov 28 16:01:00 crc kubenswrapper[4857]: I1128 16:01:00.354752 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h64xl\" (UniqueName: \"kubernetes.io/projected/783defcf-e557-4a27-8e0f-e19bccf6bdf9-kube-api-access-h64xl\") pod \"keystone-cron-29405761-9p2mh\" (UID: \"783defcf-e557-4a27-8e0f-e19bccf6bdf9\") " pod="openstack/keystone-cron-29405761-9p2mh" Nov 28 16:01:00 crc kubenswrapper[4857]: I1128 16:01:00.360721 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/783defcf-e557-4a27-8e0f-e19bccf6bdf9-combined-ca-bundle\") pod \"keystone-cron-29405761-9p2mh\" (UID: \"783defcf-e557-4a27-8e0f-e19bccf6bdf9\") " pod="openstack/keystone-cron-29405761-9p2mh" Nov 28 16:01:00 crc kubenswrapper[4857]: I1128 16:01:00.361841 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/783defcf-e557-4a27-8e0f-e19bccf6bdf9-fernet-keys\") pod \"keystone-cron-29405761-9p2mh\" (UID: \"783defcf-e557-4a27-8e0f-e19bccf6bdf9\") " pod="openstack/keystone-cron-29405761-9p2mh" Nov 28 16:01:00 crc kubenswrapper[4857]: I1128 16:01:00.368762 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/783defcf-e557-4a27-8e0f-e19bccf6bdf9-config-data\") pod \"keystone-cron-29405761-9p2mh\" (UID: \"783defcf-e557-4a27-8e0f-e19bccf6bdf9\") " pod="openstack/keystone-cron-29405761-9p2mh" Nov 28 16:01:00 crc kubenswrapper[4857]: I1128 16:01:00.370383 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h64xl\" (UniqueName: \"kubernetes.io/projected/783defcf-e557-4a27-8e0f-e19bccf6bdf9-kube-api-access-h64xl\") pod \"keystone-cron-29405761-9p2mh\" (UID: \"783defcf-e557-4a27-8e0f-e19bccf6bdf9\") " pod="openstack/keystone-cron-29405761-9p2mh" Nov 28 16:01:00 crc kubenswrapper[4857]: I1128 16:01:00.501420 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29405761-9p2mh" Nov 28 16:01:00 crc kubenswrapper[4857]: I1128 16:01:00.961616 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29405761-9p2mh"] Nov 28 16:01:01 crc kubenswrapper[4857]: I1128 16:01:01.319037 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29405761-9p2mh" event={"ID":"783defcf-e557-4a27-8e0f-e19bccf6bdf9","Type":"ContainerStarted","Data":"7cf55b020a9de27107456312189666728c1210ce14ddcff1e6376f6331cf6add"} Nov 28 16:01:02 crc kubenswrapper[4857]: I1128 16:01:02.336776 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29405761-9p2mh" event={"ID":"783defcf-e557-4a27-8e0f-e19bccf6bdf9","Type":"ContainerStarted","Data":"cfce7c4d6f208f2624d9722f36eb37a86ea81cd1d66d6ae293ae50c0f5367b31"} Nov 28 16:01:02 crc kubenswrapper[4857]: I1128 16:01:02.389534 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29405761-9p2mh" podStartSLOduration=2.389505809 podStartE2EDuration="2.389505809s" podCreationTimestamp="2025-11-28 16:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:01:02.364466263 +0000 UTC m=+9112.488407740" watchObservedRunningTime="2025-11-28 16:01:02.389505809 +0000 UTC m=+9112.513447266" Nov 28 16:01:04 crc kubenswrapper[4857]: I1128 16:01:04.366062 4857 generic.go:334] "Generic (PLEG): container finished" podID="783defcf-e557-4a27-8e0f-e19bccf6bdf9" containerID="cfce7c4d6f208f2624d9722f36eb37a86ea81cd1d66d6ae293ae50c0f5367b31" exitCode=0 Nov 28 16:01:04 crc kubenswrapper[4857]: I1128 16:01:04.366430 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29405761-9p2mh" event={"ID":"783defcf-e557-4a27-8e0f-e19bccf6bdf9","Type":"ContainerDied","Data":"cfce7c4d6f208f2624d9722f36eb37a86ea81cd1d66d6ae293ae50c0f5367b31"} Nov 28 16:01:05 crc kubenswrapper[4857]: 
I1128 16:01:05.799727 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29405761-9p2mh" Nov 28 16:01:05 crc kubenswrapper[4857]: I1128 16:01:05.877051 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/783defcf-e557-4a27-8e0f-e19bccf6bdf9-combined-ca-bundle\") pod \"783defcf-e557-4a27-8e0f-e19bccf6bdf9\" (UID: \"783defcf-e557-4a27-8e0f-e19bccf6bdf9\") " Nov 28 16:01:05 crc kubenswrapper[4857]: I1128 16:01:05.877257 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h64xl\" (UniqueName: \"kubernetes.io/projected/783defcf-e557-4a27-8e0f-e19bccf6bdf9-kube-api-access-h64xl\") pod \"783defcf-e557-4a27-8e0f-e19bccf6bdf9\" (UID: \"783defcf-e557-4a27-8e0f-e19bccf6bdf9\") " Nov 28 16:01:05 crc kubenswrapper[4857]: I1128 16:01:05.877413 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/783defcf-e557-4a27-8e0f-e19bccf6bdf9-fernet-keys\") pod \"783defcf-e557-4a27-8e0f-e19bccf6bdf9\" (UID: \"783defcf-e557-4a27-8e0f-e19bccf6bdf9\") " Nov 28 16:01:05 crc kubenswrapper[4857]: I1128 16:01:05.877587 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/783defcf-e557-4a27-8e0f-e19bccf6bdf9-config-data\") pod \"783defcf-e557-4a27-8e0f-e19bccf6bdf9\" (UID: \"783defcf-e557-4a27-8e0f-e19bccf6bdf9\") " Nov 28 16:01:05 crc kubenswrapper[4857]: I1128 16:01:05.882573 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/783defcf-e557-4a27-8e0f-e19bccf6bdf9-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "783defcf-e557-4a27-8e0f-e19bccf6bdf9" (UID: "783defcf-e557-4a27-8e0f-e19bccf6bdf9"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:01:05 crc kubenswrapper[4857]: I1128 16:01:05.883178 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/783defcf-e557-4a27-8e0f-e19bccf6bdf9-kube-api-access-h64xl" (OuterVolumeSpecName: "kube-api-access-h64xl") pod "783defcf-e557-4a27-8e0f-e19bccf6bdf9" (UID: "783defcf-e557-4a27-8e0f-e19bccf6bdf9"). InnerVolumeSpecName "kube-api-access-h64xl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:01:05 crc kubenswrapper[4857]: I1128 16:01:05.905915 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/783defcf-e557-4a27-8e0f-e19bccf6bdf9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "783defcf-e557-4a27-8e0f-e19bccf6bdf9" (UID: "783defcf-e557-4a27-8e0f-e19bccf6bdf9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:01:05 crc kubenswrapper[4857]: I1128 16:01:05.942434 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/783defcf-e557-4a27-8e0f-e19bccf6bdf9-config-data" (OuterVolumeSpecName: "config-data") pod "783defcf-e557-4a27-8e0f-e19bccf6bdf9" (UID: "783defcf-e557-4a27-8e0f-e19bccf6bdf9"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:01:05 crc kubenswrapper[4857]: I1128 16:01:05.994238 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/783defcf-e557-4a27-8e0f-e19bccf6bdf9-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:01:05 crc kubenswrapper[4857]: I1128 16:01:05.994274 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/783defcf-e557-4a27-8e0f-e19bccf6bdf9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:01:05 crc kubenswrapper[4857]: I1128 16:01:05.994288 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h64xl\" (UniqueName: \"kubernetes.io/projected/783defcf-e557-4a27-8e0f-e19bccf6bdf9-kube-api-access-h64xl\") on node \"crc\" DevicePath \"\"" Nov 28 16:01:05 crc kubenswrapper[4857]: I1128 16:01:05.994297 4857 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/783defcf-e557-4a27-8e0f-e19bccf6bdf9-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 28 16:01:06 crc kubenswrapper[4857]: I1128 16:01:06.389187 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29405761-9p2mh" event={"ID":"783defcf-e557-4a27-8e0f-e19bccf6bdf9","Type":"ContainerDied","Data":"7cf55b020a9de27107456312189666728c1210ce14ddcff1e6376f6331cf6add"} Nov 28 16:01:06 crc kubenswrapper[4857]: I1128 16:01:06.389487 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7cf55b020a9de27107456312189666728c1210ce14ddcff1e6376f6331cf6add" Nov 28 16:01:06 crc kubenswrapper[4857]: I1128 16:01:06.389256 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29405761-9p2mh" Nov 28 16:02:41 crc kubenswrapper[4857]: I1128 16:02:41.309102 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:02:41 crc kubenswrapper[4857]: I1128 16:02:41.309666 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:03:11 crc kubenswrapper[4857]: I1128 16:03:11.309213 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:03:11 crc kubenswrapper[4857]: I1128 16:03:11.309978 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:03:13 crc kubenswrapper[4857]: I1128 16:03:13.154235 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-7f7z7"] Nov 28 16:03:13 crc kubenswrapper[4857]: 
E1128 16:03:13.155264 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="783defcf-e557-4a27-8e0f-e19bccf6bdf9" containerName="keystone-cron" Nov 28 16:03:13 crc kubenswrapper[4857]: I1128 16:03:13.155281 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="783defcf-e557-4a27-8e0f-e19bccf6bdf9" containerName="keystone-cron" Nov 28 16:03:13 crc kubenswrapper[4857]: I1128 16:03:13.155524 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="783defcf-e557-4a27-8e0f-e19bccf6bdf9" containerName="keystone-cron" Nov 28 16:03:13 crc kubenswrapper[4857]: I1128 16:03:13.157144 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7f7z7" Nov 28 16:03:13 crc kubenswrapper[4857]: I1128 16:03:13.166968 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7f7z7"] Nov 28 16:03:13 crc kubenswrapper[4857]: I1128 16:03:13.265876 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-86hcq\" (UniqueName: \"kubernetes.io/projected/93defbc9-f188-43a1-ac0a-0214be9030ea-kube-api-access-86hcq\") pod \"redhat-marketplace-7f7z7\" (UID: \"93defbc9-f188-43a1-ac0a-0214be9030ea\") " pod="openshift-marketplace/redhat-marketplace-7f7z7" Nov 28 16:03:13 crc kubenswrapper[4857]: I1128 16:03:13.265977 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93defbc9-f188-43a1-ac0a-0214be9030ea-catalog-content\") pod \"redhat-marketplace-7f7z7\" (UID: \"93defbc9-f188-43a1-ac0a-0214be9030ea\") " pod="openshift-marketplace/redhat-marketplace-7f7z7" Nov 28 16:03:13 crc kubenswrapper[4857]: I1128 16:03:13.266765 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93defbc9-f188-43a1-ac0a-0214be9030ea-utilities\") pod \"redhat-marketplace-7f7z7\" (UID: \"93defbc9-f188-43a1-ac0a-0214be9030ea\") " pod="openshift-marketplace/redhat-marketplace-7f7z7" Nov 28 16:03:13 crc kubenswrapper[4857]: I1128 16:03:13.369366 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-86hcq\" (UniqueName: \"kubernetes.io/projected/93defbc9-f188-43a1-ac0a-0214be9030ea-kube-api-access-86hcq\") pod \"redhat-marketplace-7f7z7\" (UID: \"93defbc9-f188-43a1-ac0a-0214be9030ea\") " pod="openshift-marketplace/redhat-marketplace-7f7z7" Nov 28 16:03:13 crc kubenswrapper[4857]: I1128 16:03:13.369458 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93defbc9-f188-43a1-ac0a-0214be9030ea-catalog-content\") pod \"redhat-marketplace-7f7z7\" (UID: \"93defbc9-f188-43a1-ac0a-0214be9030ea\") " pod="openshift-marketplace/redhat-marketplace-7f7z7" Nov 28 16:03:13 crc kubenswrapper[4857]: I1128 16:03:13.369539 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93defbc9-f188-43a1-ac0a-0214be9030ea-utilities\") pod \"redhat-marketplace-7f7z7\" (UID: \"93defbc9-f188-43a1-ac0a-0214be9030ea\") " pod="openshift-marketplace/redhat-marketplace-7f7z7" Nov 28 16:03:13 crc kubenswrapper[4857]: I1128 16:03:13.369964 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/93defbc9-f188-43a1-ac0a-0214be9030ea-catalog-content\") pod \"redhat-marketplace-7f7z7\" (UID: \"93defbc9-f188-43a1-ac0a-0214be9030ea\") " pod="openshift-marketplace/redhat-marketplace-7f7z7" Nov 28 16:03:13 crc kubenswrapper[4857]: I1128 16:03:13.370308 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93defbc9-f188-43a1-ac0a-0214be9030ea-utilities\") pod \"redhat-marketplace-7f7z7\" (UID: \"93defbc9-f188-43a1-ac0a-0214be9030ea\") " pod="openshift-marketplace/redhat-marketplace-7f7z7" Nov 28 16:03:13 crc kubenswrapper[4857]: I1128 16:03:13.410618 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-86hcq\" (UniqueName: \"kubernetes.io/projected/93defbc9-f188-43a1-ac0a-0214be9030ea-kube-api-access-86hcq\") pod \"redhat-marketplace-7f7z7\" (UID: \"93defbc9-f188-43a1-ac0a-0214be9030ea\") " pod="openshift-marketplace/redhat-marketplace-7f7z7" Nov 28 16:03:13 crc kubenswrapper[4857]: I1128 16:03:13.487645 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7f7z7" Nov 28 16:03:13 crc kubenswrapper[4857]: I1128 16:03:13.951617 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7f7z7"] Nov 28 16:03:14 crc kubenswrapper[4857]: I1128 16:03:14.933196 4857 generic.go:334] "Generic (PLEG): container finished" podID="93defbc9-f188-43a1-ac0a-0214be9030ea" containerID="2d483d4b8d89b919441057b21b9155f17c3f4919f1053cc553bfc32d54ba52b4" exitCode=0 Nov 28 16:03:14 crc kubenswrapper[4857]: I1128 16:03:14.933266 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7f7z7" event={"ID":"93defbc9-f188-43a1-ac0a-0214be9030ea","Type":"ContainerDied","Data":"2d483d4b8d89b919441057b21b9155f17c3f4919f1053cc553bfc32d54ba52b4"} Nov 28 16:03:14 crc kubenswrapper[4857]: I1128 16:03:14.933852 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7f7z7" event={"ID":"93defbc9-f188-43a1-ac0a-0214be9030ea","Type":"ContainerStarted","Data":"34dbc33c35df8100d0aa1ef0b331077eda075f83b5b152f2919cab9f1abe1028"} Nov 28 16:03:16 crc kubenswrapper[4857]: I1128 16:03:16.962667 4857 generic.go:334] "Generic (PLEG): container finished" podID="93defbc9-f188-43a1-ac0a-0214be9030ea" containerID="edf6047c4cb5d8a4f85c34049e5b4247fdd6314ade2dcc0cba3e9ed28d621ab8" exitCode=0 Nov 28 16:03:16 crc kubenswrapper[4857]: I1128 16:03:16.962960 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7f7z7" event={"ID":"93defbc9-f188-43a1-ac0a-0214be9030ea","Type":"ContainerDied","Data":"edf6047c4cb5d8a4f85c34049e5b4247fdd6314ade2dcc0cba3e9ed28d621ab8"} Nov 28 16:03:18 crc kubenswrapper[4857]: I1128 16:03:18.992557 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7f7z7" event={"ID":"93defbc9-f188-43a1-ac0a-0214be9030ea","Type":"ContainerStarted","Data":"5c5362dbb6cfa952ff79da10dd6bce9e1925946e17e18a20a2b0119f76349878"} Nov 28 16:03:19 crc kubenswrapper[4857]: I1128 16:03:19.014530 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-7f7z7" podStartSLOduration=3.48053888 podStartE2EDuration="6.014514128s" podCreationTimestamp="2025-11-28 16:03:13 +0000 UTC" firstStartedPulling="2025-11-28 16:03:14.936460507 +0000 UTC m=+9245.060401944" 
lastFinishedPulling="2025-11-28 16:03:17.470435745 +0000 UTC m=+9247.594377192" observedRunningTime="2025-11-28 16:03:19.013013168 +0000 UTC m=+9249.136954705" watchObservedRunningTime="2025-11-28 16:03:19.014514128 +0000 UTC m=+9249.138455565" Nov 28 16:03:23 crc kubenswrapper[4857]: I1128 16:03:23.488712 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-7f7z7" Nov 28 16:03:23 crc kubenswrapper[4857]: I1128 16:03:23.490097 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-7f7z7" Nov 28 16:03:23 crc kubenswrapper[4857]: I1128 16:03:23.570367 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-7f7z7" Nov 28 16:03:24 crc kubenswrapper[4857]: I1128 16:03:24.122515 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-7f7z7" Nov 28 16:03:24 crc kubenswrapper[4857]: I1128 16:03:24.164847 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7f7z7"] Nov 28 16:03:26 crc kubenswrapper[4857]: I1128 16:03:26.075175 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-7f7z7" podUID="93defbc9-f188-43a1-ac0a-0214be9030ea" containerName="registry-server" containerID="cri-o://5c5362dbb6cfa952ff79da10dd6bce9e1925946e17e18a20a2b0119f76349878" gracePeriod=2 Nov 28 16:03:26 crc kubenswrapper[4857]: I1128 16:03:26.556724 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7f7z7" Nov 28 16:03:26 crc kubenswrapper[4857]: I1128 16:03:26.719998 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93defbc9-f188-43a1-ac0a-0214be9030ea-utilities\") pod \"93defbc9-f188-43a1-ac0a-0214be9030ea\" (UID: \"93defbc9-f188-43a1-ac0a-0214be9030ea\") " Nov 28 16:03:26 crc kubenswrapper[4857]: I1128 16:03:26.720125 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-86hcq\" (UniqueName: \"kubernetes.io/projected/93defbc9-f188-43a1-ac0a-0214be9030ea-kube-api-access-86hcq\") pod \"93defbc9-f188-43a1-ac0a-0214be9030ea\" (UID: \"93defbc9-f188-43a1-ac0a-0214be9030ea\") " Nov 28 16:03:26 crc kubenswrapper[4857]: I1128 16:03:26.720266 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93defbc9-f188-43a1-ac0a-0214be9030ea-catalog-content\") pod \"93defbc9-f188-43a1-ac0a-0214be9030ea\" (UID: \"93defbc9-f188-43a1-ac0a-0214be9030ea\") " Nov 28 16:03:26 crc kubenswrapper[4857]: I1128 16:03:26.721796 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/93defbc9-f188-43a1-ac0a-0214be9030ea-utilities" (OuterVolumeSpecName: "utilities") pod "93defbc9-f188-43a1-ac0a-0214be9030ea" (UID: "93defbc9-f188-43a1-ac0a-0214be9030ea"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:03:26 crc kubenswrapper[4857]: I1128 16:03:26.727318 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93defbc9-f188-43a1-ac0a-0214be9030ea-kube-api-access-86hcq" (OuterVolumeSpecName: "kube-api-access-86hcq") pod "93defbc9-f188-43a1-ac0a-0214be9030ea" (UID: "93defbc9-f188-43a1-ac0a-0214be9030ea"). InnerVolumeSpecName "kube-api-access-86hcq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:03:26 crc kubenswrapper[4857]: I1128 16:03:26.748465 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/93defbc9-f188-43a1-ac0a-0214be9030ea-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "93defbc9-f188-43a1-ac0a-0214be9030ea" (UID: "93defbc9-f188-43a1-ac0a-0214be9030ea"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:03:26 crc kubenswrapper[4857]: I1128 16:03:26.823480 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-86hcq\" (UniqueName: \"kubernetes.io/projected/93defbc9-f188-43a1-ac0a-0214be9030ea-kube-api-access-86hcq\") on node \"crc\" DevicePath \"\"" Nov 28 16:03:26 crc kubenswrapper[4857]: I1128 16:03:26.823535 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93defbc9-f188-43a1-ac0a-0214be9030ea-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:03:26 crc kubenswrapper[4857]: I1128 16:03:26.823556 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93defbc9-f188-43a1-ac0a-0214be9030ea-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:03:27 crc kubenswrapper[4857]: I1128 16:03:27.090838 4857 generic.go:334] "Generic (PLEG): container finished" podID="93defbc9-f188-43a1-ac0a-0214be9030ea" containerID="5c5362dbb6cfa952ff79da10dd6bce9e1925946e17e18a20a2b0119f76349878" exitCode=0 Nov 28 16:03:27 crc kubenswrapper[4857]: I1128 16:03:27.090900 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7f7z7" Nov 28 16:03:27 crc kubenswrapper[4857]: I1128 16:03:27.090924 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7f7z7" event={"ID":"93defbc9-f188-43a1-ac0a-0214be9030ea","Type":"ContainerDied","Data":"5c5362dbb6cfa952ff79da10dd6bce9e1925946e17e18a20a2b0119f76349878"} Nov 28 16:03:27 crc kubenswrapper[4857]: I1128 16:03:27.091612 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7f7z7" event={"ID":"93defbc9-f188-43a1-ac0a-0214be9030ea","Type":"ContainerDied","Data":"34dbc33c35df8100d0aa1ef0b331077eda075f83b5b152f2919cab9f1abe1028"} Nov 28 16:03:27 crc kubenswrapper[4857]: I1128 16:03:27.091636 4857 scope.go:117] "RemoveContainer" containerID="5c5362dbb6cfa952ff79da10dd6bce9e1925946e17e18a20a2b0119f76349878" Nov 28 16:03:27 crc kubenswrapper[4857]: I1128 16:03:27.131433 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7f7z7"] Nov 28 16:03:27 crc kubenswrapper[4857]: I1128 16:03:27.136422 4857 scope.go:117] "RemoveContainer" containerID="edf6047c4cb5d8a4f85c34049e5b4247fdd6314ade2dcc0cba3e9ed28d621ab8" Nov 28 16:03:27 crc kubenswrapper[4857]: I1128 16:03:27.141694 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-7f7z7"] Nov 28 16:03:27 crc kubenswrapper[4857]: I1128 16:03:27.181781 4857 scope.go:117] "RemoveContainer" containerID="2d483d4b8d89b919441057b21b9155f17c3f4919f1053cc553bfc32d54ba52b4" Nov 28 16:03:27 crc kubenswrapper[4857]: I1128 16:03:27.261172 4857 scope.go:117] "RemoveContainer" containerID="5c5362dbb6cfa952ff79da10dd6bce9e1925946e17e18a20a2b0119f76349878" Nov 28 16:03:27 crc kubenswrapper[4857]: E1128 16:03:27.261659 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5c5362dbb6cfa952ff79da10dd6bce9e1925946e17e18a20a2b0119f76349878\": container with ID starting with 5c5362dbb6cfa952ff79da10dd6bce9e1925946e17e18a20a2b0119f76349878 not found: ID does not exist" containerID="5c5362dbb6cfa952ff79da10dd6bce9e1925946e17e18a20a2b0119f76349878" Nov 28 16:03:27 crc kubenswrapper[4857]: I1128 16:03:27.261709 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5c5362dbb6cfa952ff79da10dd6bce9e1925946e17e18a20a2b0119f76349878"} err="failed to get container status \"5c5362dbb6cfa952ff79da10dd6bce9e1925946e17e18a20a2b0119f76349878\": rpc error: code = NotFound desc = could not find container \"5c5362dbb6cfa952ff79da10dd6bce9e1925946e17e18a20a2b0119f76349878\": container with ID starting with 5c5362dbb6cfa952ff79da10dd6bce9e1925946e17e18a20a2b0119f76349878 not found: ID does not exist" Nov 28 16:03:27 crc kubenswrapper[4857]: I1128 16:03:27.261735 4857 scope.go:117] "RemoveContainer" containerID="edf6047c4cb5d8a4f85c34049e5b4247fdd6314ade2dcc0cba3e9ed28d621ab8" Nov 28 16:03:27 crc kubenswrapper[4857]: E1128 16:03:27.262147 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"edf6047c4cb5d8a4f85c34049e5b4247fdd6314ade2dcc0cba3e9ed28d621ab8\": container with ID starting with edf6047c4cb5d8a4f85c34049e5b4247fdd6314ade2dcc0cba3e9ed28d621ab8 not found: ID does not exist" containerID="edf6047c4cb5d8a4f85c34049e5b4247fdd6314ade2dcc0cba3e9ed28d621ab8" Nov 28 16:03:27 crc kubenswrapper[4857]: I1128 16:03:27.262193 4857 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"edf6047c4cb5d8a4f85c34049e5b4247fdd6314ade2dcc0cba3e9ed28d621ab8"} err="failed to get container status \"edf6047c4cb5d8a4f85c34049e5b4247fdd6314ade2dcc0cba3e9ed28d621ab8\": rpc error: code = NotFound desc = could not find container \"edf6047c4cb5d8a4f85c34049e5b4247fdd6314ade2dcc0cba3e9ed28d621ab8\": container with ID starting with edf6047c4cb5d8a4f85c34049e5b4247fdd6314ade2dcc0cba3e9ed28d621ab8 not found: ID does not exist" Nov 28 16:03:27 crc kubenswrapper[4857]: I1128 16:03:27.262223 4857 scope.go:117] "RemoveContainer" containerID="2d483d4b8d89b919441057b21b9155f17c3f4919f1053cc553bfc32d54ba52b4" Nov 28 16:03:27 crc kubenswrapper[4857]: E1128 16:03:27.262483 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2d483d4b8d89b919441057b21b9155f17c3f4919f1053cc553bfc32d54ba52b4\": container with ID starting with 2d483d4b8d89b919441057b21b9155f17c3f4919f1053cc553bfc32d54ba52b4 not found: ID does not exist" containerID="2d483d4b8d89b919441057b21b9155f17c3f4919f1053cc553bfc32d54ba52b4" Nov 28 16:03:27 crc kubenswrapper[4857]: I1128 16:03:27.262520 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d483d4b8d89b919441057b21b9155f17c3f4919f1053cc553bfc32d54ba52b4"} err="failed to get container status \"2d483d4b8d89b919441057b21b9155f17c3f4919f1053cc553bfc32d54ba52b4\": rpc error: code = NotFound desc = could not find container \"2d483d4b8d89b919441057b21b9155f17c3f4919f1053cc553bfc32d54ba52b4\": container with ID starting with 2d483d4b8d89b919441057b21b9155f17c3f4919f1053cc553bfc32d54ba52b4 not found: ID does not exist" Nov 28 16:03:28 crc kubenswrapper[4857]: I1128 16:03:28.248814 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="93defbc9-f188-43a1-ac0a-0214be9030ea" path="/var/lib/kubelet/pods/93defbc9-f188-43a1-ac0a-0214be9030ea/volumes" Nov 28 16:03:41 crc kubenswrapper[4857]: I1128 16:03:41.308352 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:03:41 crc kubenswrapper[4857]: I1128 16:03:41.309034 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:03:41 crc kubenswrapper[4857]: I1128 16:03:41.309102 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 16:03:41 crc kubenswrapper[4857]: I1128 16:03:41.310051 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"eae255c7ca23e085c377e42c983c0700a2ad67405174b16d098d016c18acf626"} pod="openshift-machine-config-operator/machine-config-daemon-dshsf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:03:41 crc kubenswrapper[4857]: I1128 16:03:41.310135 4857 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" containerID="cri-o://eae255c7ca23e085c377e42c983c0700a2ad67405174b16d098d016c18acf626" gracePeriod=600 Nov 28 16:03:41 crc kubenswrapper[4857]: E1128 16:03:41.499390 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:03:42 crc kubenswrapper[4857]: I1128 16:03:42.303031 4857 generic.go:334] "Generic (PLEG): container finished" podID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerID="eae255c7ca23e085c377e42c983c0700a2ad67405174b16d098d016c18acf626" exitCode=0 Nov 28 16:03:42 crc kubenswrapper[4857]: I1128 16:03:42.303426 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerDied","Data":"eae255c7ca23e085c377e42c983c0700a2ad67405174b16d098d016c18acf626"} Nov 28 16:03:42 crc kubenswrapper[4857]: I1128 16:03:42.303478 4857 scope.go:117] "RemoveContainer" containerID="8f0b4f43b3c2b1312f25d3cf29da6c85d5039cb97cabf585a54e035436faab77" Nov 28 16:03:42 crc kubenswrapper[4857]: I1128 16:03:42.304889 4857 scope.go:117] "RemoveContainer" containerID="eae255c7ca23e085c377e42c983c0700a2ad67405174b16d098d016c18acf626" Nov 28 16:03:42 crc kubenswrapper[4857]: E1128 16:03:42.306030 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:03:53 crc kubenswrapper[4857]: I1128 16:03:53.229242 4857 scope.go:117] "RemoveContainer" containerID="eae255c7ca23e085c377e42c983c0700a2ad67405174b16d098d016c18acf626" Nov 28 16:03:53 crc kubenswrapper[4857]: E1128 16:03:53.230126 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:04:07 crc kubenswrapper[4857]: I1128 16:04:07.230018 4857 scope.go:117] "RemoveContainer" containerID="eae255c7ca23e085c377e42c983c0700a2ad67405174b16d098d016c18acf626" Nov 28 16:04:07 crc kubenswrapper[4857]: E1128 16:04:07.231400 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:04:19 crc 
kubenswrapper[4857]: I1128 16:04:19.228889 4857 scope.go:117] "RemoveContainer" containerID="eae255c7ca23e085c377e42c983c0700a2ad67405174b16d098d016c18acf626" Nov 28 16:04:19 crc kubenswrapper[4857]: E1128 16:04:19.229981 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:04:32 crc kubenswrapper[4857]: I1128 16:04:32.229429 4857 scope.go:117] "RemoveContainer" containerID="eae255c7ca23e085c377e42c983c0700a2ad67405174b16d098d016c18acf626" Nov 28 16:04:32 crc kubenswrapper[4857]: E1128 16:04:32.230254 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:04:47 crc kubenswrapper[4857]: I1128 16:04:47.229190 4857 scope.go:117] "RemoveContainer" containerID="eae255c7ca23e085c377e42c983c0700a2ad67405174b16d098d016c18acf626" Nov 28 16:04:47 crc kubenswrapper[4857]: E1128 16:04:47.230342 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:04:56 crc kubenswrapper[4857]: I1128 16:04:56.015430 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-bzn6n"] Nov 28 16:04:56 crc kubenswrapper[4857]: E1128 16:04:56.016444 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93defbc9-f188-43a1-ac0a-0214be9030ea" containerName="registry-server" Nov 28 16:04:56 crc kubenswrapper[4857]: I1128 16:04:56.016459 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="93defbc9-f188-43a1-ac0a-0214be9030ea" containerName="registry-server" Nov 28 16:04:56 crc kubenswrapper[4857]: E1128 16:04:56.016470 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93defbc9-f188-43a1-ac0a-0214be9030ea" containerName="extract-utilities" Nov 28 16:04:56 crc kubenswrapper[4857]: I1128 16:04:56.016477 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="93defbc9-f188-43a1-ac0a-0214be9030ea" containerName="extract-utilities" Nov 28 16:04:56 crc kubenswrapper[4857]: E1128 16:04:56.016502 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93defbc9-f188-43a1-ac0a-0214be9030ea" containerName="extract-content" Nov 28 16:04:56 crc kubenswrapper[4857]: I1128 16:04:56.016510 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="93defbc9-f188-43a1-ac0a-0214be9030ea" containerName="extract-content" Nov 28 16:04:56 crc kubenswrapper[4857]: I1128 16:04:56.016722 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="93defbc9-f188-43a1-ac0a-0214be9030ea" 
containerName="registry-server" Nov 28 16:04:56 crc kubenswrapper[4857]: I1128 16:04:56.018411 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bzn6n" Nov 28 16:04:56 crc kubenswrapper[4857]: I1128 16:04:56.034633 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bzn6n"] Nov 28 16:04:56 crc kubenswrapper[4857]: I1128 16:04:56.132822 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d6lns\" (UniqueName: \"kubernetes.io/projected/0a4af6b5-7bcb-4641-883d-39da1cb53466-kube-api-access-d6lns\") pod \"certified-operators-bzn6n\" (UID: \"0a4af6b5-7bcb-4641-883d-39da1cb53466\") " pod="openshift-marketplace/certified-operators-bzn6n" Nov 28 16:04:56 crc kubenswrapper[4857]: I1128 16:04:56.132918 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a4af6b5-7bcb-4641-883d-39da1cb53466-catalog-content\") pod \"certified-operators-bzn6n\" (UID: \"0a4af6b5-7bcb-4641-883d-39da1cb53466\") " pod="openshift-marketplace/certified-operators-bzn6n" Nov 28 16:04:56 crc kubenswrapper[4857]: I1128 16:04:56.133010 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a4af6b5-7bcb-4641-883d-39da1cb53466-utilities\") pod \"certified-operators-bzn6n\" (UID: \"0a4af6b5-7bcb-4641-883d-39da1cb53466\") " pod="openshift-marketplace/certified-operators-bzn6n" Nov 28 16:04:56 crc kubenswrapper[4857]: I1128 16:04:56.234603 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d6lns\" (UniqueName: \"kubernetes.io/projected/0a4af6b5-7bcb-4641-883d-39da1cb53466-kube-api-access-d6lns\") pod \"certified-operators-bzn6n\" (UID: \"0a4af6b5-7bcb-4641-883d-39da1cb53466\") " pod="openshift-marketplace/certified-operators-bzn6n" Nov 28 16:04:56 crc kubenswrapper[4857]: I1128 16:04:56.234706 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a4af6b5-7bcb-4641-883d-39da1cb53466-catalog-content\") pod \"certified-operators-bzn6n\" (UID: \"0a4af6b5-7bcb-4641-883d-39da1cb53466\") " pod="openshift-marketplace/certified-operators-bzn6n" Nov 28 16:04:56 crc kubenswrapper[4857]: I1128 16:04:56.234773 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a4af6b5-7bcb-4641-883d-39da1cb53466-utilities\") pod \"certified-operators-bzn6n\" (UID: \"0a4af6b5-7bcb-4641-883d-39da1cb53466\") " pod="openshift-marketplace/certified-operators-bzn6n" Nov 28 16:04:56 crc kubenswrapper[4857]: I1128 16:04:56.235306 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a4af6b5-7bcb-4641-883d-39da1cb53466-utilities\") pod \"certified-operators-bzn6n\" (UID: \"0a4af6b5-7bcb-4641-883d-39da1cb53466\") " pod="openshift-marketplace/certified-operators-bzn6n" Nov 28 16:04:56 crc kubenswrapper[4857]: I1128 16:04:56.235893 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a4af6b5-7bcb-4641-883d-39da1cb53466-catalog-content\") pod \"certified-operators-bzn6n\" (UID: \"0a4af6b5-7bcb-4641-883d-39da1cb53466\") " 
pod="openshift-marketplace/certified-operators-bzn6n" Nov 28 16:04:56 crc kubenswrapper[4857]: I1128 16:04:56.263385 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d6lns\" (UniqueName: \"kubernetes.io/projected/0a4af6b5-7bcb-4641-883d-39da1cb53466-kube-api-access-d6lns\") pod \"certified-operators-bzn6n\" (UID: \"0a4af6b5-7bcb-4641-883d-39da1cb53466\") " pod="openshift-marketplace/certified-operators-bzn6n" Nov 28 16:04:56 crc kubenswrapper[4857]: I1128 16:04:56.341648 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bzn6n" Nov 28 16:04:56 crc kubenswrapper[4857]: I1128 16:04:56.908308 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-bzn6n"] Nov 28 16:04:56 crc kubenswrapper[4857]: W1128 16:04:56.918408 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0a4af6b5_7bcb_4641_883d_39da1cb53466.slice/crio-7ce5e89fe3182332bc6037125b26ff8528afd75b9b2f26c3f10d842583b00456 WatchSource:0}: Error finding container 7ce5e89fe3182332bc6037125b26ff8528afd75b9b2f26c3f10d842583b00456: Status 404 returned error can't find the container with id 7ce5e89fe3182332bc6037125b26ff8528afd75b9b2f26c3f10d842583b00456 Nov 28 16:04:57 crc kubenswrapper[4857]: I1128 16:04:57.217553 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bzn6n" event={"ID":"0a4af6b5-7bcb-4641-883d-39da1cb53466","Type":"ContainerStarted","Data":"7ce5e89fe3182332bc6037125b26ff8528afd75b9b2f26c3f10d842583b00456"} Nov 28 16:04:58 crc kubenswrapper[4857]: I1128 16:04:58.229733 4857 generic.go:334] "Generic (PLEG): container finished" podID="0a4af6b5-7bcb-4641-883d-39da1cb53466" containerID="d63be9de816bef589ebec30d68f39e14ba5ed7edfef1c32d4c5c4e688103fc9d" exitCode=0 Nov 28 16:04:58 crc kubenswrapper[4857]: I1128 16:04:58.232134 4857 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 16:04:58 crc kubenswrapper[4857]: I1128 16:04:58.246418 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bzn6n" event={"ID":"0a4af6b5-7bcb-4641-883d-39da1cb53466","Type":"ContainerDied","Data":"d63be9de816bef589ebec30d68f39e14ba5ed7edfef1c32d4c5c4e688103fc9d"} Nov 28 16:05:01 crc kubenswrapper[4857]: I1128 16:05:01.229742 4857 scope.go:117] "RemoveContainer" containerID="eae255c7ca23e085c377e42c983c0700a2ad67405174b16d098d016c18acf626" Nov 28 16:05:01 crc kubenswrapper[4857]: E1128 16:05:01.230990 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:05:03 crc kubenswrapper[4857]: I1128 16:05:03.285418 4857 generic.go:334] "Generic (PLEG): container finished" podID="0a4af6b5-7bcb-4641-883d-39da1cb53466" containerID="9b46555f9bb06d186b3da982f6471f877d310184b5d24fc6216016943ba806b5" exitCode=0 Nov 28 16:05:03 crc kubenswrapper[4857]: I1128 16:05:03.285871 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bzn6n" 
event={"ID":"0a4af6b5-7bcb-4641-883d-39da1cb53466","Type":"ContainerDied","Data":"9b46555f9bb06d186b3da982f6471f877d310184b5d24fc6216016943ba806b5"} Nov 28 16:05:06 crc kubenswrapper[4857]: I1128 16:05:06.321079 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bzn6n" event={"ID":"0a4af6b5-7bcb-4641-883d-39da1cb53466","Type":"ContainerStarted","Data":"089f9d72aef35c00576eb98a5bea1555e7785dc5cc507c5b8bc723927b7fea51"} Nov 28 16:05:06 crc kubenswrapper[4857]: I1128 16:05:06.342113 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-bzn6n" Nov 28 16:05:06 crc kubenswrapper[4857]: I1128 16:05:06.342192 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-bzn6n" Nov 28 16:05:06 crc kubenswrapper[4857]: I1128 16:05:06.356943 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-bzn6n" podStartSLOduration=3.628021548 podStartE2EDuration="11.356918328s" podCreationTimestamp="2025-11-28 16:04:55 +0000 UTC" firstStartedPulling="2025-11-28 16:04:58.231745659 +0000 UTC m=+9348.355687106" lastFinishedPulling="2025-11-28 16:05:05.960642429 +0000 UTC m=+9356.084583886" observedRunningTime="2025-11-28 16:05:06.342032922 +0000 UTC m=+9356.465974359" watchObservedRunningTime="2025-11-28 16:05:06.356918328 +0000 UTC m=+9356.480859765" Nov 28 16:05:07 crc kubenswrapper[4857]: I1128 16:05:07.404401 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-bzn6n" podUID="0a4af6b5-7bcb-4641-883d-39da1cb53466" containerName="registry-server" probeResult="failure" output=< Nov 28 16:05:07 crc kubenswrapper[4857]: timeout: failed to connect service ":50051" within 1s Nov 28 16:05:07 crc kubenswrapper[4857]: > Nov 28 16:05:15 crc kubenswrapper[4857]: I1128 16:05:15.229666 4857 scope.go:117] "RemoveContainer" containerID="eae255c7ca23e085c377e42c983c0700a2ad67405174b16d098d016c18acf626" Nov 28 16:05:15 crc kubenswrapper[4857]: E1128 16:05:15.230570 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:05:16 crc kubenswrapper[4857]: I1128 16:05:16.403672 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-bzn6n" Nov 28 16:05:16 crc kubenswrapper[4857]: I1128 16:05:16.464357 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-bzn6n" Nov 28 16:05:16 crc kubenswrapper[4857]: I1128 16:05:16.640522 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-bzn6n"] Nov 28 16:05:17 crc kubenswrapper[4857]: I1128 16:05:17.438769 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-bzn6n" podUID="0a4af6b5-7bcb-4641-883d-39da1cb53466" containerName="registry-server" containerID="cri-o://089f9d72aef35c00576eb98a5bea1555e7785dc5cc507c5b8bc723927b7fea51" gracePeriod=2 Nov 28 16:05:18 crc kubenswrapper[4857]: I1128 
16:05:18.084637 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bzn6n" Nov 28 16:05:18 crc kubenswrapper[4857]: I1128 16:05:18.207202 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a4af6b5-7bcb-4641-883d-39da1cb53466-utilities\") pod \"0a4af6b5-7bcb-4641-883d-39da1cb53466\" (UID: \"0a4af6b5-7bcb-4641-883d-39da1cb53466\") " Nov 28 16:05:18 crc kubenswrapper[4857]: I1128 16:05:18.207352 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6lns\" (UniqueName: \"kubernetes.io/projected/0a4af6b5-7bcb-4641-883d-39da1cb53466-kube-api-access-d6lns\") pod \"0a4af6b5-7bcb-4641-883d-39da1cb53466\" (UID: \"0a4af6b5-7bcb-4641-883d-39da1cb53466\") " Nov 28 16:05:18 crc kubenswrapper[4857]: I1128 16:05:18.207429 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a4af6b5-7bcb-4641-883d-39da1cb53466-catalog-content\") pod \"0a4af6b5-7bcb-4641-883d-39da1cb53466\" (UID: \"0a4af6b5-7bcb-4641-883d-39da1cb53466\") " Nov 28 16:05:18 crc kubenswrapper[4857]: I1128 16:05:18.207848 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a4af6b5-7bcb-4641-883d-39da1cb53466-utilities" (OuterVolumeSpecName: "utilities") pod "0a4af6b5-7bcb-4641-883d-39da1cb53466" (UID: "0a4af6b5-7bcb-4641-883d-39da1cb53466"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:05:18 crc kubenswrapper[4857]: I1128 16:05:18.214674 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a4af6b5-7bcb-4641-883d-39da1cb53466-kube-api-access-d6lns" (OuterVolumeSpecName: "kube-api-access-d6lns") pod "0a4af6b5-7bcb-4641-883d-39da1cb53466" (UID: "0a4af6b5-7bcb-4641-883d-39da1cb53466"). InnerVolumeSpecName "kube-api-access-d6lns". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:05:18 crc kubenswrapper[4857]: I1128 16:05:18.247613 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a4af6b5-7bcb-4641-883d-39da1cb53466-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0a4af6b5-7bcb-4641-883d-39da1cb53466" (UID: "0a4af6b5-7bcb-4641-883d-39da1cb53466"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:05:18 crc kubenswrapper[4857]: I1128 16:05:18.313472 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0a4af6b5-7bcb-4641-883d-39da1cb53466-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:05:18 crc kubenswrapper[4857]: I1128 16:05:18.313550 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6lns\" (UniqueName: \"kubernetes.io/projected/0a4af6b5-7bcb-4641-883d-39da1cb53466-kube-api-access-d6lns\") on node \"crc\" DevicePath \"\"" Nov 28 16:05:18 crc kubenswrapper[4857]: I1128 16:05:18.313573 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0a4af6b5-7bcb-4641-883d-39da1cb53466-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:05:18 crc kubenswrapper[4857]: I1128 16:05:18.452184 4857 generic.go:334] "Generic (PLEG): container finished" podID="0a4af6b5-7bcb-4641-883d-39da1cb53466" containerID="089f9d72aef35c00576eb98a5bea1555e7785dc5cc507c5b8bc723927b7fea51" exitCode=0 Nov 28 16:05:18 crc kubenswrapper[4857]: I1128 16:05:18.452224 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bzn6n" event={"ID":"0a4af6b5-7bcb-4641-883d-39da1cb53466","Type":"ContainerDied","Data":"089f9d72aef35c00576eb98a5bea1555e7785dc5cc507c5b8bc723927b7fea51"} Nov 28 16:05:18 crc kubenswrapper[4857]: I1128 16:05:18.452247 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-bzn6n" Nov 28 16:05:18 crc kubenswrapper[4857]: I1128 16:05:18.452257 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-bzn6n" event={"ID":"0a4af6b5-7bcb-4641-883d-39da1cb53466","Type":"ContainerDied","Data":"7ce5e89fe3182332bc6037125b26ff8528afd75b9b2f26c3f10d842583b00456"} Nov 28 16:05:18 crc kubenswrapper[4857]: I1128 16:05:18.452275 4857 scope.go:117] "RemoveContainer" containerID="089f9d72aef35c00576eb98a5bea1555e7785dc5cc507c5b8bc723927b7fea51" Nov 28 16:05:18 crc kubenswrapper[4857]: I1128 16:05:18.514203 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-bzn6n"] Nov 28 16:05:18 crc kubenswrapper[4857]: I1128 16:05:18.529237 4857 scope.go:117] "RemoveContainer" containerID="9b46555f9bb06d186b3da982f6471f877d310184b5d24fc6216016943ba806b5" Nov 28 16:05:18 crc kubenswrapper[4857]: I1128 16:05:18.537919 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-bzn6n"] Nov 28 16:05:18 crc kubenswrapper[4857]: I1128 16:05:18.591853 4857 scope.go:117] "RemoveContainer" containerID="d63be9de816bef589ebec30d68f39e14ba5ed7edfef1c32d4c5c4e688103fc9d" Nov 28 16:05:18 crc kubenswrapper[4857]: I1128 16:05:18.641779 4857 scope.go:117] "RemoveContainer" containerID="089f9d72aef35c00576eb98a5bea1555e7785dc5cc507c5b8bc723927b7fea51" Nov 28 16:05:18 crc kubenswrapper[4857]: E1128 16:05:18.642234 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"089f9d72aef35c00576eb98a5bea1555e7785dc5cc507c5b8bc723927b7fea51\": container with ID starting with 089f9d72aef35c00576eb98a5bea1555e7785dc5cc507c5b8bc723927b7fea51 not found: ID does not exist" containerID="089f9d72aef35c00576eb98a5bea1555e7785dc5cc507c5b8bc723927b7fea51" Nov 28 16:05:18 crc kubenswrapper[4857]: I1128 16:05:18.642264 
4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"089f9d72aef35c00576eb98a5bea1555e7785dc5cc507c5b8bc723927b7fea51"} err="failed to get container status \"089f9d72aef35c00576eb98a5bea1555e7785dc5cc507c5b8bc723927b7fea51\": rpc error: code = NotFound desc = could not find container \"089f9d72aef35c00576eb98a5bea1555e7785dc5cc507c5b8bc723927b7fea51\": container with ID starting with 089f9d72aef35c00576eb98a5bea1555e7785dc5cc507c5b8bc723927b7fea51 not found: ID does not exist" Nov 28 16:05:18 crc kubenswrapper[4857]: I1128 16:05:18.642282 4857 scope.go:117] "RemoveContainer" containerID="9b46555f9bb06d186b3da982f6471f877d310184b5d24fc6216016943ba806b5" Nov 28 16:05:18 crc kubenswrapper[4857]: E1128 16:05:18.642572 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9b46555f9bb06d186b3da982f6471f877d310184b5d24fc6216016943ba806b5\": container with ID starting with 9b46555f9bb06d186b3da982f6471f877d310184b5d24fc6216016943ba806b5 not found: ID does not exist" containerID="9b46555f9bb06d186b3da982f6471f877d310184b5d24fc6216016943ba806b5" Nov 28 16:05:18 crc kubenswrapper[4857]: I1128 16:05:18.642596 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b46555f9bb06d186b3da982f6471f877d310184b5d24fc6216016943ba806b5"} err="failed to get container status \"9b46555f9bb06d186b3da982f6471f877d310184b5d24fc6216016943ba806b5\": rpc error: code = NotFound desc = could not find container \"9b46555f9bb06d186b3da982f6471f877d310184b5d24fc6216016943ba806b5\": container with ID starting with 9b46555f9bb06d186b3da982f6471f877d310184b5d24fc6216016943ba806b5 not found: ID does not exist" Nov 28 16:05:18 crc kubenswrapper[4857]: I1128 16:05:18.642608 4857 scope.go:117] "RemoveContainer" containerID="d63be9de816bef589ebec30d68f39e14ba5ed7edfef1c32d4c5c4e688103fc9d" Nov 28 16:05:18 crc kubenswrapper[4857]: E1128 16:05:18.642829 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d63be9de816bef589ebec30d68f39e14ba5ed7edfef1c32d4c5c4e688103fc9d\": container with ID starting with d63be9de816bef589ebec30d68f39e14ba5ed7edfef1c32d4c5c4e688103fc9d not found: ID does not exist" containerID="d63be9de816bef589ebec30d68f39e14ba5ed7edfef1c32d4c5c4e688103fc9d" Nov 28 16:05:18 crc kubenswrapper[4857]: I1128 16:05:18.642846 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d63be9de816bef589ebec30d68f39e14ba5ed7edfef1c32d4c5c4e688103fc9d"} err="failed to get container status \"d63be9de816bef589ebec30d68f39e14ba5ed7edfef1c32d4c5c4e688103fc9d\": rpc error: code = NotFound desc = could not find container \"d63be9de816bef589ebec30d68f39e14ba5ed7edfef1c32d4c5c4e688103fc9d\": container with ID starting with d63be9de816bef589ebec30d68f39e14ba5ed7edfef1c32d4c5c4e688103fc9d not found: ID does not exist" Nov 28 16:05:20 crc kubenswrapper[4857]: I1128 16:05:20.305862 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a4af6b5-7bcb-4641-883d-39da1cb53466" path="/var/lib/kubelet/pods/0a4af6b5-7bcb-4641-883d-39da1cb53466/volumes" Nov 28 16:05:30 crc kubenswrapper[4857]: I1128 16:05:30.242390 4857 scope.go:117] "RemoveContainer" containerID="eae255c7ca23e085c377e42c983c0700a2ad67405174b16d098d016c18acf626" Nov 28 16:05:30 crc kubenswrapper[4857]: E1128 16:05:30.251074 4857 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:05:44 crc kubenswrapper[4857]: I1128 16:05:44.229851 4857 scope.go:117] "RemoveContainer" containerID="eae255c7ca23e085c377e42c983c0700a2ad67405174b16d098d016c18acf626" Nov 28 16:05:44 crc kubenswrapper[4857]: E1128 16:05:44.240904 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:05:58 crc kubenswrapper[4857]: I1128 16:05:58.229363 4857 scope.go:117] "RemoveContainer" containerID="eae255c7ca23e085c377e42c983c0700a2ad67405174b16d098d016c18acf626" Nov 28 16:05:58 crc kubenswrapper[4857]: E1128 16:05:58.230671 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:06:13 crc kubenswrapper[4857]: I1128 16:06:13.236132 4857 scope.go:117] "RemoveContainer" containerID="eae255c7ca23e085c377e42c983c0700a2ad67405174b16d098d016c18acf626" Nov 28 16:06:13 crc kubenswrapper[4857]: E1128 16:06:13.237571 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:06:25 crc kubenswrapper[4857]: I1128 16:06:25.228959 4857 scope.go:117] "RemoveContainer" containerID="eae255c7ca23e085c377e42c983c0700a2ad67405174b16d098d016c18acf626" Nov 28 16:06:25 crc kubenswrapper[4857]: E1128 16:06:25.229895 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:06:40 crc kubenswrapper[4857]: I1128 16:06:40.235892 4857 scope.go:117] "RemoveContainer" containerID="eae255c7ca23e085c377e42c983c0700a2ad67405174b16d098d016c18acf626" Nov 28 16:06:40 crc kubenswrapper[4857]: E1128 16:06:40.236860 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:06:51 crc kubenswrapper[4857]: I1128 16:06:51.229293 4857 scope.go:117] "RemoveContainer" containerID="eae255c7ca23e085c377e42c983c0700a2ad67405174b16d098d016c18acf626" Nov 28 16:06:51 crc kubenswrapper[4857]: E1128 16:06:51.230122 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:07:02 crc kubenswrapper[4857]: I1128 16:07:02.229375 4857 scope.go:117] "RemoveContainer" containerID="eae255c7ca23e085c377e42c983c0700a2ad67405174b16d098d016c18acf626" Nov 28 16:07:02 crc kubenswrapper[4857]: E1128 16:07:02.232415 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:07:17 crc kubenswrapper[4857]: I1128 16:07:17.229908 4857 scope.go:117] "RemoveContainer" containerID="eae255c7ca23e085c377e42c983c0700a2ad67405174b16d098d016c18acf626" Nov 28 16:07:17 crc kubenswrapper[4857]: E1128 16:07:17.231254 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:07:29 crc kubenswrapper[4857]: I1128 16:07:29.229341 4857 scope.go:117] "RemoveContainer" containerID="eae255c7ca23e085c377e42c983c0700a2ad67405174b16d098d016c18acf626" Nov 28 16:07:29 crc kubenswrapper[4857]: E1128 16:07:29.230136 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:07:41 crc kubenswrapper[4857]: I1128 16:07:41.229029 4857 scope.go:117] "RemoveContainer" containerID="eae255c7ca23e085c377e42c983c0700a2ad67405174b16d098d016c18acf626" Nov 28 16:07:41 crc kubenswrapper[4857]: E1128 16:07:41.229853 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" 
podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:07:55 crc kubenswrapper[4857]: I1128 16:07:55.228684 4857 scope.go:117] "RemoveContainer" containerID="eae255c7ca23e085c377e42c983c0700a2ad67405174b16d098d016c18acf626" Nov 28 16:07:55 crc kubenswrapper[4857]: E1128 16:07:55.229435 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:07:58 crc kubenswrapper[4857]: I1128 16:07:58.110226 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-rn4x7"] Nov 28 16:07:58 crc kubenswrapper[4857]: E1128 16:07:58.111721 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a4af6b5-7bcb-4641-883d-39da1cb53466" containerName="registry-server" Nov 28 16:07:58 crc kubenswrapper[4857]: I1128 16:07:58.111745 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a4af6b5-7bcb-4641-883d-39da1cb53466" containerName="registry-server" Nov 28 16:07:58 crc kubenswrapper[4857]: E1128 16:07:58.111786 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a4af6b5-7bcb-4641-883d-39da1cb53466" containerName="extract-utilities" Nov 28 16:07:58 crc kubenswrapper[4857]: I1128 16:07:58.111798 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a4af6b5-7bcb-4641-883d-39da1cb53466" containerName="extract-utilities" Nov 28 16:07:58 crc kubenswrapper[4857]: E1128 16:07:58.111854 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a4af6b5-7bcb-4641-883d-39da1cb53466" containerName="extract-content" Nov 28 16:07:58 crc kubenswrapper[4857]: I1128 16:07:58.111868 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a4af6b5-7bcb-4641-883d-39da1cb53466" containerName="extract-content" Nov 28 16:07:58 crc kubenswrapper[4857]: I1128 16:07:58.112287 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a4af6b5-7bcb-4641-883d-39da1cb53466" containerName="registry-server" Nov 28 16:07:58 crc kubenswrapper[4857]: I1128 16:07:58.115353 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-rn4x7" Nov 28 16:07:58 crc kubenswrapper[4857]: I1128 16:07:58.126084 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rn4x7"] Nov 28 16:07:58 crc kubenswrapper[4857]: I1128 16:07:58.263366 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/69edcff5-bd9b-411d-a0d6-c8d104e034ae-catalog-content\") pod \"redhat-operators-rn4x7\" (UID: \"69edcff5-bd9b-411d-a0d6-c8d104e034ae\") " pod="openshift-marketplace/redhat-operators-rn4x7" Nov 28 16:07:58 crc kubenswrapper[4857]: I1128 16:07:58.263452 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sxnvp\" (UniqueName: \"kubernetes.io/projected/69edcff5-bd9b-411d-a0d6-c8d104e034ae-kube-api-access-sxnvp\") pod \"redhat-operators-rn4x7\" (UID: \"69edcff5-bd9b-411d-a0d6-c8d104e034ae\") " pod="openshift-marketplace/redhat-operators-rn4x7" Nov 28 16:07:58 crc kubenswrapper[4857]: I1128 16:07:58.265939 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/69edcff5-bd9b-411d-a0d6-c8d104e034ae-utilities\") pod \"redhat-operators-rn4x7\" (UID: \"69edcff5-bd9b-411d-a0d6-c8d104e034ae\") " pod="openshift-marketplace/redhat-operators-rn4x7" Nov 28 16:07:58 crc kubenswrapper[4857]: I1128 16:07:58.367430 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/69edcff5-bd9b-411d-a0d6-c8d104e034ae-utilities\") pod \"redhat-operators-rn4x7\" (UID: \"69edcff5-bd9b-411d-a0d6-c8d104e034ae\") " pod="openshift-marketplace/redhat-operators-rn4x7" Nov 28 16:07:58 crc kubenswrapper[4857]: I1128 16:07:58.367658 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/69edcff5-bd9b-411d-a0d6-c8d104e034ae-catalog-content\") pod \"redhat-operators-rn4x7\" (UID: \"69edcff5-bd9b-411d-a0d6-c8d104e034ae\") " pod="openshift-marketplace/redhat-operators-rn4x7" Nov 28 16:07:58 crc kubenswrapper[4857]: I1128 16:07:58.367685 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sxnvp\" (UniqueName: \"kubernetes.io/projected/69edcff5-bd9b-411d-a0d6-c8d104e034ae-kube-api-access-sxnvp\") pod \"redhat-operators-rn4x7\" (UID: \"69edcff5-bd9b-411d-a0d6-c8d104e034ae\") " pod="openshift-marketplace/redhat-operators-rn4x7" Nov 28 16:07:58 crc kubenswrapper[4857]: I1128 16:07:58.368670 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/69edcff5-bd9b-411d-a0d6-c8d104e034ae-catalog-content\") pod \"redhat-operators-rn4x7\" (UID: \"69edcff5-bd9b-411d-a0d6-c8d104e034ae\") " pod="openshift-marketplace/redhat-operators-rn4x7" Nov 28 16:07:58 crc kubenswrapper[4857]: I1128 16:07:58.369898 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/69edcff5-bd9b-411d-a0d6-c8d104e034ae-utilities\") pod \"redhat-operators-rn4x7\" (UID: \"69edcff5-bd9b-411d-a0d6-c8d104e034ae\") " pod="openshift-marketplace/redhat-operators-rn4x7" Nov 28 16:07:58 crc kubenswrapper[4857]: I1128 16:07:58.389660 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-sxnvp\" (UniqueName: \"kubernetes.io/projected/69edcff5-bd9b-411d-a0d6-c8d104e034ae-kube-api-access-sxnvp\") pod \"redhat-operators-rn4x7\" (UID: \"69edcff5-bd9b-411d-a0d6-c8d104e034ae\") " pod="openshift-marketplace/redhat-operators-rn4x7" Nov 28 16:07:58 crc kubenswrapper[4857]: I1128 16:07:58.465737 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rn4x7" Nov 28 16:07:59 crc kubenswrapper[4857]: I1128 16:07:59.004704 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rn4x7"] Nov 28 16:07:59 crc kubenswrapper[4857]: W1128 16:07:59.008305 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod69edcff5_bd9b_411d_a0d6_c8d104e034ae.slice/crio-a533bf3e58139c67615ce3e801fb3212da577f47e7c7b5ce8c6c493e99f9b299 WatchSource:0}: Error finding container a533bf3e58139c67615ce3e801fb3212da577f47e7c7b5ce8c6c493e99f9b299: Status 404 returned error can't find the container with id a533bf3e58139c67615ce3e801fb3212da577f47e7c7b5ce8c6c493e99f9b299 Nov 28 16:07:59 crc kubenswrapper[4857]: I1128 16:07:59.204648 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rn4x7" event={"ID":"69edcff5-bd9b-411d-a0d6-c8d104e034ae","Type":"ContainerStarted","Data":"a533bf3e58139c67615ce3e801fb3212da577f47e7c7b5ce8c6c493e99f9b299"} Nov 28 16:08:00 crc kubenswrapper[4857]: I1128 16:08:00.219546 4857 generic.go:334] "Generic (PLEG): container finished" podID="69edcff5-bd9b-411d-a0d6-c8d104e034ae" containerID="58156a1c09b7815e8ca6c5910d488a573dfe279fe4ca09b49dd7be195bd6ac31" exitCode=0 Nov 28 16:08:00 crc kubenswrapper[4857]: I1128 16:08:00.219626 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rn4x7" event={"ID":"69edcff5-bd9b-411d-a0d6-c8d104e034ae","Type":"ContainerDied","Data":"58156a1c09b7815e8ca6c5910d488a573dfe279fe4ca09b49dd7be195bd6ac31"} Nov 28 16:08:02 crc kubenswrapper[4857]: I1128 16:08:02.244239 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rn4x7" event={"ID":"69edcff5-bd9b-411d-a0d6-c8d104e034ae","Type":"ContainerStarted","Data":"bf450896dc1b7aeda7f206f0735397b6e6d8d36fdf49d5f9828553f5ab8fb508"} Nov 28 16:08:05 crc kubenswrapper[4857]: I1128 16:08:05.284705 4857 generic.go:334] "Generic (PLEG): container finished" podID="69edcff5-bd9b-411d-a0d6-c8d104e034ae" containerID="bf450896dc1b7aeda7f206f0735397b6e6d8d36fdf49d5f9828553f5ab8fb508" exitCode=0 Nov 28 16:08:05 crc kubenswrapper[4857]: I1128 16:08:05.284825 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rn4x7" event={"ID":"69edcff5-bd9b-411d-a0d6-c8d104e034ae","Type":"ContainerDied","Data":"bf450896dc1b7aeda7f206f0735397b6e6d8d36fdf49d5f9828553f5ab8fb508"} Nov 28 16:08:06 crc kubenswrapper[4857]: I1128 16:08:06.300809 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rn4x7" event={"ID":"69edcff5-bd9b-411d-a0d6-c8d104e034ae","Type":"ContainerStarted","Data":"9c2cfc942b86101a9b9cc9d9a16e1c7a165b41257c913211755f8f23f59ed8ea"} Nov 28 16:08:06 crc kubenswrapper[4857]: I1128 16:08:06.338357 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-rn4x7" podStartSLOduration=2.705912953 podStartE2EDuration="8.33832914s" podCreationTimestamp="2025-11-28 
16:07:58 +0000 UTC" firstStartedPulling="2025-11-28 16:08:00.222607722 +0000 UTC m=+9530.346549199" lastFinishedPulling="2025-11-28 16:08:05.855023949 +0000 UTC m=+9535.978965386" observedRunningTime="2025-11-28 16:08:06.320200141 +0000 UTC m=+9536.444141588" watchObservedRunningTime="2025-11-28 16:08:06.33832914 +0000 UTC m=+9536.462270597" Nov 28 16:08:08 crc kubenswrapper[4857]: I1128 16:08:08.466885 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-rn4x7" Nov 28 16:08:08 crc kubenswrapper[4857]: I1128 16:08:08.467219 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-rn4x7" Nov 28 16:08:10 crc kubenswrapper[4857]: I1128 16:08:10.027862 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-rn4x7" podUID="69edcff5-bd9b-411d-a0d6-c8d104e034ae" containerName="registry-server" probeResult="failure" output=< Nov 28 16:08:10 crc kubenswrapper[4857]: timeout: failed to connect service ":50051" within 1s Nov 28 16:08:10 crc kubenswrapper[4857]: > Nov 28 16:08:10 crc kubenswrapper[4857]: I1128 16:08:10.242381 4857 scope.go:117] "RemoveContainer" containerID="eae255c7ca23e085c377e42c983c0700a2ad67405174b16d098d016c18acf626" Nov 28 16:08:10 crc kubenswrapper[4857]: E1128 16:08:10.242998 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:08:18 crc kubenswrapper[4857]: I1128 16:08:18.534197 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-rn4x7" Nov 28 16:08:18 crc kubenswrapper[4857]: I1128 16:08:18.597846 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-rn4x7" Nov 28 16:08:19 crc kubenswrapper[4857]: I1128 16:08:19.085286 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rn4x7"] Nov 28 16:08:20 crc kubenswrapper[4857]: I1128 16:08:20.465758 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-rn4x7" podUID="69edcff5-bd9b-411d-a0d6-c8d104e034ae" containerName="registry-server" containerID="cri-o://9c2cfc942b86101a9b9cc9d9a16e1c7a165b41257c913211755f8f23f59ed8ea" gracePeriod=2 Nov 28 16:08:21 crc kubenswrapper[4857]: I1128 16:08:21.496554 4857 generic.go:334] "Generic (PLEG): container finished" podID="69edcff5-bd9b-411d-a0d6-c8d104e034ae" containerID="9c2cfc942b86101a9b9cc9d9a16e1c7a165b41257c913211755f8f23f59ed8ea" exitCode=0 Nov 28 16:08:21 crc kubenswrapper[4857]: I1128 16:08:21.497018 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rn4x7" event={"ID":"69edcff5-bd9b-411d-a0d6-c8d104e034ae","Type":"ContainerDied","Data":"9c2cfc942b86101a9b9cc9d9a16e1c7a165b41257c913211755f8f23f59ed8ea"} Nov 28 16:08:21 crc kubenswrapper[4857]: I1128 16:08:21.591793 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-rn4x7" Nov 28 16:08:21 crc kubenswrapper[4857]: I1128 16:08:21.705721 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sxnvp\" (UniqueName: \"kubernetes.io/projected/69edcff5-bd9b-411d-a0d6-c8d104e034ae-kube-api-access-sxnvp\") pod \"69edcff5-bd9b-411d-a0d6-c8d104e034ae\" (UID: \"69edcff5-bd9b-411d-a0d6-c8d104e034ae\") " Nov 28 16:08:21 crc kubenswrapper[4857]: I1128 16:08:21.705836 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/69edcff5-bd9b-411d-a0d6-c8d104e034ae-utilities\") pod \"69edcff5-bd9b-411d-a0d6-c8d104e034ae\" (UID: \"69edcff5-bd9b-411d-a0d6-c8d104e034ae\") " Nov 28 16:08:21 crc kubenswrapper[4857]: I1128 16:08:21.705990 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/69edcff5-bd9b-411d-a0d6-c8d104e034ae-catalog-content\") pod \"69edcff5-bd9b-411d-a0d6-c8d104e034ae\" (UID: \"69edcff5-bd9b-411d-a0d6-c8d104e034ae\") " Nov 28 16:08:21 crc kubenswrapper[4857]: I1128 16:08:21.706835 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/69edcff5-bd9b-411d-a0d6-c8d104e034ae-utilities" (OuterVolumeSpecName: "utilities") pod "69edcff5-bd9b-411d-a0d6-c8d104e034ae" (UID: "69edcff5-bd9b-411d-a0d6-c8d104e034ae"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:08:21 crc kubenswrapper[4857]: I1128 16:08:21.711666 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69edcff5-bd9b-411d-a0d6-c8d104e034ae-kube-api-access-sxnvp" (OuterVolumeSpecName: "kube-api-access-sxnvp") pod "69edcff5-bd9b-411d-a0d6-c8d104e034ae" (UID: "69edcff5-bd9b-411d-a0d6-c8d104e034ae"). InnerVolumeSpecName "kube-api-access-sxnvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:08:21 crc kubenswrapper[4857]: I1128 16:08:21.808033 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sxnvp\" (UniqueName: \"kubernetes.io/projected/69edcff5-bd9b-411d-a0d6-c8d104e034ae-kube-api-access-sxnvp\") on node \"crc\" DevicePath \"\"" Nov 28 16:08:21 crc kubenswrapper[4857]: I1128 16:08:21.808064 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/69edcff5-bd9b-411d-a0d6-c8d104e034ae-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:08:21 crc kubenswrapper[4857]: I1128 16:08:21.826636 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/69edcff5-bd9b-411d-a0d6-c8d104e034ae-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "69edcff5-bd9b-411d-a0d6-c8d104e034ae" (UID: "69edcff5-bd9b-411d-a0d6-c8d104e034ae"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:08:21 crc kubenswrapper[4857]: I1128 16:08:21.909664 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/69edcff5-bd9b-411d-a0d6-c8d104e034ae-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:08:22 crc kubenswrapper[4857]: I1128 16:08:22.509836 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rn4x7" event={"ID":"69edcff5-bd9b-411d-a0d6-c8d104e034ae","Type":"ContainerDied","Data":"a533bf3e58139c67615ce3e801fb3212da577f47e7c7b5ce8c6c493e99f9b299"} Nov 28 16:08:22 crc kubenswrapper[4857]: I1128 16:08:22.510285 4857 scope.go:117] "RemoveContainer" containerID="9c2cfc942b86101a9b9cc9d9a16e1c7a165b41257c913211755f8f23f59ed8ea" Nov 28 16:08:22 crc kubenswrapper[4857]: I1128 16:08:22.510468 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rn4x7" Nov 28 16:08:22 crc kubenswrapper[4857]: I1128 16:08:22.543614 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rn4x7"] Nov 28 16:08:22 crc kubenswrapper[4857]: I1128 16:08:22.555644 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-rn4x7"] Nov 28 16:08:22 crc kubenswrapper[4857]: I1128 16:08:22.599840 4857 scope.go:117] "RemoveContainer" containerID="bf450896dc1b7aeda7f206f0735397b6e6d8d36fdf49d5f9828553f5ab8fb508" Nov 28 16:08:22 crc kubenswrapper[4857]: I1128 16:08:22.645376 4857 scope.go:117] "RemoveContainer" containerID="58156a1c09b7815e8ca6c5910d488a573dfe279fe4ca09b49dd7be195bd6ac31" Nov 28 16:08:24 crc kubenswrapper[4857]: I1128 16:08:24.243302 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="69edcff5-bd9b-411d-a0d6-c8d104e034ae" path="/var/lib/kubelet/pods/69edcff5-bd9b-411d-a0d6-c8d104e034ae/volumes" Nov 28 16:08:25 crc kubenswrapper[4857]: I1128 16:08:25.229188 4857 scope.go:117] "RemoveContainer" containerID="eae255c7ca23e085c377e42c983c0700a2ad67405174b16d098d016c18acf626" Nov 28 16:08:25 crc kubenswrapper[4857]: E1128 16:08:25.229874 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:08:38 crc kubenswrapper[4857]: I1128 16:08:38.230654 4857 scope.go:117] "RemoveContainer" containerID="eae255c7ca23e085c377e42c983c0700a2ad67405174b16d098d016c18acf626" Nov 28 16:08:38 crc kubenswrapper[4857]: E1128 16:08:38.231891 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:08:53 crc kubenswrapper[4857]: I1128 16:08:53.228824 4857 scope.go:117] "RemoveContainer" containerID="eae255c7ca23e085c377e42c983c0700a2ad67405174b16d098d016c18acf626" Nov 28 16:08:53 crc kubenswrapper[4857]: I1128 16:08:53.907599 
4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerStarted","Data":"c942c5d609ce7e62c0a6cd31d31c499b688c7fdc91159358f38ec73db2f1a01e"} Nov 28 16:09:08 crc kubenswrapper[4857]: I1128 16:09:08.081069 4857 generic.go:334] "Generic (PLEG): container finished" podID="d8c62c98-9304-412b-9738-99fb9dad59a6" containerID="75ddae5522292d4898a026f3c65330438de491f69d6d3417fb97247ffd53a31a" exitCode=0 Nov 28 16:09:08 crc kubenswrapper[4857]: I1128 16:09:08.081156 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dhcp-openstack-openstack-cell1-hwdxh" event={"ID":"d8c62c98-9304-412b-9738-99fb9dad59a6","Type":"ContainerDied","Data":"75ddae5522292d4898a026f3c65330438de491f69d6d3417fb97247ffd53a31a"} Nov 28 16:09:10 crc kubenswrapper[4857]: I1128 16:09:10.115141 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dhcp-openstack-openstack-cell1-hwdxh" event={"ID":"d8c62c98-9304-412b-9738-99fb9dad59a6","Type":"ContainerDied","Data":"25fca2b69e9b299f9801e38a86602078214cde7e8a846e1e5bced8919d965efa"} Nov 28 16:09:10 crc kubenswrapper[4857]: I1128 16:09:10.117178 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="25fca2b69e9b299f9801e38a86602078214cde7e8a846e1e5bced8919d965efa" Nov 28 16:09:10 crc kubenswrapper[4857]: I1128 16:09:10.236382 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-dhcp-openstack-openstack-cell1-hwdxh" Nov 28 16:09:10 crc kubenswrapper[4857]: I1128 16:09:10.360028 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6c8c6\" (UniqueName: \"kubernetes.io/projected/d8c62c98-9304-412b-9738-99fb9dad59a6-kube-api-access-6c8c6\") pod \"d8c62c98-9304-412b-9738-99fb9dad59a6\" (UID: \"d8c62c98-9304-412b-9738-99fb9dad59a6\") " Nov 28 16:09:10 crc kubenswrapper[4857]: I1128 16:09:10.360087 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d8c62c98-9304-412b-9738-99fb9dad59a6-ssh-key\") pod \"d8c62c98-9304-412b-9738-99fb9dad59a6\" (UID: \"d8c62c98-9304-412b-9738-99fb9dad59a6\") " Nov 28 16:09:10 crc kubenswrapper[4857]: I1128 16:09:10.360144 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/d8c62c98-9304-412b-9738-99fb9dad59a6-neutron-dhcp-agent-neutron-config-0\") pod \"d8c62c98-9304-412b-9738-99fb9dad59a6\" (UID: \"d8c62c98-9304-412b-9738-99fb9dad59a6\") " Nov 28 16:09:10 crc kubenswrapper[4857]: I1128 16:09:10.360177 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d8c62c98-9304-412b-9738-99fb9dad59a6-inventory\") pod \"d8c62c98-9304-412b-9738-99fb9dad59a6\" (UID: \"d8c62c98-9304-412b-9738-99fb9dad59a6\") " Nov 28 16:09:10 crc kubenswrapper[4857]: I1128 16:09:10.360304 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8c62c98-9304-412b-9738-99fb9dad59a6-neutron-dhcp-combined-ca-bundle\") pod \"d8c62c98-9304-412b-9738-99fb9dad59a6\" (UID: \"d8c62c98-9304-412b-9738-99fb9dad59a6\") " Nov 28 16:09:10 crc kubenswrapper[4857]: I1128 16:09:10.360375 4857 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d8c62c98-9304-412b-9738-99fb9dad59a6-ceph\") pod \"d8c62c98-9304-412b-9738-99fb9dad59a6\" (UID: \"d8c62c98-9304-412b-9738-99fb9dad59a6\") " Nov 28 16:09:10 crc kubenswrapper[4857]: I1128 16:09:10.365798 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8c62c98-9304-412b-9738-99fb9dad59a6-kube-api-access-6c8c6" (OuterVolumeSpecName: "kube-api-access-6c8c6") pod "d8c62c98-9304-412b-9738-99fb9dad59a6" (UID: "d8c62c98-9304-412b-9738-99fb9dad59a6"). InnerVolumeSpecName "kube-api-access-6c8c6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:09:10 crc kubenswrapper[4857]: I1128 16:09:10.367143 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8c62c98-9304-412b-9738-99fb9dad59a6-neutron-dhcp-combined-ca-bundle" (OuterVolumeSpecName: "neutron-dhcp-combined-ca-bundle") pod "d8c62c98-9304-412b-9738-99fb9dad59a6" (UID: "d8c62c98-9304-412b-9738-99fb9dad59a6"). InnerVolumeSpecName "neutron-dhcp-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:09:10 crc kubenswrapper[4857]: I1128 16:09:10.370054 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8c62c98-9304-412b-9738-99fb9dad59a6-ceph" (OuterVolumeSpecName: "ceph") pod "d8c62c98-9304-412b-9738-99fb9dad59a6" (UID: "d8c62c98-9304-412b-9738-99fb9dad59a6"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:09:10 crc kubenswrapper[4857]: I1128 16:09:10.391960 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8c62c98-9304-412b-9738-99fb9dad59a6-inventory" (OuterVolumeSpecName: "inventory") pod "d8c62c98-9304-412b-9738-99fb9dad59a6" (UID: "d8c62c98-9304-412b-9738-99fb9dad59a6"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:09:10 crc kubenswrapper[4857]: I1128 16:09:10.398157 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8c62c98-9304-412b-9738-99fb9dad59a6-neutron-dhcp-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-dhcp-agent-neutron-config-0") pod "d8c62c98-9304-412b-9738-99fb9dad59a6" (UID: "d8c62c98-9304-412b-9738-99fb9dad59a6"). InnerVolumeSpecName "neutron-dhcp-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:09:10 crc kubenswrapper[4857]: I1128 16:09:10.400147 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8c62c98-9304-412b-9738-99fb9dad59a6-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "d8c62c98-9304-412b-9738-99fb9dad59a6" (UID: "d8c62c98-9304-412b-9738-99fb9dad59a6"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:09:10 crc kubenswrapper[4857]: I1128 16:09:10.462809 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6c8c6\" (UniqueName: \"kubernetes.io/projected/d8c62c98-9304-412b-9738-99fb9dad59a6-kube-api-access-6c8c6\") on node \"crc\" DevicePath \"\"" Nov 28 16:09:10 crc kubenswrapper[4857]: I1128 16:09:10.462838 4857 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d8c62c98-9304-412b-9738-99fb9dad59a6-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 16:09:10 crc kubenswrapper[4857]: I1128 16:09:10.462848 4857 reconciler_common.go:293] "Volume detached for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/d8c62c98-9304-412b-9738-99fb9dad59a6-neutron-dhcp-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 28 16:09:10 crc kubenswrapper[4857]: I1128 16:09:10.462857 4857 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d8c62c98-9304-412b-9738-99fb9dad59a6-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 16:09:10 crc kubenswrapper[4857]: I1128 16:09:10.462866 4857 reconciler_common.go:293] "Volume detached for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8c62c98-9304-412b-9738-99fb9dad59a6-neutron-dhcp-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:09:10 crc kubenswrapper[4857]: I1128 16:09:10.462875 4857 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/d8c62c98-9304-412b-9738-99fb9dad59a6-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 16:09:11 crc kubenswrapper[4857]: I1128 16:09:11.129442 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-dhcp-openstack-openstack-cell1-hwdxh" Nov 28 16:09:17 crc kubenswrapper[4857]: I1128 16:09:17.155606 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 16:09:17 crc kubenswrapper[4857]: I1128 16:09:17.156375 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="bfd140a1-03fc-4dc7-9017-ab03325863e3" containerName="nova-cell0-conductor-conductor" containerID="cri-o://f54fca0d7aec2bc6af751f06180b10539c38e37e676af579284c9e9184f71ae2" gracePeriod=30 Nov 28 16:09:17 crc kubenswrapper[4857]: I1128 16:09:17.189477 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 16:09:17 crc kubenswrapper[4857]: I1128 16:09:17.189721 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-conductor-0" podUID="41e24999-304b-407e-bc06-e9b21de89249" containerName="nova-cell1-conductor-conductor" containerID="cri-o://aa9181b1906020a1865b3e399b7707ec0c9ead604e46c0e06904c9fbf81c7e76" gracePeriod=30 Nov 28 16:09:17 crc kubenswrapper[4857]: I1128 16:09:17.841199 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 28 16:09:17 crc kubenswrapper[4857]: I1128 16:09:17.841429 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="2c9c9b78-b11f-4e58-a502-3c1f4c3e837b" containerName="nova-api-log" containerID="cri-o://5420d7647e4cd92b8d2b3546873c0b0d3ce467e1452887f1299f47651d3525bc" gracePeriod=30 Nov 28 16:09:17 crc kubenswrapper[4857]: I1128 16:09:17.841589 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="2c9c9b78-b11f-4e58-a502-3c1f4c3e837b" containerName="nova-api-api" containerID="cri-o://b5d696117642bc1f98c754b7864abc086a755e8fb92918f8591d2e1ce6393d45" gracePeriod=30 Nov 28 16:09:17 crc kubenswrapper[4857]: I1128 16:09:17.865849 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 28 16:09:17 crc kubenswrapper[4857]: I1128 16:09:17.866095 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="4d58ec9e-5301-4a49-9c34-2704d8db30e1" containerName="nova-scheduler-scheduler" containerID="cri-o://4c379d2e02f9f431f0d148a286cc6beb61a233eed5a36f9bda1732d27131d12d" gracePeriod=30 Nov 28 16:09:17 crc kubenswrapper[4857]: I1128 16:09:17.908645 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 28 16:09:17 crc kubenswrapper[4857]: I1128 16:09:17.908880 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="2eff5643-12a8-4549-a2f3-19aee5ea63b4" containerName="nova-metadata-log" containerID="cri-o://bcfcd63b811032d62e3e2e24a1e7da6a701d8fd597f92bc88db9e950d0a5004c" gracePeriod=30 Nov 28 16:09:17 crc kubenswrapper[4857]: I1128 16:09:17.908960 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="2eff5643-12a8-4549-a2f3-19aee5ea63b4" containerName="nova-metadata-metadata" containerID="cri-o://5acc0626a52bad7524203ca02e10c4c0fffa2729264edbdfe63e3c5d3e7aa2b9" gracePeriod=30 Nov 28 16:09:18 crc kubenswrapper[4857]: I1128 16:09:18.209103 4857 generic.go:334] "Generic (PLEG): container finished" podID="2c9c9b78-b11f-4e58-a502-3c1f4c3e837b" 
containerID="5420d7647e4cd92b8d2b3546873c0b0d3ce467e1452887f1299f47651d3525bc" exitCode=143 Nov 28 16:09:18 crc kubenswrapper[4857]: I1128 16:09:18.209249 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2c9c9b78-b11f-4e58-a502-3c1f4c3e837b","Type":"ContainerDied","Data":"5420d7647e4cd92b8d2b3546873c0b0d3ce467e1452887f1299f47651d3525bc"} Nov 28 16:09:18 crc kubenswrapper[4857]: I1128 16:09:18.212058 4857 generic.go:334] "Generic (PLEG): container finished" podID="2eff5643-12a8-4549-a2f3-19aee5ea63b4" containerID="bcfcd63b811032d62e3e2e24a1e7da6a701d8fd597f92bc88db9e950d0a5004c" exitCode=143 Nov 28 16:09:18 crc kubenswrapper[4857]: I1128 16:09:18.212095 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2eff5643-12a8-4549-a2f3-19aee5ea63b4","Type":"ContainerDied","Data":"bcfcd63b811032d62e3e2e24a1e7da6a701d8fd597f92bc88db9e950d0a5004c"} Nov 28 16:09:18 crc kubenswrapper[4857]: I1128 16:09:18.639994 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 16:09:18 crc kubenswrapper[4857]: I1128 16:09:18.747431 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41e24999-304b-407e-bc06-e9b21de89249-config-data\") pod \"41e24999-304b-407e-bc06-e9b21de89249\" (UID: \"41e24999-304b-407e-bc06-e9b21de89249\") " Nov 28 16:09:18 crc kubenswrapper[4857]: I1128 16:09:18.747556 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jtvkh\" (UniqueName: \"kubernetes.io/projected/41e24999-304b-407e-bc06-e9b21de89249-kube-api-access-jtvkh\") pod \"41e24999-304b-407e-bc06-e9b21de89249\" (UID: \"41e24999-304b-407e-bc06-e9b21de89249\") " Nov 28 16:09:18 crc kubenswrapper[4857]: I1128 16:09:18.747622 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41e24999-304b-407e-bc06-e9b21de89249-combined-ca-bundle\") pod \"41e24999-304b-407e-bc06-e9b21de89249\" (UID: \"41e24999-304b-407e-bc06-e9b21de89249\") " Nov 28 16:09:18 crc kubenswrapper[4857]: I1128 16:09:18.754874 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41e24999-304b-407e-bc06-e9b21de89249-kube-api-access-jtvkh" (OuterVolumeSpecName: "kube-api-access-jtvkh") pod "41e24999-304b-407e-bc06-e9b21de89249" (UID: "41e24999-304b-407e-bc06-e9b21de89249"). InnerVolumeSpecName "kube-api-access-jtvkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:09:18 crc kubenswrapper[4857]: I1128 16:09:18.797867 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41e24999-304b-407e-bc06-e9b21de89249-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "41e24999-304b-407e-bc06-e9b21de89249" (UID: "41e24999-304b-407e-bc06-e9b21de89249"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:09:18 crc kubenswrapper[4857]: I1128 16:09:18.798659 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/41e24999-304b-407e-bc06-e9b21de89249-config-data" (OuterVolumeSpecName: "config-data") pod "41e24999-304b-407e-bc06-e9b21de89249" (UID: "41e24999-304b-407e-bc06-e9b21de89249"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:09:18 crc kubenswrapper[4857]: I1128 16:09:18.851325 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/41e24999-304b-407e-bc06-e9b21de89249-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:09:18 crc kubenswrapper[4857]: I1128 16:09:18.851358 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jtvkh\" (UniqueName: \"kubernetes.io/projected/41e24999-304b-407e-bc06-e9b21de89249-kube-api-access-jtvkh\") on node \"crc\" DevicePath \"\"" Nov 28 16:09:18 crc kubenswrapper[4857]: I1128 16:09:18.851369 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/41e24999-304b-407e-bc06-e9b21de89249-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:09:19 crc kubenswrapper[4857]: E1128 16:09:19.080711 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4c379d2e02f9f431f0d148a286cc6beb61a233eed5a36f9bda1732d27131d12d" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 16:09:19 crc kubenswrapper[4857]: E1128 16:09:19.082143 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4c379d2e02f9f431f0d148a286cc6beb61a233eed5a36f9bda1732d27131d12d" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 16:09:19 crc kubenswrapper[4857]: E1128 16:09:19.083356 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4c379d2e02f9f431f0d148a286cc6beb61a233eed5a36f9bda1732d27131d12d" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 28 16:09:19 crc kubenswrapper[4857]: E1128 16:09:19.083403 4857 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="4d58ec9e-5301-4a49-9c34-2704d8db30e1" containerName="nova-scheduler-scheduler" Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.224427 4857 generic.go:334] "Generic (PLEG): container finished" podID="41e24999-304b-407e-bc06-e9b21de89249" containerID="aa9181b1906020a1865b3e399b7707ec0c9ead604e46c0e06904c9fbf81c7e76" exitCode=0 Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.224505 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.224512 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"41e24999-304b-407e-bc06-e9b21de89249","Type":"ContainerDied","Data":"aa9181b1906020a1865b3e399b7707ec0c9ead604e46c0e06904c9fbf81c7e76"} Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.224903 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"41e24999-304b-407e-bc06-e9b21de89249","Type":"ContainerDied","Data":"21a55d84d2fdfae0795d4ab847143bfcca93fa09a4230db68de7d5e7688c4a8a"} Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.224923 4857 scope.go:117] "RemoveContainer" containerID="aa9181b1906020a1865b3e399b7707ec0c9ead604e46c0e06904c9fbf81c7e76" Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.355079 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.370161 4857 scope.go:117] "RemoveContainer" containerID="aa9181b1906020a1865b3e399b7707ec0c9ead604e46c0e06904c9fbf81c7e76" Nov 28 16:09:19 crc kubenswrapper[4857]: E1128 16:09:19.370538 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aa9181b1906020a1865b3e399b7707ec0c9ead604e46c0e06904c9fbf81c7e76\": container with ID starting with aa9181b1906020a1865b3e399b7707ec0c9ead604e46c0e06904c9fbf81c7e76 not found: ID does not exist" containerID="aa9181b1906020a1865b3e399b7707ec0c9ead604e46c0e06904c9fbf81c7e76" Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.370604 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aa9181b1906020a1865b3e399b7707ec0c9ead604e46c0e06904c9fbf81c7e76"} err="failed to get container status \"aa9181b1906020a1865b3e399b7707ec0c9ead604e46c0e06904c9fbf81c7e76\": rpc error: code = NotFound desc = could not find container \"aa9181b1906020a1865b3e399b7707ec0c9ead604e46c0e06904c9fbf81c7e76\": container with ID starting with aa9181b1906020a1865b3e399b7707ec0c9ead604e46c0e06904c9fbf81c7e76 not found: ID does not exist" Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.382158 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.400651 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 16:09:19 crc kubenswrapper[4857]: E1128 16:09:19.401265 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41e24999-304b-407e-bc06-e9b21de89249" containerName="nova-cell1-conductor-conductor" Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.401282 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="41e24999-304b-407e-bc06-e9b21de89249" containerName="nova-cell1-conductor-conductor" Nov 28 16:09:19 crc kubenswrapper[4857]: E1128 16:09:19.401305 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8c62c98-9304-412b-9738-99fb9dad59a6" containerName="neutron-dhcp-openstack-openstack-cell1" Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.401314 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8c62c98-9304-412b-9738-99fb9dad59a6" containerName="neutron-dhcp-openstack-openstack-cell1" Nov 28 16:09:19 crc kubenswrapper[4857]: E1128 16:09:19.401347 4857 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="69edcff5-bd9b-411d-a0d6-c8d104e034ae" containerName="extract-content" Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.401356 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="69edcff5-bd9b-411d-a0d6-c8d104e034ae" containerName="extract-content" Nov 28 16:09:19 crc kubenswrapper[4857]: E1128 16:09:19.401381 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69edcff5-bd9b-411d-a0d6-c8d104e034ae" containerName="registry-server" Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.401390 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="69edcff5-bd9b-411d-a0d6-c8d104e034ae" containerName="registry-server" Nov 28 16:09:19 crc kubenswrapper[4857]: E1128 16:09:19.401409 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69edcff5-bd9b-411d-a0d6-c8d104e034ae" containerName="extract-utilities" Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.401418 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="69edcff5-bd9b-411d-a0d6-c8d104e034ae" containerName="extract-utilities" Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.401656 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="41e24999-304b-407e-bc06-e9b21de89249" containerName="nova-cell1-conductor-conductor" Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.401680 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8c62c98-9304-412b-9738-99fb9dad59a6" containerName="neutron-dhcp-openstack-openstack-cell1" Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.401724 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="69edcff5-bd9b-411d-a0d6-c8d104e034ae" containerName="registry-server" Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.402921 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.407800 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.410316 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.463681 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hznhz\" (UniqueName: \"kubernetes.io/projected/c5b22e7a-87b1-421d-9c28-35afaefb3808-kube-api-access-hznhz\") pod \"nova-cell1-conductor-0\" (UID: \"c5b22e7a-87b1-421d-9c28-35afaefb3808\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.464178 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5b22e7a-87b1-421d-9c28-35afaefb3808-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"c5b22e7a-87b1-421d-9c28-35afaefb3808\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.464255 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5b22e7a-87b1-421d-9c28-35afaefb3808-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"c5b22e7a-87b1-421d-9c28-35afaefb3808\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.566315 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5b22e7a-87b1-421d-9c28-35afaefb3808-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"c5b22e7a-87b1-421d-9c28-35afaefb3808\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.566367 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5b22e7a-87b1-421d-9c28-35afaefb3808-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"c5b22e7a-87b1-421d-9c28-35afaefb3808\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.566519 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hznhz\" (UniqueName: \"kubernetes.io/projected/c5b22e7a-87b1-421d-9c28-35afaefb3808-kube-api-access-hznhz\") pod \"nova-cell1-conductor-0\" (UID: \"c5b22e7a-87b1-421d-9c28-35afaefb3808\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.579301 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5b22e7a-87b1-421d-9c28-35afaefb3808-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"c5b22e7a-87b1-421d-9c28-35afaefb3808\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.581816 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5b22e7a-87b1-421d-9c28-35afaefb3808-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"c5b22e7a-87b1-421d-9c28-35afaefb3808\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.583046 4857 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hznhz\" (UniqueName: \"kubernetes.io/projected/c5b22e7a-87b1-421d-9c28-35afaefb3808-kube-api-access-hznhz\") pod \"nova-cell1-conductor-0\" (UID: \"c5b22e7a-87b1-421d-9c28-35afaefb3808\") " pod="openstack/nova-cell1-conductor-0" Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.677327 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.732670 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.770178 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfd140a1-03fc-4dc7-9017-ab03325863e3-combined-ca-bundle\") pod \"bfd140a1-03fc-4dc7-9017-ab03325863e3\" (UID: \"bfd140a1-03fc-4dc7-9017-ab03325863e3\") " Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.771105 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-96z2c\" (UniqueName: \"kubernetes.io/projected/bfd140a1-03fc-4dc7-9017-ab03325863e3-kube-api-access-96z2c\") pod \"bfd140a1-03fc-4dc7-9017-ab03325863e3\" (UID: \"bfd140a1-03fc-4dc7-9017-ab03325863e3\") " Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.772163 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfd140a1-03fc-4dc7-9017-ab03325863e3-config-data\") pod \"bfd140a1-03fc-4dc7-9017-ab03325863e3\" (UID: \"bfd140a1-03fc-4dc7-9017-ab03325863e3\") " Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.775421 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bfd140a1-03fc-4dc7-9017-ab03325863e3-kube-api-access-96z2c" (OuterVolumeSpecName: "kube-api-access-96z2c") pod "bfd140a1-03fc-4dc7-9017-ab03325863e3" (UID: "bfd140a1-03fc-4dc7-9017-ab03325863e3"). InnerVolumeSpecName "kube-api-access-96z2c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.807279 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bfd140a1-03fc-4dc7-9017-ab03325863e3-config-data" (OuterVolumeSpecName: "config-data") pod "bfd140a1-03fc-4dc7-9017-ab03325863e3" (UID: "bfd140a1-03fc-4dc7-9017-ab03325863e3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.813126 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bfd140a1-03fc-4dc7-9017-ab03325863e3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bfd140a1-03fc-4dc7-9017-ab03325863e3" (UID: "bfd140a1-03fc-4dc7-9017-ab03325863e3"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.875988 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bfd140a1-03fc-4dc7-9017-ab03325863e3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.876022 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-96z2c\" (UniqueName: \"kubernetes.io/projected/bfd140a1-03fc-4dc7-9017-ab03325863e3-kube-api-access-96z2c\") on node \"crc\" DevicePath \"\"" Nov 28 16:09:19 crc kubenswrapper[4857]: I1128 16:09:19.876037 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bfd140a1-03fc-4dc7-9017-ab03325863e3-config-data\") on node \"crc\" DevicePath \"\"" Nov 28 16:09:20 crc kubenswrapper[4857]: I1128 16:09:20.179532 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 28 16:09:20 crc kubenswrapper[4857]: I1128 16:09:20.252278 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="41e24999-304b-407e-bc06-e9b21de89249" path="/var/lib/kubelet/pods/41e24999-304b-407e-bc06-e9b21de89249/volumes" Nov 28 16:09:20 crc kubenswrapper[4857]: I1128 16:09:20.255336 4857 generic.go:334] "Generic (PLEG): container finished" podID="bfd140a1-03fc-4dc7-9017-ab03325863e3" containerID="f54fca0d7aec2bc6af751f06180b10539c38e37e676af579284c9e9184f71ae2" exitCode=0 Nov 28 16:09:20 crc kubenswrapper[4857]: I1128 16:09:20.255397 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 16:09:20 crc kubenswrapper[4857]: I1128 16:09:20.255413 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"bfd140a1-03fc-4dc7-9017-ab03325863e3","Type":"ContainerDied","Data":"f54fca0d7aec2bc6af751f06180b10539c38e37e676af579284c9e9184f71ae2"} Nov 28 16:09:20 crc kubenswrapper[4857]: I1128 16:09:20.255442 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"bfd140a1-03fc-4dc7-9017-ab03325863e3","Type":"ContainerDied","Data":"27eece7c6f76a69638d5dacd410790b626b3124e85affa30966dd9cc03dfde21"} Nov 28 16:09:20 crc kubenswrapper[4857]: I1128 16:09:20.255460 4857 scope.go:117] "RemoveContainer" containerID="f54fca0d7aec2bc6af751f06180b10539c38e37e676af579284c9e9184f71ae2" Nov 28 16:09:20 crc kubenswrapper[4857]: I1128 16:09:20.262134 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"c5b22e7a-87b1-421d-9c28-35afaefb3808","Type":"ContainerStarted","Data":"9365309b58795a0b289fb0c0beb56441de6a1a7e63642ce0265f5dc442bd49c0"} Nov 28 16:09:20 crc kubenswrapper[4857]: I1128 16:09:20.339604 4857 scope.go:117] "RemoveContainer" containerID="f54fca0d7aec2bc6af751f06180b10539c38e37e676af579284c9e9184f71ae2" Nov 28 16:09:20 crc kubenswrapper[4857]: E1128 16:09:20.340055 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f54fca0d7aec2bc6af751f06180b10539c38e37e676af579284c9e9184f71ae2\": container with ID starting with f54fca0d7aec2bc6af751f06180b10539c38e37e676af579284c9e9184f71ae2 not found: ID does not exist" containerID="f54fca0d7aec2bc6af751f06180b10539c38e37e676af579284c9e9184f71ae2" Nov 28 16:09:20 crc kubenswrapper[4857]: I1128 16:09:20.340096 4857 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f54fca0d7aec2bc6af751f06180b10539c38e37e676af579284c9e9184f71ae2"} err="failed to get container status \"f54fca0d7aec2bc6af751f06180b10539c38e37e676af579284c9e9184f71ae2\": rpc error: code = NotFound desc = could not find container \"f54fca0d7aec2bc6af751f06180b10539c38e37e676af579284c9e9184f71ae2\": container with ID starting with f54fca0d7aec2bc6af751f06180b10539c38e37e676af579284c9e9184f71ae2 not found: ID does not exist" Nov 28 16:09:20 crc kubenswrapper[4857]: I1128 16:09:20.381752 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 16:09:20 crc kubenswrapper[4857]: I1128 16:09:20.397107 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 16:09:20 crc kubenswrapper[4857]: I1128 16:09:20.407547 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 16:09:20 crc kubenswrapper[4857]: E1128 16:09:20.408036 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bfd140a1-03fc-4dc7-9017-ab03325863e3" containerName="nova-cell0-conductor-conductor" Nov 28 16:09:20 crc kubenswrapper[4857]: I1128 16:09:20.408052 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="bfd140a1-03fc-4dc7-9017-ab03325863e3" containerName="nova-cell0-conductor-conductor" Nov 28 16:09:20 crc kubenswrapper[4857]: I1128 16:09:20.408259 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="bfd140a1-03fc-4dc7-9017-ab03325863e3" containerName="nova-cell0-conductor-conductor" Nov 28 16:09:20 crc kubenswrapper[4857]: I1128 16:09:20.409028 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 16:09:20 crc kubenswrapper[4857]: I1128 16:09:20.411140 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 28 16:09:20 crc kubenswrapper[4857]: I1128 16:09:20.420619 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 16:09:20 crc kubenswrapper[4857]: I1128 16:09:20.493009 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nrwpf\" (UniqueName: \"kubernetes.io/projected/2df57cfe-5ac9-4a8c-83c9-f3bad8317f1d-kube-api-access-nrwpf\") pod \"nova-cell0-conductor-0\" (UID: \"2df57cfe-5ac9-4a8c-83c9-f3bad8317f1d\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:09:20 crc kubenswrapper[4857]: I1128 16:09:20.493122 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2df57cfe-5ac9-4a8c-83c9-f3bad8317f1d-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"2df57cfe-5ac9-4a8c-83c9-f3bad8317f1d\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:09:20 crc kubenswrapper[4857]: I1128 16:09:20.493175 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2df57cfe-5ac9-4a8c-83c9-f3bad8317f1d-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"2df57cfe-5ac9-4a8c-83c9-f3bad8317f1d\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:09:20 crc kubenswrapper[4857]: I1128 16:09:20.595468 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/2df57cfe-5ac9-4a8c-83c9-f3bad8317f1d-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"2df57cfe-5ac9-4a8c-83c9-f3bad8317f1d\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:09:20 crc kubenswrapper[4857]: I1128 16:09:20.595540 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2df57cfe-5ac9-4a8c-83c9-f3bad8317f1d-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"2df57cfe-5ac9-4a8c-83c9-f3bad8317f1d\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:09:20 crc kubenswrapper[4857]: I1128 16:09:20.595682 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nrwpf\" (UniqueName: \"kubernetes.io/projected/2df57cfe-5ac9-4a8c-83c9-f3bad8317f1d-kube-api-access-nrwpf\") pod \"nova-cell0-conductor-0\" (UID: \"2df57cfe-5ac9-4a8c-83c9-f3bad8317f1d\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:09:20 crc kubenswrapper[4857]: I1128 16:09:20.599868 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2df57cfe-5ac9-4a8c-83c9-f3bad8317f1d-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"2df57cfe-5ac9-4a8c-83c9-f3bad8317f1d\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:09:20 crc kubenswrapper[4857]: I1128 16:09:20.602041 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2df57cfe-5ac9-4a8c-83c9-f3bad8317f1d-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"2df57cfe-5ac9-4a8c-83c9-f3bad8317f1d\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:09:20 crc kubenswrapper[4857]: I1128 16:09:20.610677 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nrwpf\" (UniqueName: \"kubernetes.io/projected/2df57cfe-5ac9-4a8c-83c9-f3bad8317f1d-kube-api-access-nrwpf\") pod \"nova-cell0-conductor-0\" (UID: \"2df57cfe-5ac9-4a8c-83c9-f3bad8317f1d\") " pod="openstack/nova-cell0-conductor-0" Nov 28 16:09:20 crc kubenswrapper[4857]: I1128 16:09:20.731507 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 28 16:09:21 crc kubenswrapper[4857]: I1128 16:09:21.222613 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 28 16:09:21 crc kubenswrapper[4857]: I1128 16:09:21.279169 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"c5b22e7a-87b1-421d-9c28-35afaefb3808","Type":"ContainerStarted","Data":"0344bcd46d64d00b0f472d7400ed1ea4556d7e346be26a9af18259d768543eef"} Nov 28 16:09:21 crc kubenswrapper[4857]: I1128 16:09:21.279384 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 28 16:09:21 crc kubenswrapper[4857]: I1128 16:09:21.299169 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.299152856 podStartE2EDuration="2.299152856s" podCreationTimestamp="2025-11-28 16:09:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:09:21.293721022 +0000 UTC m=+9611.417662469" watchObservedRunningTime="2025-11-28 16:09:21.299152856 +0000 UTC m=+9611.423094293" Nov 28 16:09:21 crc kubenswrapper[4857]: I1128 16:09:21.347614 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="2eff5643-12a8-4549-a2f3-19aee5ea63b4" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"http://10.217.1.89:8775/\": dial tcp 10.217.1.89:8775: connect: connection refused" Nov 28 16:09:21 crc kubenswrapper[4857]: I1128 16:09:21.347660 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="2eff5643-12a8-4549-a2f3-19aee5ea63b4" containerName="nova-metadata-log" probeResult="failure" output="Get \"http://10.217.1.89:8775/\": dial tcp 10.217.1.89:8775: connect: connection refused" Nov 28 16:09:21 crc kubenswrapper[4857]: W1128 16:09:21.589136 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2df57cfe_5ac9_4a8c_83c9_f3bad8317f1d.slice/crio-1c58c02b013359fb4f3717e6ba495b36f25c11e91cddf0e8c0a30e01e139c392 WatchSource:0}: Error finding container 1c58c02b013359fb4f3717e6ba495b36f25c11e91cddf0e8c0a30e01e139c392: Status 404 returned error can't find the container with id 1c58c02b013359fb4f3717e6ba495b36f25c11e91cddf0e8c0a30e01e139c392 Nov 28 16:09:22 crc kubenswrapper[4857]: I1128 16:09:22.243756 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bfd140a1-03fc-4dc7-9017-ab03325863e3" path="/var/lib/kubelet/pods/bfd140a1-03fc-4dc7-9017-ab03325863e3/volumes" Nov 28 16:09:22 crc kubenswrapper[4857]: I1128 16:09:22.290739 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"2df57cfe-5ac9-4a8c-83c9-f3bad8317f1d","Type":"ContainerStarted","Data":"673927f94258d93ed9247ca175c9ddfa5f6b9dbd35637a04ccafbb08df0a00a4"} Nov 28 16:09:22 crc kubenswrapper[4857]: I1128 16:09:22.290812 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"2df57cfe-5ac9-4a8c-83c9-f3bad8317f1d","Type":"ContainerStarted","Data":"1c58c02b013359fb4f3717e6ba495b36f25c11e91cddf0e8c0a30e01e139c392"} Nov 28 16:09:22 crc kubenswrapper[4857]: I1128 16:09:22.292035 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" 
Nov 28 16:09:22 crc kubenswrapper[4857]: I1128 16:09:22.296029 4857 generic.go:334] "Generic (PLEG): container finished" podID="2c9c9b78-b11f-4e58-a502-3c1f4c3e837b" containerID="b5d696117642bc1f98c754b7864abc086a755e8fb92918f8591d2e1ce6393d45" exitCode=0
Nov 28 16:09:22 crc kubenswrapper[4857]: I1128 16:09:22.296103 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2c9c9b78-b11f-4e58-a502-3c1f4c3e837b","Type":"ContainerDied","Data":"b5d696117642bc1f98c754b7864abc086a755e8fb92918f8591d2e1ce6393d45"}
Nov 28 16:09:22 crc kubenswrapper[4857]: I1128 16:09:22.296300 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2c9c9b78-b11f-4e58-a502-3c1f4c3e837b","Type":"ContainerDied","Data":"acf914881e963d4d39f552e6f129cbe6c82960bb8a72b49c7f4b74f028de32f9"}
Nov 28 16:09:22 crc kubenswrapper[4857]: I1128 16:09:22.296385 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="acf914881e963d4d39f552e6f129cbe6c82960bb8a72b49c7f4b74f028de32f9"
Nov 28 16:09:22 crc kubenswrapper[4857]: I1128 16:09:22.298666 4857 generic.go:334] "Generic (PLEG): container finished" podID="2eff5643-12a8-4549-a2f3-19aee5ea63b4" containerID="5acc0626a52bad7524203ca02e10c4c0fffa2729264edbdfe63e3c5d3e7aa2b9" exitCode=0
Nov 28 16:09:22 crc kubenswrapper[4857]: I1128 16:09:22.299450 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2eff5643-12a8-4549-a2f3-19aee5ea63b4","Type":"ContainerDied","Data":"5acc0626a52bad7524203ca02e10c4c0fffa2729264edbdfe63e3c5d3e7aa2b9"}
Nov 28 16:09:22 crc kubenswrapper[4857]: I1128 16:09:22.315500 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.315484896 podStartE2EDuration="2.315484896s" podCreationTimestamp="2025-11-28 16:09:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:09:22.310299519 +0000 UTC m=+9612.434240956" watchObservedRunningTime="2025-11-28 16:09:22.315484896 +0000 UTC m=+9612.439426333"
Nov 28 16:09:22 crc kubenswrapper[4857]: I1128 16:09:22.357214 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 28 16:09:22 crc kubenswrapper[4857]: I1128 16:09:22.367608 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 28 16:09:22 crc kubenswrapper[4857]: I1128 16:09:22.440168 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2eff5643-12a8-4549-a2f3-19aee5ea63b4-logs\") pod \"2eff5643-12a8-4549-a2f3-19aee5ea63b4\" (UID: \"2eff5643-12a8-4549-a2f3-19aee5ea63b4\") "
Nov 28 16:09:22 crc kubenswrapper[4857]: I1128 16:09:22.440235 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2eff5643-12a8-4549-a2f3-19aee5ea63b4-config-data\") pod \"2eff5643-12a8-4549-a2f3-19aee5ea63b4\" (UID: \"2eff5643-12a8-4549-a2f3-19aee5ea63b4\") "
Nov 28 16:09:22 crc kubenswrapper[4857]: I1128 16:09:22.440264 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c9c9b78-b11f-4e58-a502-3c1f4c3e837b-config-data\") pod \"2c9c9b78-b11f-4e58-a502-3c1f4c3e837b\" (UID: \"2c9c9b78-b11f-4e58-a502-3c1f4c3e837b\") "
Nov 28 16:09:22 crc kubenswrapper[4857]: I1128 16:09:22.440328 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2eff5643-12a8-4549-a2f3-19aee5ea63b4-combined-ca-bundle\") pod \"2eff5643-12a8-4549-a2f3-19aee5ea63b4\" (UID: \"2eff5643-12a8-4549-a2f3-19aee5ea63b4\") "
Nov 28 16:09:22 crc kubenswrapper[4857]: I1128 16:09:22.440407 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vp6kv\" (UniqueName: \"kubernetes.io/projected/2c9c9b78-b11f-4e58-a502-3c1f4c3e837b-kube-api-access-vp6kv\") pod \"2c9c9b78-b11f-4e58-a502-3c1f4c3e837b\" (UID: \"2c9c9b78-b11f-4e58-a502-3c1f4c3e837b\") "
Nov 28 16:09:22 crc kubenswrapper[4857]: I1128 16:09:22.440473 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-64rk6\" (UniqueName: \"kubernetes.io/projected/2eff5643-12a8-4549-a2f3-19aee5ea63b4-kube-api-access-64rk6\") pod \"2eff5643-12a8-4549-a2f3-19aee5ea63b4\" (UID: \"2eff5643-12a8-4549-a2f3-19aee5ea63b4\") "
Nov 28 16:09:22 crc kubenswrapper[4857]: I1128 16:09:22.440498 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c9c9b78-b11f-4e58-a502-3c1f4c3e837b-logs\") pod \"2c9c9b78-b11f-4e58-a502-3c1f4c3e837b\" (UID: \"2c9c9b78-b11f-4e58-a502-3c1f4c3e837b\") "
Nov 28 16:09:22 crc kubenswrapper[4857]: I1128 16:09:22.440601 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c9c9b78-b11f-4e58-a502-3c1f4c3e837b-combined-ca-bundle\") pod \"2c9c9b78-b11f-4e58-a502-3c1f4c3e837b\" (UID: \"2c9c9b78-b11f-4e58-a502-3c1f4c3e837b\") "
Nov 28 16:09:22 crc kubenswrapper[4857]: I1128 16:09:22.442718 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2eff5643-12a8-4549-a2f3-19aee5ea63b4-logs" (OuterVolumeSpecName: "logs") pod "2eff5643-12a8-4549-a2f3-19aee5ea63b4" (UID: "2eff5643-12a8-4549-a2f3-19aee5ea63b4"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:09:22 crc kubenswrapper[4857]: I1128 16:09:22.450809 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c9c9b78-b11f-4e58-a502-3c1f4c3e837b-logs" (OuterVolumeSpecName: "logs") pod "2c9c9b78-b11f-4e58-a502-3c1f4c3e837b" (UID: "2c9c9b78-b11f-4e58-a502-3c1f4c3e837b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:09:22 crc kubenswrapper[4857]: I1128 16:09:22.451437 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c9c9b78-b11f-4e58-a502-3c1f4c3e837b-kube-api-access-vp6kv" (OuterVolumeSpecName: "kube-api-access-vp6kv") pod "2c9c9b78-b11f-4e58-a502-3c1f4c3e837b" (UID: "2c9c9b78-b11f-4e58-a502-3c1f4c3e837b"). InnerVolumeSpecName "kube-api-access-vp6kv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:09:22 crc kubenswrapper[4857]: I1128 16:09:22.491605 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2eff5643-12a8-4549-a2f3-19aee5ea63b4-kube-api-access-64rk6" (OuterVolumeSpecName: "kube-api-access-64rk6") pod "2eff5643-12a8-4549-a2f3-19aee5ea63b4" (UID: "2eff5643-12a8-4549-a2f3-19aee5ea63b4"). InnerVolumeSpecName "kube-api-access-64rk6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:09:22 crc kubenswrapper[4857]: I1128 16:09:22.519306 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c9c9b78-b11f-4e58-a502-3c1f4c3e837b-config-data" (OuterVolumeSpecName: "config-data") pod "2c9c9b78-b11f-4e58-a502-3c1f4c3e837b" (UID: "2c9c9b78-b11f-4e58-a502-3c1f4c3e837b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:09:22 crc kubenswrapper[4857]: I1128 16:09:22.520560 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2eff5643-12a8-4549-a2f3-19aee5ea63b4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2eff5643-12a8-4549-a2f3-19aee5ea63b4" (UID: "2eff5643-12a8-4549-a2f3-19aee5ea63b4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:09:22 crc kubenswrapper[4857]: I1128 16:09:22.531190 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2eff5643-12a8-4549-a2f3-19aee5ea63b4-config-data" (OuterVolumeSpecName: "config-data") pod "2eff5643-12a8-4549-a2f3-19aee5ea63b4" (UID: "2eff5643-12a8-4549-a2f3-19aee5ea63b4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:09:22 crc kubenswrapper[4857]: I1128 16:09:22.531367 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c9c9b78-b11f-4e58-a502-3c1f4c3e837b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2c9c9b78-b11f-4e58-a502-3c1f4c3e837b" (UID: "2c9c9b78-b11f-4e58-a502-3c1f4c3e837b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:09:22 crc kubenswrapper[4857]: I1128 16:09:22.544619 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c9c9b78-b11f-4e58-a502-3c1f4c3e837b-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 16:09:22 crc kubenswrapper[4857]: I1128 16:09:22.544681 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2eff5643-12a8-4549-a2f3-19aee5ea63b4-logs\") on node \"crc\" DevicePath \"\""
Nov 28 16:09:22 crc kubenswrapper[4857]: I1128 16:09:22.544696 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2eff5643-12a8-4549-a2f3-19aee5ea63b4-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 16:09:22 crc kubenswrapper[4857]: I1128 16:09:22.544707 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c9c9b78-b11f-4e58-a502-3c1f4c3e837b-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 16:09:22 crc kubenswrapper[4857]: I1128 16:09:22.544718 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2eff5643-12a8-4549-a2f3-19aee5ea63b4-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 16:09:22 crc kubenswrapper[4857]: I1128 16:09:22.544728 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vp6kv\" (UniqueName: \"kubernetes.io/projected/2c9c9b78-b11f-4e58-a502-3c1f4c3e837b-kube-api-access-vp6kv\") on node \"crc\" DevicePath \"\""
Nov 28 16:09:22 crc kubenswrapper[4857]: I1128 16:09:22.544766 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-64rk6\" (UniqueName: \"kubernetes.io/projected/2eff5643-12a8-4549-a2f3-19aee5ea63b4-kube-api-access-64rk6\") on node \"crc\" DevicePath \"\""
Nov 28 16:09:22 crc kubenswrapper[4857]: I1128 16:09:22.544778 4857 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c9c9b78-b11f-4e58-a502-3c1f4c3e837b-logs\") on node \"crc\" DevicePath \"\""
Nov 28 16:09:23 crc kubenswrapper[4857]: I1128 16:09:23.310690 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 28 16:09:23 crc kubenswrapper[4857]: I1128 16:09:23.310861 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2eff5643-12a8-4549-a2f3-19aee5ea63b4","Type":"ContainerDied","Data":"7f50e01d4adba1aed9bd2dcd6a5b660a397bb0c5622c27b71a521c7cf47cef74"}
Nov 28 16:09:23 crc kubenswrapper[4857]: I1128 16:09:23.318189 4857 scope.go:117] "RemoveContainer" containerID="5acc0626a52bad7524203ca02e10c4c0fffa2729264edbdfe63e3c5d3e7aa2b9"
Nov 28 16:09:23 crc kubenswrapper[4857]: I1128 16:09:23.310936 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 28 16:09:23 crc kubenswrapper[4857]: I1128 16:09:23.688888 4857 scope.go:117] "RemoveContainer" containerID="bcfcd63b811032d62e3e2e24a1e7da6a701d8fd597f92bc88db9e950d0a5004c"
Nov 28 16:09:23 crc kubenswrapper[4857]: I1128 16:09:23.700471 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 16:09:23 crc kubenswrapper[4857]: I1128 16:09:23.722015 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 16:09:23 crc kubenswrapper[4857]: I1128 16:09:23.745811 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 28 16:09:23 crc kubenswrapper[4857]: I1128 16:09:23.786757 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Nov 28 16:09:23 crc kubenswrapper[4857]: I1128 16:09:23.799535 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 16:09:23 crc kubenswrapper[4857]: E1128 16:09:23.799893 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c9c9b78-b11f-4e58-a502-3c1f4c3e837b" containerName="nova-api-api"
Nov 28 16:09:23 crc kubenswrapper[4857]: I1128 16:09:23.799907 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c9c9b78-b11f-4e58-a502-3c1f4c3e837b" containerName="nova-api-api"
Nov 28 16:09:23 crc kubenswrapper[4857]: E1128 16:09:23.799943 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c9c9b78-b11f-4e58-a502-3c1f4c3e837b" containerName="nova-api-log"
Nov 28 16:09:23 crc kubenswrapper[4857]: I1128 16:09:23.799964 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c9c9b78-b11f-4e58-a502-3c1f4c3e837b" containerName="nova-api-log"
Nov 28 16:09:23 crc kubenswrapper[4857]: E1128 16:09:23.799976 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2eff5643-12a8-4549-a2f3-19aee5ea63b4" containerName="nova-metadata-metadata"
Nov 28 16:09:23 crc kubenswrapper[4857]: I1128 16:09:23.799983 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="2eff5643-12a8-4549-a2f3-19aee5ea63b4" containerName="nova-metadata-metadata"
Nov 28 16:09:23 crc kubenswrapper[4857]: E1128 16:09:23.800001 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2eff5643-12a8-4549-a2f3-19aee5ea63b4" containerName="nova-metadata-log"
Nov 28 16:09:23 crc kubenswrapper[4857]: I1128 16:09:23.800008 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="2eff5643-12a8-4549-a2f3-19aee5ea63b4" containerName="nova-metadata-log"
Nov 28 16:09:23 crc kubenswrapper[4857]: I1128 16:09:23.800184 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="2eff5643-12a8-4549-a2f3-19aee5ea63b4" containerName="nova-metadata-log"
Nov 28 16:09:23 crc kubenswrapper[4857]: I1128 16:09:23.800195 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="2eff5643-12a8-4549-a2f3-19aee5ea63b4" containerName="nova-metadata-metadata"
Nov 28 16:09:23 crc kubenswrapper[4857]: I1128 16:09:23.800215 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c9c9b78-b11f-4e58-a502-3c1f4c3e837b" containerName="nova-api-api"
Nov 28 16:09:23 crc kubenswrapper[4857]: I1128 16:09:23.800227 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c9c9b78-b11f-4e58-a502-3c1f4c3e837b" containerName="nova-api-log"
Nov 28 16:09:23 crc kubenswrapper[4857]: I1128 16:09:23.801340 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 28 16:09:23 crc kubenswrapper[4857]: I1128 16:09:23.804278 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Nov 28 16:09:23 crc kubenswrapper[4857]: I1128 16:09:23.821463 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Nov 28 16:09:23 crc kubenswrapper[4857]: I1128 16:09:23.823325 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 28 16:09:23 crc kubenswrapper[4857]: I1128 16:09:23.827435 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Nov 28 16:09:23 crc kubenswrapper[4857]: I1128 16:09:23.861320 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 28 16:09:23 crc kubenswrapper[4857]: I1128 16:09:23.881327 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 16:09:23 crc kubenswrapper[4857]: I1128 16:09:23.972317 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lqfrf\" (UniqueName: \"kubernetes.io/projected/f18de81f-0f74-4296-b62e-f1aea60e4f67-kube-api-access-lqfrf\") pod \"nova-api-0\" (UID: \"f18de81f-0f74-4296-b62e-f1aea60e4f67\") " pod="openstack/nova-api-0"
Nov 28 16:09:23 crc kubenswrapper[4857]: I1128 16:09:23.972368 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f18de81f-0f74-4296-b62e-f1aea60e4f67-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"f18de81f-0f74-4296-b62e-f1aea60e4f67\") " pod="openstack/nova-api-0"
Nov 28 16:09:23 crc kubenswrapper[4857]: I1128 16:09:23.972429 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a02553cd-561a-4d5a-a986-076342a5430b-config-data\") pod \"nova-metadata-0\" (UID: \"a02553cd-561a-4d5a-a986-076342a5430b\") " pod="openstack/nova-metadata-0"
Nov 28 16:09:23 crc kubenswrapper[4857]: I1128 16:09:23.972473 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a02553cd-561a-4d5a-a986-076342a5430b-logs\") pod \"nova-metadata-0\" (UID: \"a02553cd-561a-4d5a-a986-076342a5430b\") " pod="openstack/nova-metadata-0"
Nov 28 16:09:23 crc kubenswrapper[4857]: I1128 16:09:23.972535 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f18de81f-0f74-4296-b62e-f1aea60e4f67-config-data\") pod \"nova-api-0\" (UID: \"f18de81f-0f74-4296-b62e-f1aea60e4f67\") " pod="openstack/nova-api-0"
Nov 28 16:09:23 crc kubenswrapper[4857]: I1128 16:09:23.972577 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a02553cd-561a-4d5a-a986-076342a5430b-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a02553cd-561a-4d5a-a986-076342a5430b\") " pod="openstack/nova-metadata-0"
Nov 28 16:09:23 crc kubenswrapper[4857]: I1128 16:09:23.972613 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f18de81f-0f74-4296-b62e-f1aea60e4f67-logs\") pod \"nova-api-0\" (UID: \"f18de81f-0f74-4296-b62e-f1aea60e4f67\") " pod="openstack/nova-api-0"
Nov 28 16:09:23 crc kubenswrapper[4857]: I1128 16:09:23.972691 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-trmbz\" (UniqueName: \"kubernetes.io/projected/a02553cd-561a-4d5a-a986-076342a5430b-kube-api-access-trmbz\") pod \"nova-metadata-0\" (UID: \"a02553cd-561a-4d5a-a986-076342a5430b\") " pod="openstack/nova-metadata-0"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.074539 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f18de81f-0f74-4296-b62e-f1aea60e4f67-config-data\") pod \"nova-api-0\" (UID: \"f18de81f-0f74-4296-b62e-f1aea60e4f67\") " pod="openstack/nova-api-0"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.074639 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a02553cd-561a-4d5a-a986-076342a5430b-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a02553cd-561a-4d5a-a986-076342a5430b\") " pod="openstack/nova-metadata-0"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.074692 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f18de81f-0f74-4296-b62e-f1aea60e4f67-logs\") pod \"nova-api-0\" (UID: \"f18de81f-0f74-4296-b62e-f1aea60e4f67\") " pod="openstack/nova-api-0"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.074823 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-trmbz\" (UniqueName: \"kubernetes.io/projected/a02553cd-561a-4d5a-a986-076342a5430b-kube-api-access-trmbz\") pod \"nova-metadata-0\" (UID: \"a02553cd-561a-4d5a-a986-076342a5430b\") " pod="openstack/nova-metadata-0"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.074930 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lqfrf\" (UniqueName: \"kubernetes.io/projected/f18de81f-0f74-4296-b62e-f1aea60e4f67-kube-api-access-lqfrf\") pod \"nova-api-0\" (UID: \"f18de81f-0f74-4296-b62e-f1aea60e4f67\") " pod="openstack/nova-api-0"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.074990 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f18de81f-0f74-4296-b62e-f1aea60e4f67-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"f18de81f-0f74-4296-b62e-f1aea60e4f67\") " pod="openstack/nova-api-0"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.075076 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a02553cd-561a-4d5a-a986-076342a5430b-config-data\") pod \"nova-metadata-0\" (UID: \"a02553cd-561a-4d5a-a986-076342a5430b\") " pod="openstack/nova-metadata-0"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.075130 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a02553cd-561a-4d5a-a986-076342a5430b-logs\") pod \"nova-metadata-0\" (UID: \"a02553cd-561a-4d5a-a986-076342a5430b\") " pod="openstack/nova-metadata-0"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.075759 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f18de81f-0f74-4296-b62e-f1aea60e4f67-logs\") pod \"nova-api-0\" (UID: \"f18de81f-0f74-4296-b62e-f1aea60e4f67\") " pod="openstack/nova-api-0"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.075903 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a02553cd-561a-4d5a-a986-076342a5430b-logs\") pod \"nova-metadata-0\" (UID: \"a02553cd-561a-4d5a-a986-076342a5430b\") " pod="openstack/nova-metadata-0"
Nov 28 16:09:24 crc kubenswrapper[4857]: E1128 16:09:24.078756 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4c379d2e02f9f431f0d148a286cc6beb61a233eed5a36f9bda1732d27131d12d is running failed: container process not found" containerID="4c379d2e02f9f431f0d148a286cc6beb61a233eed5a36f9bda1732d27131d12d" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Nov 28 16:09:24 crc kubenswrapper[4857]: E1128 16:09:24.079155 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4c379d2e02f9f431f0d148a286cc6beb61a233eed5a36f9bda1732d27131d12d is running failed: container process not found" containerID="4c379d2e02f9f431f0d148a286cc6beb61a233eed5a36f9bda1732d27131d12d" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Nov 28 16:09:24 crc kubenswrapper[4857]: E1128 16:09:24.079427 4857 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4c379d2e02f9f431f0d148a286cc6beb61a233eed5a36f9bda1732d27131d12d is running failed: container process not found" containerID="4c379d2e02f9f431f0d148a286cc6beb61a233eed5a36f9bda1732d27131d12d" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Nov 28 16:09:24 crc kubenswrapper[4857]: E1128 16:09:24.079488 4857 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 4c379d2e02f9f431f0d148a286cc6beb61a233eed5a36f9bda1732d27131d12d is running failed: container process not found" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="4d58ec9e-5301-4a49-9c34-2704d8db30e1" containerName="nova-scheduler-scheduler"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.083192 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f18de81f-0f74-4296-b62e-f1aea60e4f67-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"f18de81f-0f74-4296-b62e-f1aea60e4f67\") " pod="openstack/nova-api-0"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.083521 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a02553cd-561a-4d5a-a986-076342a5430b-config-data\") pod \"nova-metadata-0\" (UID: \"a02553cd-561a-4d5a-a986-076342a5430b\") " pod="openstack/nova-metadata-0"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.083792 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a02553cd-561a-4d5a-a986-076342a5430b-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a02553cd-561a-4d5a-a986-076342a5430b\") " pod="openstack/nova-metadata-0"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.085503 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f18de81f-0f74-4296-b62e-f1aea60e4f67-config-data\") pod \"nova-api-0\" (UID: \"f18de81f-0f74-4296-b62e-f1aea60e4f67\") " pod="openstack/nova-api-0"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.093140 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lqfrf\" (UniqueName: \"kubernetes.io/projected/f18de81f-0f74-4296-b62e-f1aea60e4f67-kube-api-access-lqfrf\") pod \"nova-api-0\" (UID: \"f18de81f-0f74-4296-b62e-f1aea60e4f67\") " pod="openstack/nova-api-0"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.094472 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-trmbz\" (UniqueName: \"kubernetes.io/projected/a02553cd-561a-4d5a-a986-076342a5430b-kube-api-access-trmbz\") pod \"nova-metadata-0\" (UID: \"a02553cd-561a-4d5a-a986-076342a5430b\") " pod="openstack/nova-metadata-0"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.118070 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.184891 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.210780 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.287316 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c9c9b78-b11f-4e58-a502-3c1f4c3e837b" path="/var/lib/kubelet/pods/2c9c9b78-b11f-4e58-a502-3c1f4c3e837b/volumes"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.298383 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2eff5643-12a8-4549-a2f3-19aee5ea63b4" path="/var/lib/kubelet/pods/2eff5643-12a8-4549-a2f3-19aee5ea63b4/volumes"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.322916 4857 generic.go:334] "Generic (PLEG): container finished" podID="4d58ec9e-5301-4a49-9c34-2704d8db30e1" containerID="4c379d2e02f9f431f0d148a286cc6beb61a233eed5a36f9bda1732d27131d12d" exitCode=0
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.323350 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.323865 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"4d58ec9e-5301-4a49-9c34-2704d8db30e1","Type":"ContainerDied","Data":"4c379d2e02f9f431f0d148a286cc6beb61a233eed5a36f9bda1732d27131d12d"}
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.323894 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"4d58ec9e-5301-4a49-9c34-2704d8db30e1","Type":"ContainerDied","Data":"a246239882f35af31953de7f0e268f4db2aed2f0db3e3c0c9df10652720daf5c"}
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.323912 4857 scope.go:117] "RemoveContainer" containerID="4c379d2e02f9f431f0d148a286cc6beb61a233eed5a36f9bda1732d27131d12d"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.360300 4857 scope.go:117] "RemoveContainer" containerID="4c379d2e02f9f431f0d148a286cc6beb61a233eed5a36f9bda1732d27131d12d"
Nov 28 16:09:24 crc kubenswrapper[4857]: E1128 16:09:24.365222 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c379d2e02f9f431f0d148a286cc6beb61a233eed5a36f9bda1732d27131d12d\": container with ID starting with 4c379d2e02f9f431f0d148a286cc6beb61a233eed5a36f9bda1732d27131d12d not found: ID does not exist" containerID="4c379d2e02f9f431f0d148a286cc6beb61a233eed5a36f9bda1732d27131d12d"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.365265 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c379d2e02f9f431f0d148a286cc6beb61a233eed5a36f9bda1732d27131d12d"} err="failed to get container status \"4c379d2e02f9f431f0d148a286cc6beb61a233eed5a36f9bda1732d27131d12d\": rpc error: code = NotFound desc = could not find container \"4c379d2e02f9f431f0d148a286cc6beb61a233eed5a36f9bda1732d27131d12d\": container with ID starting with 4c379d2e02f9f431f0d148a286cc6beb61a233eed5a36f9bda1732d27131d12d not found: ID does not exist"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.387404 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d58ec9e-5301-4a49-9c34-2704d8db30e1-combined-ca-bundle\") pod \"4d58ec9e-5301-4a49-9c34-2704d8db30e1\" (UID: \"4d58ec9e-5301-4a49-9c34-2704d8db30e1\") "
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.387505 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d58ec9e-5301-4a49-9c34-2704d8db30e1-config-data\") pod \"4d58ec9e-5301-4a49-9c34-2704d8db30e1\" (UID: \"4d58ec9e-5301-4a49-9c34-2704d8db30e1\") "
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.387533 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dwfhs\" (UniqueName: \"kubernetes.io/projected/4d58ec9e-5301-4a49-9c34-2704d8db30e1-kube-api-access-dwfhs\") pod \"4d58ec9e-5301-4a49-9c34-2704d8db30e1\" (UID: \"4d58ec9e-5301-4a49-9c34-2704d8db30e1\") "
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.392065 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d58ec9e-5301-4a49-9c34-2704d8db30e1-kube-api-access-dwfhs" (OuterVolumeSpecName: "kube-api-access-dwfhs") pod "4d58ec9e-5301-4a49-9c34-2704d8db30e1" (UID: "4d58ec9e-5301-4a49-9c34-2704d8db30e1"). InnerVolumeSpecName "kube-api-access-dwfhs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.416050 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d58ec9e-5301-4a49-9c34-2704d8db30e1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4d58ec9e-5301-4a49-9c34-2704d8db30e1" (UID: "4d58ec9e-5301-4a49-9c34-2704d8db30e1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.416419 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4d58ec9e-5301-4a49-9c34-2704d8db30e1-config-data" (OuterVolumeSpecName: "config-data") pod "4d58ec9e-5301-4a49-9c34-2704d8db30e1" (UID: "4d58ec9e-5301-4a49-9c34-2704d8db30e1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.490612 4857 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d58ec9e-5301-4a49-9c34-2704d8db30e1-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.490890 4857 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d58ec9e-5301-4a49-9c34-2704d8db30e1-config-data\") on node \"crc\" DevicePath \"\""
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.490932 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dwfhs\" (UniqueName: \"kubernetes.io/projected/4d58ec9e-5301-4a49-9c34-2704d8db30e1-kube-api-access-dwfhs\") on node \"crc\" DevicePath \"\""
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.631132 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 28 16:09:24 crc kubenswrapper[4857]: W1128 16:09:24.633046 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda02553cd_561a_4d5a_a986_076342a5430b.slice/crio-072f8006ccd71244ab99b766c666707a8e12dc5d439b1ab3161da9033fe19920 WatchSource:0}: Error finding container 072f8006ccd71244ab99b766c666707a8e12dc5d439b1ab3161da9033fe19920: Status 404 returned error can't find the container with id 072f8006ccd71244ab99b766c666707a8e12dc5d439b1ab3161da9033fe19920
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.733751 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.752127 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.770906 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 16:09:24 crc kubenswrapper[4857]: E1128 16:09:24.771577 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d58ec9e-5301-4a49-9c34-2704d8db30e1" containerName="nova-scheduler-scheduler"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.771655 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d58ec9e-5301-4a49-9c34-2704d8db30e1" containerName="nova-scheduler-scheduler"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.771953 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d58ec9e-5301-4a49-9c34-2704d8db30e1" containerName="nova-scheduler-scheduler"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.772855 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.775270 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.784007 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 16:09:24 crc kubenswrapper[4857]: W1128 16:09:24.792019 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf18de81f_0f74_4296_b62e_f1aea60e4f67.slice/crio-2c3c45f0497114cb5f0637a441677cebc8e094f583787e745a65744db2e84e49 WatchSource:0}: Error finding container 2c3c45f0497114cb5f0637a441677cebc8e094f583787e745a65744db2e84e49: Status 404 returned error can't find the container with id 2c3c45f0497114cb5f0637a441677cebc8e094f583787e745a65744db2e84e49
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.803121 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.834808 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4ddd466-5f9f-4920-a84a-63ad934e4e74-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"b4ddd466-5f9f-4920-a84a-63ad934e4e74\") " pod="openstack/nova-scheduler-0"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.834965 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4ddd466-5f9f-4920-a84a-63ad934e4e74-config-data\") pod \"nova-scheduler-0\" (UID: \"b4ddd466-5f9f-4920-a84a-63ad934e4e74\") " pod="openstack/nova-scheduler-0"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.835000 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vgg2b\" (UniqueName: \"kubernetes.io/projected/b4ddd466-5f9f-4920-a84a-63ad934e4e74-kube-api-access-vgg2b\") pod \"nova-scheduler-0\" (UID: \"b4ddd466-5f9f-4920-a84a-63ad934e4e74\") " pod="openstack/nova-scheduler-0"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.937409 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4ddd466-5f9f-4920-a84a-63ad934e4e74-config-data\") pod \"nova-scheduler-0\" (UID: \"b4ddd466-5f9f-4920-a84a-63ad934e4e74\") " pod="openstack/nova-scheduler-0"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.937769 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vgg2b\" (UniqueName: \"kubernetes.io/projected/b4ddd466-5f9f-4920-a84a-63ad934e4e74-kube-api-access-vgg2b\") pod \"nova-scheduler-0\" (UID: \"b4ddd466-5f9f-4920-a84a-63ad934e4e74\") " pod="openstack/nova-scheduler-0"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.938033 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4ddd466-5f9f-4920-a84a-63ad934e4e74-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"b4ddd466-5f9f-4920-a84a-63ad934e4e74\") " pod="openstack/nova-scheduler-0"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.943549 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4ddd466-5f9f-4920-a84a-63ad934e4e74-config-data\") pod \"nova-scheduler-0\" (UID: \"b4ddd466-5f9f-4920-a84a-63ad934e4e74\") " pod="openstack/nova-scheduler-0"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.943567 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4ddd466-5f9f-4920-a84a-63ad934e4e74-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"b4ddd466-5f9f-4920-a84a-63ad934e4e74\") " pod="openstack/nova-scheduler-0"
Nov 28 16:09:24 crc kubenswrapper[4857]: I1128 16:09:24.955104 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vgg2b\" (UniqueName: \"kubernetes.io/projected/b4ddd466-5f9f-4920-a84a-63ad934e4e74-kube-api-access-vgg2b\") pod \"nova-scheduler-0\" (UID: \"b4ddd466-5f9f-4920-a84a-63ad934e4e74\") " pod="openstack/nova-scheduler-0"
Nov 28 16:09:25 crc kubenswrapper[4857]: I1128 16:09:25.116025 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Nov 28 16:09:25 crc kubenswrapper[4857]: I1128 16:09:25.338404 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f18de81f-0f74-4296-b62e-f1aea60e4f67","Type":"ContainerStarted","Data":"afa99a12c8718fd6f87c1aae678b41fc3da9eef833f1895a6a33b9c2603d3dec"}
Nov 28 16:09:25 crc kubenswrapper[4857]: I1128 16:09:25.338721 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f18de81f-0f74-4296-b62e-f1aea60e4f67","Type":"ContainerStarted","Data":"82f01ebcbd8ea7f34b32297469302aacc179214a9178cbe9b9db0b83b600e940"}
Nov 28 16:09:25 crc kubenswrapper[4857]: I1128 16:09:25.338736 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f18de81f-0f74-4296-b62e-f1aea60e4f67","Type":"ContainerStarted","Data":"2c3c45f0497114cb5f0637a441677cebc8e094f583787e745a65744db2e84e49"}
Nov 28 16:09:25 crc kubenswrapper[4857]: I1128 16:09:25.342700 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a02553cd-561a-4d5a-a986-076342a5430b","Type":"ContainerStarted","Data":"e1676f68299446e96147a304dd18d59c10b9478e281cba149952ecf1ea837c00"}
Nov 28 16:09:25 crc kubenswrapper[4857]: I1128 16:09:25.342748 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a02553cd-561a-4d5a-a986-076342a5430b","Type":"ContainerStarted","Data":"b0a0a8d805e87e237cdf774aa715c94ef6e32e4eb7d1ecfe20d52a94240ec06a"}
Nov 28 16:09:25 crc kubenswrapper[4857]: I1128 16:09:25.342762 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a02553cd-561a-4d5a-a986-076342a5430b","Type":"ContainerStarted","Data":"072f8006ccd71244ab99b766c666707a8e12dc5d439b1ab3161da9033fe19920"}
Nov 28 16:09:25 crc kubenswrapper[4857]: I1128 16:09:25.371477 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.371450901 podStartE2EDuration="2.371450901s" podCreationTimestamp="2025-11-28 16:09:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:09:25.354295427 +0000 UTC m=+9615.478236864" watchObservedRunningTime="2025-11-28 16:09:25.371450901 +0000 UTC m=+9615.495392338"
Nov 28 16:09:25 crc kubenswrapper[4857]: I1128 16:09:25.380316 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.380294744 podStartE2EDuration="2.380294744s" podCreationTimestamp="2025-11-28 16:09:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:09:25.371831631 +0000 UTC m=+9615.495773068" watchObservedRunningTime="2025-11-28 16:09:25.380294744 +0000 UTC m=+9615.504236181"
Nov 28 16:09:25 crc kubenswrapper[4857]: I1128 16:09:25.622582 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 28 16:09:26 crc kubenswrapper[4857]: I1128 16:09:26.253802 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d58ec9e-5301-4a49-9c34-2704d8db30e1" path="/var/lib/kubelet/pods/4d58ec9e-5301-4a49-9c34-2704d8db30e1/volumes"
Nov 28 16:09:26 crc kubenswrapper[4857]: I1128 16:09:26.367386 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"b4ddd466-5f9f-4920-a84a-63ad934e4e74","Type":"ContainerStarted","Data":"a34677eb8f50accca1d49df98d932dcc664805e914d7a345019da084e0e95cce"}
Nov 28 16:09:26 crc kubenswrapper[4857]: I1128 16:09:26.367504 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"b4ddd466-5f9f-4920-a84a-63ad934e4e74","Type":"ContainerStarted","Data":"e495edb60b66325c2b953aa05f090cf1b34a19a20e268d166f1af5fdaaddac8d"}
Nov 28 16:09:26 crc kubenswrapper[4857]: I1128 16:09:26.400601 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.400571119 podStartE2EDuration="2.400571119s" podCreationTimestamp="2025-11-28 16:09:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-28 16:09:26.382199673 +0000 UTC m=+9616.506141180" watchObservedRunningTime="2025-11-28 16:09:26.400571119 +0000 UTC m=+9616.524512596"
Nov 28 16:09:29 crc kubenswrapper[4857]: I1128 16:09:29.118686 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 28 16:09:29 crc kubenswrapper[4857]: I1128 16:09:29.119240 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Nov 28 16:09:29 crc kubenswrapper[4857]: I1128 16:09:29.769225 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0"
Nov 28 16:09:30 crc kubenswrapper[4857]: I1128 16:09:30.116296 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Nov 28 16:09:30 crc kubenswrapper[4857]: I1128 16:09:30.783526 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0"
Nov 28 16:09:32 crc kubenswrapper[4857]: I1128 16:09:32.908768 4857 scope.go:117] "RemoveContainer" containerID="b5d696117642bc1f98c754b7864abc086a755e8fb92918f8591d2e1ce6393d45"
Nov 28 16:09:32 crc kubenswrapper[4857]: I1128 16:09:32.945520 4857 scope.go:117] "RemoveContainer" containerID="5420d7647e4cd92b8d2b3546873c0b0d3ce467e1452887f1299f47651d3525bc"
Nov 28 16:09:34 crc kubenswrapper[4857]: I1128 16:09:34.118804 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Nov 28 16:09:34 crc kubenswrapper[4857]: I1128 16:09:34.119176 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Nov 28 16:09:34 crc kubenswrapper[4857]: I1128 16:09:34.186327 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 28 16:09:34 crc kubenswrapper[4857]: I1128 16:09:34.186400 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Nov 28 16:09:35 crc kubenswrapper[4857]: I1128 16:09:35.116979 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Nov 28 16:09:35 crc kubenswrapper[4857]: I1128 16:09:35.201144 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="a02553cd-561a-4d5a-a986-076342a5430b" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"http://10.217.1.198:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 28 16:09:35 crc kubenswrapper[4857]: I1128 16:09:35.201144 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="a02553cd-561a-4d5a-a986-076342a5430b" containerName="nova-metadata-log" probeResult="failure" output="Get \"http://10.217.1.198:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 28 16:09:35 crc kubenswrapper[4857]: I1128 16:09:35.284175 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="f18de81f-0f74-4296-b62e-f1aea60e4f67" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.199:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 28 16:09:35 crc kubenswrapper[4857]: I1128 16:09:35.284289 4857 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="f18de81f-0f74-4296-b62e-f1aea60e4f67" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.199:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 28 16:09:35 crc kubenswrapper[4857]: I1128 16:09:35.416775 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Nov 28 16:09:35 crc kubenswrapper[4857]: I1128 16:09:35.530533 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Nov 28 16:09:44 crc kubenswrapper[4857]: I1128 16:09:44.125125 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 28 16:09:44 crc kubenswrapper[4857]: I1128 16:09:44.127402 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Nov 28 16:09:44 crc kubenswrapper[4857]: I1128 16:09:44.129471 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 28 16:09:44 crc kubenswrapper[4857]: I1128 16:09:44.190718 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 28 16:09:44 crc kubenswrapper[4857]: I1128 16:09:44.191215 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 28 16:09:44 crc kubenswrapper[4857]: I1128 16:09:44.191604 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Nov 28 16:09:44 crc kubenswrapper[4857]: I1128 16:09:44.194782 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 28 16:09:44 crc kubenswrapper[4857]: I1128 16:09:44.597085 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Nov 28 16:09:44 crc kubenswrapper[4857]: I1128 16:09:44.599230 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Nov 28 16:09:44 crc kubenswrapper[4857]: I1128 16:09:44.601523 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Nov 28 16:09:45 crc kubenswrapper[4857]: I1128 16:09:45.853213 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w"]
Nov 28 16:09:45 crc kubenswrapper[4857]: I1128 16:09:45.855813 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w"
Nov 28 16:09:45 crc kubenswrapper[4857]: I1128 16:09:45.858102 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config"
Nov 28 16:09:45 crc kubenswrapper[4857]: I1128 16:09:45.858502 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-qbvn9"
Nov 28 16:09:45 crc kubenswrapper[4857]: I1128 16:09:45.858765 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1"
Nov 28 16:09:45 crc kubenswrapper[4857]: I1128 16:09:45.858816 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key"
Nov 28 16:09:45 crc kubenswrapper[4857]: I1128 16:09:45.859088 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret"
Nov 28 16:09:45 crc kubenswrapper[4857]: I1128 16:09:45.859972 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 28 16:09:45 crc kubenswrapper[4857]: I1128 16:09:45.860314 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-cells-global-config"
Nov 28 16:09:45 crc kubenswrapper[4857]: I1128 16:09:45.873811 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w"]
Nov 28 16:09:45 crc kubenswrapper[4857]: I1128 16:09:45.928503 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w"
Nov 28 16:09:45 crc kubenswrapper[4857]: I1128 16:09:45.928544 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w"
Nov 28 16:09:45 crc kubenswrapper[4857]: I1128 16:09:45.928598 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-inventory\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w"
Nov 28 16:09:45 crc kubenswrapper[4857]: I1128 16:09:45.928889 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w"
Nov 28 16:09:45 crc kubenswrapper[4857]: I1128 16:09:45.928939 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w"
Nov 28 16:09:45 crc kubenswrapper[4857]: I1128 16:09:45.929000 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-cells-global-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w"
Nov 28 16:09:45 crc kubenswrapper[4857]: I1128 16:09:45.929021 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xx2gq\" (UniqueName: \"kubernetes.io/projected/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-kube-api-access-xx2gq\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w"
Nov 28 16:09:45 crc kubenswrapper[4857]: I1128 16:09:45.929075 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-cells-global-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w"
Nov 28 16:09:45 crc kubenswrapper[4857]: I1128 16:09:45.929091 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-ssh-key\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w"
Nov 28 16:09:45 crc kubenswrapper[4857]: I1128 16:09:45.929122 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-ceph\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w\" (UID:
\"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w" Nov 28 16:09:45 crc kubenswrapper[4857]: I1128 16:09:45.929149 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w" Nov 28 16:09:46 crc kubenswrapper[4857]: I1128 16:09:46.031001 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w" Nov 28 16:09:46 crc kubenswrapper[4857]: I1128 16:09:46.031406 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w" Nov 28 16:09:46 crc kubenswrapper[4857]: I1128 16:09:46.031470 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-inventory\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w" Nov 28 16:09:46 crc kubenswrapper[4857]: I1128 16:09:46.031583 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w" Nov 28 16:09:46 crc kubenswrapper[4857]: I1128 16:09:46.031610 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w" Nov 28 16:09:46 crc kubenswrapper[4857]: I1128 16:09:46.031642 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-cells-global-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w" Nov 28 16:09:46 crc kubenswrapper[4857]: I1128 16:09:46.031671 4857 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xx2gq\" (UniqueName: \"kubernetes.io/projected/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-kube-api-access-xx2gq\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w" Nov 28 16:09:46 crc kubenswrapper[4857]: I1128 16:09:46.031722 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-cells-global-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w" Nov 28 16:09:46 crc kubenswrapper[4857]: I1128 16:09:46.031744 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-ssh-key\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w" Nov 28 16:09:46 crc kubenswrapper[4857]: I1128 16:09:46.031789 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-ceph\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w" Nov 28 16:09:46 crc kubenswrapper[4857]: I1128 16:09:46.031829 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w" Nov 28 16:09:46 crc kubenswrapper[4857]: I1128 16:09:46.032734 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-cells-global-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w" Nov 28 16:09:46 crc kubenswrapper[4857]: I1128 16:09:46.032896 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-cells-global-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w" Nov 28 16:09:46 crc kubenswrapper[4857]: I1128 16:09:46.036513 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w\" (UID: 
\"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w" Nov 28 16:09:46 crc kubenswrapper[4857]: I1128 16:09:46.036990 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w" Nov 28 16:09:46 crc kubenswrapper[4857]: I1128 16:09:46.040199 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w" Nov 28 16:09:46 crc kubenswrapper[4857]: I1128 16:09:46.049556 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w" Nov 28 16:09:46 crc kubenswrapper[4857]: I1128 16:09:46.050264 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-ssh-key\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w" Nov 28 16:09:46 crc kubenswrapper[4857]: I1128 16:09:46.050523 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-inventory\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w" Nov 28 16:09:46 crc kubenswrapper[4857]: I1128 16:09:46.050827 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w" Nov 28 16:09:46 crc kubenswrapper[4857]: I1128 16:09:46.052148 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-ceph\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w" Nov 28 16:09:46 crc kubenswrapper[4857]: I1128 16:09:46.056340 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xx2gq\" (UniqueName: 
\"kubernetes.io/projected/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-kube-api-access-xx2gq\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w" Nov 28 16:09:46 crc kubenswrapper[4857]: I1128 16:09:46.176205 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w" Nov 28 16:09:46 crc kubenswrapper[4857]: I1128 16:09:46.915876 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w"] Nov 28 16:09:47 crc kubenswrapper[4857]: W1128 16:09:47.079154 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6bb7d13b_f954_4298_b9e9_3b5d36f8591c.slice/crio-27a90a2a3485b85a2ec9f00ce07574665aa9bb033a58cb96bfe28c7cd8c25210 WatchSource:0}: Error finding container 27a90a2a3485b85a2ec9f00ce07574665aa9bb033a58cb96bfe28c7cd8c25210: Status 404 returned error can't find the container with id 27a90a2a3485b85a2ec9f00ce07574665aa9bb033a58cb96bfe28c7cd8c25210 Nov 28 16:09:47 crc kubenswrapper[4857]: I1128 16:09:47.627356 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w" event={"ID":"6bb7d13b-f954-4298-b9e9-3b5d36f8591c","Type":"ContainerStarted","Data":"b77f5276691e4a0a46c40b5e9fc5a8a5b409f7a86e2a83523204568f179b886c"} Nov 28 16:09:47 crc kubenswrapper[4857]: I1128 16:09:47.627658 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w" event={"ID":"6bb7d13b-f954-4298-b9e9-3b5d36f8591c","Type":"ContainerStarted","Data":"27a90a2a3485b85a2ec9f00ce07574665aa9bb033a58cb96bfe28c7cd8c25210"} Nov 28 16:09:47 crc kubenswrapper[4857]: I1128 16:09:47.649006 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w" podStartSLOduration=2.441305016 podStartE2EDuration="2.648986298s" podCreationTimestamp="2025-11-28 16:09:45 +0000 UTC" firstStartedPulling="2025-11-28 16:09:47.082354112 +0000 UTC m=+9637.206295559" lastFinishedPulling="2025-11-28 16:09:47.290035404 +0000 UTC m=+9637.413976841" observedRunningTime="2025-11-28 16:09:47.645924847 +0000 UTC m=+9637.769866294" watchObservedRunningTime="2025-11-28 16:09:47.648986298 +0000 UTC m=+9637.772927735" Nov 28 16:09:50 crc kubenswrapper[4857]: I1128 16:09:50.324600 4857 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","podbfd140a1-03fc-4dc7-9017-ab03325863e3"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort podbfd140a1-03fc-4dc7-9017-ab03325863e3] : Timed out while waiting for systemd to remove kubepods-besteffort-podbfd140a1_03fc_4dc7_9017_ab03325863e3.slice" Nov 28 16:11:11 crc kubenswrapper[4857]: I1128 16:11:11.308358 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:11:11 crc kubenswrapper[4857]: I1128 16:11:11.309073 4857 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:11:41 crc kubenswrapper[4857]: I1128 16:11:41.308634 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:11:41 crc kubenswrapper[4857]: I1128 16:11:41.309417 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:12:11 crc kubenswrapper[4857]: I1128 16:12:11.308596 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:12:11 crc kubenswrapper[4857]: I1128 16:12:11.309180 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:12:11 crc kubenswrapper[4857]: I1128 16:12:11.309227 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 16:12:11 crc kubenswrapper[4857]: I1128 16:12:11.310114 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c942c5d609ce7e62c0a6cd31d31c499b688c7fdc91159358f38ec73db2f1a01e"} pod="openshift-machine-config-operator/machine-config-daemon-dshsf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:12:11 crc kubenswrapper[4857]: I1128 16:12:11.310179 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" containerID="cri-o://c942c5d609ce7e62c0a6cd31d31c499b688c7fdc91159358f38ec73db2f1a01e" gracePeriod=600 Nov 28 16:12:12 crc kubenswrapper[4857]: I1128 16:12:12.297855 4857 generic.go:334] "Generic (PLEG): container finished" podID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerID="c942c5d609ce7e62c0a6cd31d31c499b688c7fdc91159358f38ec73db2f1a01e" exitCode=0 Nov 28 16:12:12 crc kubenswrapper[4857]: I1128 16:12:12.297961 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerDied","Data":"c942c5d609ce7e62c0a6cd31d31c499b688c7fdc91159358f38ec73db2f1a01e"} Nov 28 16:12:12 crc kubenswrapper[4857]: I1128 16:12:12.298426 4857 scope.go:117] "RemoveContainer" 
containerID="eae255c7ca23e085c377e42c983c0700a2ad67405174b16d098d016c18acf626" Nov 28 16:12:12 crc kubenswrapper[4857]: I1128 16:12:12.298235 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerStarted","Data":"c74305952f95e4003fa797d662a671565c73d0c150114e268ea397949f69fe95"} Nov 28 16:13:23 crc kubenswrapper[4857]: I1128 16:13:23.459588 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-sld95"] Nov 28 16:13:23 crc kubenswrapper[4857]: I1128 16:13:23.462973 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sld95" Nov 28 16:13:23 crc kubenswrapper[4857]: I1128 16:13:23.477483 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-sld95"] Nov 28 16:13:23 crc kubenswrapper[4857]: I1128 16:13:23.564593 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5d8ph\" (UniqueName: \"kubernetes.io/projected/d8600045-0865-4e5c-95f4-0721a1b8f9a7-kube-api-access-5d8ph\") pod \"redhat-marketplace-sld95\" (UID: \"d8600045-0865-4e5c-95f4-0721a1b8f9a7\") " pod="openshift-marketplace/redhat-marketplace-sld95" Nov 28 16:13:23 crc kubenswrapper[4857]: I1128 16:13:23.564746 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8600045-0865-4e5c-95f4-0721a1b8f9a7-utilities\") pod \"redhat-marketplace-sld95\" (UID: \"d8600045-0865-4e5c-95f4-0721a1b8f9a7\") " pod="openshift-marketplace/redhat-marketplace-sld95" Nov 28 16:13:23 crc kubenswrapper[4857]: I1128 16:13:23.564797 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8600045-0865-4e5c-95f4-0721a1b8f9a7-catalog-content\") pod \"redhat-marketplace-sld95\" (UID: \"d8600045-0865-4e5c-95f4-0721a1b8f9a7\") " pod="openshift-marketplace/redhat-marketplace-sld95" Nov 28 16:13:23 crc kubenswrapper[4857]: I1128 16:13:23.666846 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5d8ph\" (UniqueName: \"kubernetes.io/projected/d8600045-0865-4e5c-95f4-0721a1b8f9a7-kube-api-access-5d8ph\") pod \"redhat-marketplace-sld95\" (UID: \"d8600045-0865-4e5c-95f4-0721a1b8f9a7\") " pod="openshift-marketplace/redhat-marketplace-sld95" Nov 28 16:13:23 crc kubenswrapper[4857]: I1128 16:13:23.667049 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8600045-0865-4e5c-95f4-0721a1b8f9a7-utilities\") pod \"redhat-marketplace-sld95\" (UID: \"d8600045-0865-4e5c-95f4-0721a1b8f9a7\") " pod="openshift-marketplace/redhat-marketplace-sld95" Nov 28 16:13:23 crc kubenswrapper[4857]: I1128 16:13:23.667126 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8600045-0865-4e5c-95f4-0721a1b8f9a7-catalog-content\") pod \"redhat-marketplace-sld95\" (UID: \"d8600045-0865-4e5c-95f4-0721a1b8f9a7\") " pod="openshift-marketplace/redhat-marketplace-sld95" Nov 28 16:13:23 crc kubenswrapper[4857]: I1128 16:13:23.667809 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/d8600045-0865-4e5c-95f4-0721a1b8f9a7-utilities\") pod \"redhat-marketplace-sld95\" (UID: \"d8600045-0865-4e5c-95f4-0721a1b8f9a7\") " pod="openshift-marketplace/redhat-marketplace-sld95" Nov 28 16:13:23 crc kubenswrapper[4857]: I1128 16:13:23.667937 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8600045-0865-4e5c-95f4-0721a1b8f9a7-catalog-content\") pod \"redhat-marketplace-sld95\" (UID: \"d8600045-0865-4e5c-95f4-0721a1b8f9a7\") " pod="openshift-marketplace/redhat-marketplace-sld95" Nov 28 16:13:24 crc kubenswrapper[4857]: I1128 16:13:24.283871 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5d8ph\" (UniqueName: \"kubernetes.io/projected/d8600045-0865-4e5c-95f4-0721a1b8f9a7-kube-api-access-5d8ph\") pod \"redhat-marketplace-sld95\" (UID: \"d8600045-0865-4e5c-95f4-0721a1b8f9a7\") " pod="openshift-marketplace/redhat-marketplace-sld95" Nov 28 16:13:24 crc kubenswrapper[4857]: I1128 16:13:24.393990 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sld95" Nov 28 16:13:24 crc kubenswrapper[4857]: I1128 16:13:24.877197 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-sld95"] Nov 28 16:13:25 crc kubenswrapper[4857]: I1128 16:13:25.151108 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sld95" event={"ID":"d8600045-0865-4e5c-95f4-0721a1b8f9a7","Type":"ContainerStarted","Data":"8fa3f931b856566db3d50908b64af5c26bc2b973a46bf6be45b21641102f8756"} Nov 28 16:13:26 crc kubenswrapper[4857]: I1128 16:13:26.164939 4857 generic.go:334] "Generic (PLEG): container finished" podID="d8600045-0865-4e5c-95f4-0721a1b8f9a7" containerID="6c69b0592e962664290b81f55ef6d2c91d92744ad6c100ae01d9b3fba5e43210" exitCode=0 Nov 28 16:13:26 crc kubenswrapper[4857]: I1128 16:13:26.165034 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sld95" event={"ID":"d8600045-0865-4e5c-95f4-0721a1b8f9a7","Type":"ContainerDied","Data":"6c69b0592e962664290b81f55ef6d2c91d92744ad6c100ae01d9b3fba5e43210"} Nov 28 16:13:26 crc kubenswrapper[4857]: I1128 16:13:26.170243 4857 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 16:13:28 crc kubenswrapper[4857]: I1128 16:13:28.189876 4857 generic.go:334] "Generic (PLEG): container finished" podID="d8600045-0865-4e5c-95f4-0721a1b8f9a7" containerID="c16178c0dcd77d705ace36de1409f121ab5f87cbd8ad7904bef6e88c02853202" exitCode=0 Nov 28 16:13:28 crc kubenswrapper[4857]: I1128 16:13:28.190706 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sld95" event={"ID":"d8600045-0865-4e5c-95f4-0721a1b8f9a7","Type":"ContainerDied","Data":"c16178c0dcd77d705ace36de1409f121ab5f87cbd8ad7904bef6e88c02853202"} Nov 28 16:13:29 crc kubenswrapper[4857]: I1128 16:13:29.204307 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sld95" event={"ID":"d8600045-0865-4e5c-95f4-0721a1b8f9a7","Type":"ContainerStarted","Data":"c7251036860d2a38e7ba3ad3c874a288aef11cec9a0333fcc9acd60990e83425"} Nov 28 16:13:29 crc kubenswrapper[4857]: I1128 16:13:29.225128 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-sld95" podStartSLOduration=3.549638575 
podStartE2EDuration="6.225109626s" podCreationTimestamp="2025-11-28 16:13:23 +0000 UTC" firstStartedPulling="2025-11-28 16:13:26.170027825 +0000 UTC m=+9856.293969262" lastFinishedPulling="2025-11-28 16:13:28.845498876 +0000 UTC m=+9858.969440313" observedRunningTime="2025-11-28 16:13:29.221684705 +0000 UTC m=+9859.345626162" watchObservedRunningTime="2025-11-28 16:13:29.225109626 +0000 UTC m=+9859.349051073" Nov 28 16:13:34 crc kubenswrapper[4857]: I1128 16:13:34.395963 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-sld95" Nov 28 16:13:34 crc kubenswrapper[4857]: I1128 16:13:34.396775 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-sld95" Nov 28 16:13:34 crc kubenswrapper[4857]: I1128 16:13:34.482642 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-sld95" Nov 28 16:13:35 crc kubenswrapper[4857]: I1128 16:13:35.329384 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-sld95" Nov 28 16:13:35 crc kubenswrapper[4857]: I1128 16:13:35.391705 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-sld95"] Nov 28 16:13:37 crc kubenswrapper[4857]: I1128 16:13:37.294218 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-sld95" podUID="d8600045-0865-4e5c-95f4-0721a1b8f9a7" containerName="registry-server" containerID="cri-o://c7251036860d2a38e7ba3ad3c874a288aef11cec9a0333fcc9acd60990e83425" gracePeriod=2 Nov 28 16:13:37 crc kubenswrapper[4857]: I1128 16:13:37.773492 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sld95" Nov 28 16:13:37 crc kubenswrapper[4857]: I1128 16:13:37.910825 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5d8ph\" (UniqueName: \"kubernetes.io/projected/d8600045-0865-4e5c-95f4-0721a1b8f9a7-kube-api-access-5d8ph\") pod \"d8600045-0865-4e5c-95f4-0721a1b8f9a7\" (UID: \"d8600045-0865-4e5c-95f4-0721a1b8f9a7\") " Nov 28 16:13:37 crc kubenswrapper[4857]: I1128 16:13:37.910912 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8600045-0865-4e5c-95f4-0721a1b8f9a7-catalog-content\") pod \"d8600045-0865-4e5c-95f4-0721a1b8f9a7\" (UID: \"d8600045-0865-4e5c-95f4-0721a1b8f9a7\") " Nov 28 16:13:37 crc kubenswrapper[4857]: I1128 16:13:37.911129 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8600045-0865-4e5c-95f4-0721a1b8f9a7-utilities\") pod \"d8600045-0865-4e5c-95f4-0721a1b8f9a7\" (UID: \"d8600045-0865-4e5c-95f4-0721a1b8f9a7\") " Nov 28 16:13:37 crc kubenswrapper[4857]: I1128 16:13:37.911995 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8600045-0865-4e5c-95f4-0721a1b8f9a7-utilities" (OuterVolumeSpecName: "utilities") pod "d8600045-0865-4e5c-95f4-0721a1b8f9a7" (UID: "d8600045-0865-4e5c-95f4-0721a1b8f9a7"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:13:37 crc kubenswrapper[4857]: I1128 16:13:37.926518 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8600045-0865-4e5c-95f4-0721a1b8f9a7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d8600045-0865-4e5c-95f4-0721a1b8f9a7" (UID: "d8600045-0865-4e5c-95f4-0721a1b8f9a7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:13:38 crc kubenswrapper[4857]: I1128 16:13:38.013573 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d8600045-0865-4e5c-95f4-0721a1b8f9a7-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:13:38 crc kubenswrapper[4857]: I1128 16:13:38.013618 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d8600045-0865-4e5c-95f4-0721a1b8f9a7-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:13:38 crc kubenswrapper[4857]: I1128 16:13:38.305712 4857 generic.go:334] "Generic (PLEG): container finished" podID="d8600045-0865-4e5c-95f4-0721a1b8f9a7" containerID="c7251036860d2a38e7ba3ad3c874a288aef11cec9a0333fcc9acd60990e83425" exitCode=0 Nov 28 16:13:38 crc kubenswrapper[4857]: I1128 16:13:38.305758 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sld95" event={"ID":"d8600045-0865-4e5c-95f4-0721a1b8f9a7","Type":"ContainerDied","Data":"c7251036860d2a38e7ba3ad3c874a288aef11cec9a0333fcc9acd60990e83425"} Nov 28 16:13:38 crc kubenswrapper[4857]: I1128 16:13:38.305780 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sld95" Nov 28 16:13:38 crc kubenswrapper[4857]: I1128 16:13:38.305788 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sld95" event={"ID":"d8600045-0865-4e5c-95f4-0721a1b8f9a7","Type":"ContainerDied","Data":"8fa3f931b856566db3d50908b64af5c26bc2b973a46bf6be45b21641102f8756"} Nov 28 16:13:38 crc kubenswrapper[4857]: I1128 16:13:38.305808 4857 scope.go:117] "RemoveContainer" containerID="c7251036860d2a38e7ba3ad3c874a288aef11cec9a0333fcc9acd60990e83425" Nov 28 16:13:38 crc kubenswrapper[4857]: I1128 16:13:38.329796 4857 scope.go:117] "RemoveContainer" containerID="c16178c0dcd77d705ace36de1409f121ab5f87cbd8ad7904bef6e88c02853202" Nov 28 16:13:38 crc kubenswrapper[4857]: I1128 16:13:38.482811 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8600045-0865-4e5c-95f4-0721a1b8f9a7-kube-api-access-5d8ph" (OuterVolumeSpecName: "kube-api-access-5d8ph") pod "d8600045-0865-4e5c-95f4-0721a1b8f9a7" (UID: "d8600045-0865-4e5c-95f4-0721a1b8f9a7"). InnerVolumeSpecName "kube-api-access-5d8ph". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:13:38 crc kubenswrapper[4857]: I1128 16:13:38.497122 4857 scope.go:117] "RemoveContainer" containerID="6c69b0592e962664290b81f55ef6d2c91d92744ad6c100ae01d9b3fba5e43210" Nov 28 16:13:38 crc kubenswrapper[4857]: I1128 16:13:38.525881 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5d8ph\" (UniqueName: \"kubernetes.io/projected/d8600045-0865-4e5c-95f4-0721a1b8f9a7-kube-api-access-5d8ph\") on node \"crc\" DevicePath \"\"" Nov 28 16:13:38 crc kubenswrapper[4857]: I1128 16:13:38.593531 4857 scope.go:117] "RemoveContainer" containerID="c7251036860d2a38e7ba3ad3c874a288aef11cec9a0333fcc9acd60990e83425" Nov 28 16:13:38 crc kubenswrapper[4857]: E1128 16:13:38.594114 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c7251036860d2a38e7ba3ad3c874a288aef11cec9a0333fcc9acd60990e83425\": container with ID starting with c7251036860d2a38e7ba3ad3c874a288aef11cec9a0333fcc9acd60990e83425 not found: ID does not exist" containerID="c7251036860d2a38e7ba3ad3c874a288aef11cec9a0333fcc9acd60990e83425" Nov 28 16:13:38 crc kubenswrapper[4857]: I1128 16:13:38.594156 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c7251036860d2a38e7ba3ad3c874a288aef11cec9a0333fcc9acd60990e83425"} err="failed to get container status \"c7251036860d2a38e7ba3ad3c874a288aef11cec9a0333fcc9acd60990e83425\": rpc error: code = NotFound desc = could not find container \"c7251036860d2a38e7ba3ad3c874a288aef11cec9a0333fcc9acd60990e83425\": container with ID starting with c7251036860d2a38e7ba3ad3c874a288aef11cec9a0333fcc9acd60990e83425 not found: ID does not exist" Nov 28 16:13:38 crc kubenswrapper[4857]: I1128 16:13:38.594184 4857 scope.go:117] "RemoveContainer" containerID="c16178c0dcd77d705ace36de1409f121ab5f87cbd8ad7904bef6e88c02853202" Nov 28 16:13:38 crc kubenswrapper[4857]: E1128 16:13:38.594600 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c16178c0dcd77d705ace36de1409f121ab5f87cbd8ad7904bef6e88c02853202\": container with ID starting with c16178c0dcd77d705ace36de1409f121ab5f87cbd8ad7904bef6e88c02853202 not found: ID does not exist" containerID="c16178c0dcd77d705ace36de1409f121ab5f87cbd8ad7904bef6e88c02853202" Nov 28 16:13:38 crc kubenswrapper[4857]: I1128 16:13:38.594654 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c16178c0dcd77d705ace36de1409f121ab5f87cbd8ad7904bef6e88c02853202"} err="failed to get container status \"c16178c0dcd77d705ace36de1409f121ab5f87cbd8ad7904bef6e88c02853202\": rpc error: code = NotFound desc = could not find container \"c16178c0dcd77d705ace36de1409f121ab5f87cbd8ad7904bef6e88c02853202\": container with ID starting with c16178c0dcd77d705ace36de1409f121ab5f87cbd8ad7904bef6e88c02853202 not found: ID does not exist" Nov 28 16:13:38 crc kubenswrapper[4857]: I1128 16:13:38.594686 4857 scope.go:117] "RemoveContainer" containerID="6c69b0592e962664290b81f55ef6d2c91d92744ad6c100ae01d9b3fba5e43210" Nov 28 16:13:38 crc kubenswrapper[4857]: E1128 16:13:38.595141 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6c69b0592e962664290b81f55ef6d2c91d92744ad6c100ae01d9b3fba5e43210\": container with ID starting with 6c69b0592e962664290b81f55ef6d2c91d92744ad6c100ae01d9b3fba5e43210 not found: ID does not 
exist" containerID="6c69b0592e962664290b81f55ef6d2c91d92744ad6c100ae01d9b3fba5e43210" Nov 28 16:13:38 crc kubenswrapper[4857]: I1128 16:13:38.595185 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6c69b0592e962664290b81f55ef6d2c91d92744ad6c100ae01d9b3fba5e43210"} err="failed to get container status \"6c69b0592e962664290b81f55ef6d2c91d92744ad6c100ae01d9b3fba5e43210\": rpc error: code = NotFound desc = could not find container \"6c69b0592e962664290b81f55ef6d2c91d92744ad6c100ae01d9b3fba5e43210\": container with ID starting with 6c69b0592e962664290b81f55ef6d2c91d92744ad6c100ae01d9b3fba5e43210 not found: ID does not exist" Nov 28 16:13:38 crc kubenswrapper[4857]: I1128 16:13:38.653916 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-sld95"] Nov 28 16:13:38 crc kubenswrapper[4857]: I1128 16:13:38.666388 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-sld95"] Nov 28 16:13:40 crc kubenswrapper[4857]: I1128 16:13:40.250772 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8600045-0865-4e5c-95f4-0721a1b8f9a7" path="/var/lib/kubelet/pods/d8600045-0865-4e5c-95f4-0721a1b8f9a7/volumes" Nov 28 16:14:11 crc kubenswrapper[4857]: I1128 16:14:11.308981 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:14:11 crc kubenswrapper[4857]: I1128 16:14:11.309916 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:14:41 crc kubenswrapper[4857]: I1128 16:14:41.308795 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:14:41 crc kubenswrapper[4857]: I1128 16:14:41.309725 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:15:00 crc kubenswrapper[4857]: I1128 16:15:00.153735 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405775-pswgx"] Nov 28 16:15:00 crc kubenswrapper[4857]: E1128 16:15:00.154794 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8600045-0865-4e5c-95f4-0721a1b8f9a7" containerName="extract-utilities" Nov 28 16:15:00 crc kubenswrapper[4857]: I1128 16:15:00.154809 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8600045-0865-4e5c-95f4-0721a1b8f9a7" containerName="extract-utilities" Nov 28 16:15:00 crc kubenswrapper[4857]: E1128 16:15:00.154834 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8600045-0865-4e5c-95f4-0721a1b8f9a7" 
containerName="registry-server" Nov 28 16:15:00 crc kubenswrapper[4857]: I1128 16:15:00.154840 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8600045-0865-4e5c-95f4-0721a1b8f9a7" containerName="registry-server" Nov 28 16:15:00 crc kubenswrapper[4857]: E1128 16:15:00.154851 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8600045-0865-4e5c-95f4-0721a1b8f9a7" containerName="extract-content" Nov 28 16:15:00 crc kubenswrapper[4857]: I1128 16:15:00.154858 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8600045-0865-4e5c-95f4-0721a1b8f9a7" containerName="extract-content" Nov 28 16:15:00 crc kubenswrapper[4857]: I1128 16:15:00.155101 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8600045-0865-4e5c-95f4-0721a1b8f9a7" containerName="registry-server" Nov 28 16:15:00 crc kubenswrapper[4857]: I1128 16:15:00.155918 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-pswgx" Nov 28 16:15:00 crc kubenswrapper[4857]: I1128 16:15:00.157794 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 16:15:00 crc kubenswrapper[4857]: I1128 16:15:00.158626 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 16:15:00 crc kubenswrapper[4857]: I1128 16:15:00.169497 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405775-pswgx"] Nov 28 16:15:00 crc kubenswrapper[4857]: I1128 16:15:00.188340 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5134fc1a-6835-4b5e-b9bf-46a5f9123d7c-config-volume\") pod \"collect-profiles-29405775-pswgx\" (UID: \"5134fc1a-6835-4b5e-b9bf-46a5f9123d7c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-pswgx" Nov 28 16:15:00 crc kubenswrapper[4857]: I1128 16:15:00.188631 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5134fc1a-6835-4b5e-b9bf-46a5f9123d7c-secret-volume\") pod \"collect-profiles-29405775-pswgx\" (UID: \"5134fc1a-6835-4b5e-b9bf-46a5f9123d7c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-pswgx" Nov 28 16:15:00 crc kubenswrapper[4857]: I1128 16:15:00.188801 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t9g9h\" (UniqueName: \"kubernetes.io/projected/5134fc1a-6835-4b5e-b9bf-46a5f9123d7c-kube-api-access-t9g9h\") pod \"collect-profiles-29405775-pswgx\" (UID: \"5134fc1a-6835-4b5e-b9bf-46a5f9123d7c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-pswgx" Nov 28 16:15:00 crc kubenswrapper[4857]: I1128 16:15:00.291865 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5134fc1a-6835-4b5e-b9bf-46a5f9123d7c-config-volume\") pod \"collect-profiles-29405775-pswgx\" (UID: \"5134fc1a-6835-4b5e-b9bf-46a5f9123d7c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-pswgx" Nov 28 16:15:00 crc kubenswrapper[4857]: I1128 16:15:00.292507 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" 
(UniqueName: \"kubernetes.io/secret/5134fc1a-6835-4b5e-b9bf-46a5f9123d7c-secret-volume\") pod \"collect-profiles-29405775-pswgx\" (UID: \"5134fc1a-6835-4b5e-b9bf-46a5f9123d7c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-pswgx" Nov 28 16:15:00 crc kubenswrapper[4857]: I1128 16:15:00.292686 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t9g9h\" (UniqueName: \"kubernetes.io/projected/5134fc1a-6835-4b5e-b9bf-46a5f9123d7c-kube-api-access-t9g9h\") pod \"collect-profiles-29405775-pswgx\" (UID: \"5134fc1a-6835-4b5e-b9bf-46a5f9123d7c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-pswgx" Nov 28 16:15:00 crc kubenswrapper[4857]: I1128 16:15:00.294613 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5134fc1a-6835-4b5e-b9bf-46a5f9123d7c-config-volume\") pod \"collect-profiles-29405775-pswgx\" (UID: \"5134fc1a-6835-4b5e-b9bf-46a5f9123d7c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-pswgx" Nov 28 16:15:00 crc kubenswrapper[4857]: I1128 16:15:00.304434 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5134fc1a-6835-4b5e-b9bf-46a5f9123d7c-secret-volume\") pod \"collect-profiles-29405775-pswgx\" (UID: \"5134fc1a-6835-4b5e-b9bf-46a5f9123d7c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-pswgx" Nov 28 16:15:00 crc kubenswrapper[4857]: I1128 16:15:00.313020 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t9g9h\" (UniqueName: \"kubernetes.io/projected/5134fc1a-6835-4b5e-b9bf-46a5f9123d7c-kube-api-access-t9g9h\") pod \"collect-profiles-29405775-pswgx\" (UID: \"5134fc1a-6835-4b5e-b9bf-46a5f9123d7c\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-pswgx" Nov 28 16:15:00 crc kubenswrapper[4857]: I1128 16:15:00.504737 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-pswgx" Nov 28 16:15:00 crc kubenswrapper[4857]: I1128 16:15:00.966621 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405775-pswgx"] Nov 28 16:15:01 crc kubenswrapper[4857]: W1128 16:15:01.791093 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5134fc1a_6835_4b5e_b9bf_46a5f9123d7c.slice/crio-f6e54c53c5003772f6221995163a54f50ab0dbf90c2c533867ec27424e6bf3d4 WatchSource:0}: Error finding container f6e54c53c5003772f6221995163a54f50ab0dbf90c2c533867ec27424e6bf3d4: Status 404 returned error can't find the container with id f6e54c53c5003772f6221995163a54f50ab0dbf90c2c533867ec27424e6bf3d4 Nov 28 16:15:02 crc kubenswrapper[4857]: I1128 16:15:02.248295 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-pswgx" event={"ID":"5134fc1a-6835-4b5e-b9bf-46a5f9123d7c","Type":"ContainerStarted","Data":"f6e54c53c5003772f6221995163a54f50ab0dbf90c2c533867ec27424e6bf3d4"} Nov 28 16:15:03 crc kubenswrapper[4857]: I1128 16:15:03.250584 4857 generic.go:334] "Generic (PLEG): container finished" podID="5134fc1a-6835-4b5e-b9bf-46a5f9123d7c" containerID="01016f0d61dc86e44645f1dacf84301e03b51e2eca1d7002b3ed0682b1a74554" exitCode=0 Nov 28 16:15:03 crc kubenswrapper[4857]: I1128 16:15:03.250696 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-pswgx" event={"ID":"5134fc1a-6835-4b5e-b9bf-46a5f9123d7c","Type":"ContainerDied","Data":"01016f0d61dc86e44645f1dacf84301e03b51e2eca1d7002b3ed0682b1a74554"} Nov 28 16:15:04 crc kubenswrapper[4857]: I1128 16:15:04.770052 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-pswgx" Nov 28 16:15:04 crc kubenswrapper[4857]: I1128 16:15:04.892016 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t9g9h\" (UniqueName: \"kubernetes.io/projected/5134fc1a-6835-4b5e-b9bf-46a5f9123d7c-kube-api-access-t9g9h\") pod \"5134fc1a-6835-4b5e-b9bf-46a5f9123d7c\" (UID: \"5134fc1a-6835-4b5e-b9bf-46a5f9123d7c\") " Nov 28 16:15:04 crc kubenswrapper[4857]: I1128 16:15:04.892479 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5134fc1a-6835-4b5e-b9bf-46a5f9123d7c-secret-volume\") pod \"5134fc1a-6835-4b5e-b9bf-46a5f9123d7c\" (UID: \"5134fc1a-6835-4b5e-b9bf-46a5f9123d7c\") " Nov 28 16:15:04 crc kubenswrapper[4857]: I1128 16:15:04.892542 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5134fc1a-6835-4b5e-b9bf-46a5f9123d7c-config-volume\") pod \"5134fc1a-6835-4b5e-b9bf-46a5f9123d7c\" (UID: \"5134fc1a-6835-4b5e-b9bf-46a5f9123d7c\") " Nov 28 16:15:04 crc kubenswrapper[4857]: I1128 16:15:04.894090 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5134fc1a-6835-4b5e-b9bf-46a5f9123d7c-config-volume" (OuterVolumeSpecName: "config-volume") pod "5134fc1a-6835-4b5e-b9bf-46a5f9123d7c" (UID: "5134fc1a-6835-4b5e-b9bf-46a5f9123d7c"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:15:04 crc kubenswrapper[4857]: I1128 16:15:04.900516 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5134fc1a-6835-4b5e-b9bf-46a5f9123d7c-kube-api-access-t9g9h" (OuterVolumeSpecName: "kube-api-access-t9g9h") pod "5134fc1a-6835-4b5e-b9bf-46a5f9123d7c" (UID: "5134fc1a-6835-4b5e-b9bf-46a5f9123d7c"). InnerVolumeSpecName "kube-api-access-t9g9h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:15:04 crc kubenswrapper[4857]: I1128 16:15:04.904102 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5134fc1a-6835-4b5e-b9bf-46a5f9123d7c-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "5134fc1a-6835-4b5e-b9bf-46a5f9123d7c" (UID: "5134fc1a-6835-4b5e-b9bf-46a5f9123d7c"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:15:04 crc kubenswrapper[4857]: I1128 16:15:04.994530 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t9g9h\" (UniqueName: \"kubernetes.io/projected/5134fc1a-6835-4b5e-b9bf-46a5f9123d7c-kube-api-access-t9g9h\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:04 crc kubenswrapper[4857]: I1128 16:15:04.994559 4857 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5134fc1a-6835-4b5e-b9bf-46a5f9123d7c-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:04 crc kubenswrapper[4857]: I1128 16:15:04.994568 4857 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5134fc1a-6835-4b5e-b9bf-46a5f9123d7c-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:05 crc kubenswrapper[4857]: I1128 16:15:05.272881 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-pswgx" event={"ID":"5134fc1a-6835-4b5e-b9bf-46a5f9123d7c","Type":"ContainerDied","Data":"f6e54c53c5003772f6221995163a54f50ab0dbf90c2c533867ec27424e6bf3d4"} Nov 28 16:15:05 crc kubenswrapper[4857]: I1128 16:15:05.272926 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f6e54c53c5003772f6221995163a54f50ab0dbf90c2c533867ec27424e6bf3d4" Nov 28 16:15:05 crc kubenswrapper[4857]: I1128 16:15:05.273010 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405775-pswgx" Nov 28 16:15:05 crc kubenswrapper[4857]: I1128 16:15:05.849273 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405730-7kdpn"] Nov 28 16:15:05 crc kubenswrapper[4857]: I1128 16:15:05.858803 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405730-7kdpn"] Nov 28 16:15:06 crc kubenswrapper[4857]: I1128 16:15:06.242236 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4e8306c3-562b-40d1-a682-a6894fc7bc51" path="/var/lib/kubelet/pods/4e8306c3-562b-40d1-a682-a6894fc7bc51/volumes" Nov 28 16:15:11 crc kubenswrapper[4857]: I1128 16:15:11.308846 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:15:11 crc kubenswrapper[4857]: I1128 16:15:11.309521 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:15:11 crc kubenswrapper[4857]: I1128 16:15:11.309580 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 16:15:11 crc kubenswrapper[4857]: I1128 16:15:11.310564 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c74305952f95e4003fa797d662a671565c73d0c150114e268ea397949f69fe95"} pod="openshift-machine-config-operator/machine-config-daemon-dshsf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:15:11 crc kubenswrapper[4857]: I1128 16:15:11.310623 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" containerID="cri-o://c74305952f95e4003fa797d662a671565c73d0c150114e268ea397949f69fe95" gracePeriod=600 Nov 28 16:15:11 crc kubenswrapper[4857]: E1128 16:15:11.520068 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:15:12 crc kubenswrapper[4857]: I1128 16:15:12.365764 4857 generic.go:334] "Generic (PLEG): container finished" podID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerID="c74305952f95e4003fa797d662a671565c73d0c150114e268ea397949f69fe95" exitCode=0 Nov 28 16:15:12 crc kubenswrapper[4857]: I1128 16:15:12.365821 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" 
event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerDied","Data":"c74305952f95e4003fa797d662a671565c73d0c150114e268ea397949f69fe95"} Nov 28 16:15:12 crc kubenswrapper[4857]: I1128 16:15:12.366323 4857 scope.go:117] "RemoveContainer" containerID="c942c5d609ce7e62c0a6cd31d31c499b688c7fdc91159358f38ec73db2f1a01e" Nov 28 16:15:12 crc kubenswrapper[4857]: I1128 16:15:12.369882 4857 scope.go:117] "RemoveContainer" containerID="c74305952f95e4003fa797d662a671565c73d0c150114e268ea397949f69fe95" Nov 28 16:15:12 crc kubenswrapper[4857]: E1128 16:15:12.371515 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:15:22 crc kubenswrapper[4857]: I1128 16:15:22.193389 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-gm8dh"] Nov 28 16:15:22 crc kubenswrapper[4857]: E1128 16:15:22.194379 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5134fc1a-6835-4b5e-b9bf-46a5f9123d7c" containerName="collect-profiles" Nov 28 16:15:22 crc kubenswrapper[4857]: I1128 16:15:22.194390 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="5134fc1a-6835-4b5e-b9bf-46a5f9123d7c" containerName="collect-profiles" Nov 28 16:15:22 crc kubenswrapper[4857]: I1128 16:15:22.194599 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="5134fc1a-6835-4b5e-b9bf-46a5f9123d7c" containerName="collect-profiles" Nov 28 16:15:22 crc kubenswrapper[4857]: I1128 16:15:22.196185 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-gm8dh" Nov 28 16:15:22 crc kubenswrapper[4857]: I1128 16:15:22.212451 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-gm8dh"] Nov 28 16:15:22 crc kubenswrapper[4857]: I1128 16:15:22.299758 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wmg49\" (UniqueName: \"kubernetes.io/projected/ae64e071-e073-423f-bf03-00f05e0b28b2-kube-api-access-wmg49\") pod \"community-operators-gm8dh\" (UID: \"ae64e071-e073-423f-bf03-00f05e0b28b2\") " pod="openshift-marketplace/community-operators-gm8dh" Nov 28 16:15:22 crc kubenswrapper[4857]: I1128 16:15:22.299833 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae64e071-e073-423f-bf03-00f05e0b28b2-catalog-content\") pod \"community-operators-gm8dh\" (UID: \"ae64e071-e073-423f-bf03-00f05e0b28b2\") " pod="openshift-marketplace/community-operators-gm8dh" Nov 28 16:15:22 crc kubenswrapper[4857]: I1128 16:15:22.299920 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae64e071-e073-423f-bf03-00f05e0b28b2-utilities\") pod \"community-operators-gm8dh\" (UID: \"ae64e071-e073-423f-bf03-00f05e0b28b2\") " pod="openshift-marketplace/community-operators-gm8dh" Nov 28 16:15:22 crc kubenswrapper[4857]: I1128 16:15:22.399235 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qhlzc"] Nov 28 16:15:22 crc kubenswrapper[4857]: I1128 16:15:22.401531 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qhlzc" Nov 28 16:15:22 crc kubenswrapper[4857]: I1128 16:15:22.402156 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wmg49\" (UniqueName: \"kubernetes.io/projected/ae64e071-e073-423f-bf03-00f05e0b28b2-kube-api-access-wmg49\") pod \"community-operators-gm8dh\" (UID: \"ae64e071-e073-423f-bf03-00f05e0b28b2\") " pod="openshift-marketplace/community-operators-gm8dh" Nov 28 16:15:22 crc kubenswrapper[4857]: I1128 16:15:22.402222 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae64e071-e073-423f-bf03-00f05e0b28b2-catalog-content\") pod \"community-operators-gm8dh\" (UID: \"ae64e071-e073-423f-bf03-00f05e0b28b2\") " pod="openshift-marketplace/community-operators-gm8dh" Nov 28 16:15:22 crc kubenswrapper[4857]: I1128 16:15:22.402261 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae64e071-e073-423f-bf03-00f05e0b28b2-utilities\") pod \"community-operators-gm8dh\" (UID: \"ae64e071-e073-423f-bf03-00f05e0b28b2\") " pod="openshift-marketplace/community-operators-gm8dh" Nov 28 16:15:22 crc kubenswrapper[4857]: I1128 16:15:22.402824 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae64e071-e073-423f-bf03-00f05e0b28b2-utilities\") pod \"community-operators-gm8dh\" (UID: \"ae64e071-e073-423f-bf03-00f05e0b28b2\") " pod="openshift-marketplace/community-operators-gm8dh" Nov 28 16:15:22 crc kubenswrapper[4857]: I1128 16:15:22.402840 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae64e071-e073-423f-bf03-00f05e0b28b2-catalog-content\") pod \"community-operators-gm8dh\" (UID: \"ae64e071-e073-423f-bf03-00f05e0b28b2\") " pod="openshift-marketplace/community-operators-gm8dh" Nov 28 16:15:22 crc kubenswrapper[4857]: I1128 16:15:22.412790 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qhlzc"] Nov 28 16:15:22 crc kubenswrapper[4857]: I1128 16:15:22.422477 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wmg49\" (UniqueName: \"kubernetes.io/projected/ae64e071-e073-423f-bf03-00f05e0b28b2-kube-api-access-wmg49\") pod \"community-operators-gm8dh\" (UID: \"ae64e071-e073-423f-bf03-00f05e0b28b2\") " pod="openshift-marketplace/community-operators-gm8dh" Nov 28 16:15:22 crc kubenswrapper[4857]: I1128 16:15:22.503831 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ef14e76-3775-4c7b-872f-7557536d531d-catalog-content\") pod \"certified-operators-qhlzc\" (UID: \"4ef14e76-3775-4c7b-872f-7557536d531d\") " pod="openshift-marketplace/certified-operators-qhlzc" Nov 28 16:15:22 crc kubenswrapper[4857]: I1128 16:15:22.503873 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ef14e76-3775-4c7b-872f-7557536d531d-utilities\") pod \"certified-operators-qhlzc\" (UID: \"4ef14e76-3775-4c7b-872f-7557536d531d\") " pod="openshift-marketplace/certified-operators-qhlzc" Nov 28 16:15:22 crc kubenswrapper[4857]: I1128 16:15:22.503994 4857 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4m8kj\" (UniqueName: \"kubernetes.io/projected/4ef14e76-3775-4c7b-872f-7557536d531d-kube-api-access-4m8kj\") pod \"certified-operators-qhlzc\" (UID: \"4ef14e76-3775-4c7b-872f-7557536d531d\") " pod="openshift-marketplace/certified-operators-qhlzc" Nov 28 16:15:22 crc kubenswrapper[4857]: I1128 16:15:22.519981 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gm8dh" Nov 28 16:15:22 crc kubenswrapper[4857]: I1128 16:15:22.606075 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ef14e76-3775-4c7b-872f-7557536d531d-catalog-content\") pod \"certified-operators-qhlzc\" (UID: \"4ef14e76-3775-4c7b-872f-7557536d531d\") " pod="openshift-marketplace/certified-operators-qhlzc" Nov 28 16:15:22 crc kubenswrapper[4857]: I1128 16:15:22.606422 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ef14e76-3775-4c7b-872f-7557536d531d-utilities\") pod \"certified-operators-qhlzc\" (UID: \"4ef14e76-3775-4c7b-872f-7557536d531d\") " pod="openshift-marketplace/certified-operators-qhlzc" Nov 28 16:15:22 crc kubenswrapper[4857]: I1128 16:15:22.606523 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ef14e76-3775-4c7b-872f-7557536d531d-catalog-content\") pod \"certified-operators-qhlzc\" (UID: \"4ef14e76-3775-4c7b-872f-7557536d531d\") " pod="openshift-marketplace/certified-operators-qhlzc" Nov 28 16:15:22 crc kubenswrapper[4857]: I1128 16:15:22.606534 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4m8kj\" (UniqueName: \"kubernetes.io/projected/4ef14e76-3775-4c7b-872f-7557536d531d-kube-api-access-4m8kj\") pod \"certified-operators-qhlzc\" (UID: \"4ef14e76-3775-4c7b-872f-7557536d531d\") " pod="openshift-marketplace/certified-operators-qhlzc" Nov 28 16:15:22 crc kubenswrapper[4857]: I1128 16:15:22.606920 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ef14e76-3775-4c7b-872f-7557536d531d-utilities\") pod \"certified-operators-qhlzc\" (UID: \"4ef14e76-3775-4c7b-872f-7557536d531d\") " pod="openshift-marketplace/certified-operators-qhlzc" Nov 28 16:15:22 crc kubenswrapper[4857]: I1128 16:15:22.625237 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4m8kj\" (UniqueName: \"kubernetes.io/projected/4ef14e76-3775-4c7b-872f-7557536d531d-kube-api-access-4m8kj\") pod \"certified-operators-qhlzc\" (UID: \"4ef14e76-3775-4c7b-872f-7557536d531d\") " pod="openshift-marketplace/certified-operators-qhlzc" Nov 28 16:15:22 crc kubenswrapper[4857]: I1128 16:15:22.722879 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qhlzc" Nov 28 16:15:23 crc kubenswrapper[4857]: I1128 16:15:23.124660 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-gm8dh"] Nov 28 16:15:23 crc kubenswrapper[4857]: I1128 16:15:23.331523 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qhlzc"] Nov 28 16:15:23 crc kubenswrapper[4857]: W1128 16:15:23.332620 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4ef14e76_3775_4c7b_872f_7557536d531d.slice/crio-201528b3880917c9e54202fe21c4cf51891865e1cb2e0e1e9d3a925a327952f6 WatchSource:0}: Error finding container 201528b3880917c9e54202fe21c4cf51891865e1cb2e0e1e9d3a925a327952f6: Status 404 returned error can't find the container with id 201528b3880917c9e54202fe21c4cf51891865e1cb2e0e1e9d3a925a327952f6 Nov 28 16:15:23 crc kubenswrapper[4857]: I1128 16:15:23.485408 4857 generic.go:334] "Generic (PLEG): container finished" podID="ae64e071-e073-423f-bf03-00f05e0b28b2" containerID="8747b166680d6787dbaa96b34ad7627f4425d5461023003cc310db7963cf246b" exitCode=0 Nov 28 16:15:23 crc kubenswrapper[4857]: I1128 16:15:23.485471 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gm8dh" event={"ID":"ae64e071-e073-423f-bf03-00f05e0b28b2","Type":"ContainerDied","Data":"8747b166680d6787dbaa96b34ad7627f4425d5461023003cc310db7963cf246b"} Nov 28 16:15:23 crc kubenswrapper[4857]: I1128 16:15:23.485502 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gm8dh" event={"ID":"ae64e071-e073-423f-bf03-00f05e0b28b2","Type":"ContainerStarted","Data":"0a7e09f8d8b91f69f2f5ae828fb3a3a779f5333090884b12317f0d1c3775c76f"} Nov 28 16:15:23 crc kubenswrapper[4857]: I1128 16:15:23.487790 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qhlzc" event={"ID":"4ef14e76-3775-4c7b-872f-7557536d531d","Type":"ContainerStarted","Data":"201528b3880917c9e54202fe21c4cf51891865e1cb2e0e1e9d3a925a327952f6"} Nov 28 16:15:24 crc kubenswrapper[4857]: I1128 16:15:24.500709 4857 generic.go:334] "Generic (PLEG): container finished" podID="4ef14e76-3775-4c7b-872f-7557536d531d" containerID="fa47d1b011a1e1338ecefb8fdf82b259affa1c95002ad48f34104e2573c36656" exitCode=0 Nov 28 16:15:24 crc kubenswrapper[4857]: I1128 16:15:24.500890 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qhlzc" event={"ID":"4ef14e76-3775-4c7b-872f-7557536d531d","Type":"ContainerDied","Data":"fa47d1b011a1e1338ecefb8fdf82b259affa1c95002ad48f34104e2573c36656"} Nov 28 16:15:24 crc kubenswrapper[4857]: I1128 16:15:24.507378 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gm8dh" event={"ID":"ae64e071-e073-423f-bf03-00f05e0b28b2","Type":"ContainerStarted","Data":"7e7eab410bcb31ce0b5861ebd5a27e5c1bcd48376728aed1dc6a4edf02d25d79"} Nov 28 16:15:25 crc kubenswrapper[4857]: I1128 16:15:25.520467 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qhlzc" event={"ID":"4ef14e76-3775-4c7b-872f-7557536d531d","Type":"ContainerStarted","Data":"07b6411f490997d11007dd9720e20c90de817e8ee787f71de966460aa9503b3b"} Nov 28 16:15:25 crc kubenswrapper[4857]: I1128 16:15:25.525790 4857 generic.go:334] "Generic (PLEG): container finished" 
podID="ae64e071-e073-423f-bf03-00f05e0b28b2" containerID="7e7eab410bcb31ce0b5861ebd5a27e5c1bcd48376728aed1dc6a4edf02d25d79" exitCode=0 Nov 28 16:15:25 crc kubenswrapper[4857]: I1128 16:15:25.525873 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gm8dh" event={"ID":"ae64e071-e073-423f-bf03-00f05e0b28b2","Type":"ContainerDied","Data":"7e7eab410bcb31ce0b5861ebd5a27e5c1bcd48376728aed1dc6a4edf02d25d79"} Nov 28 16:15:27 crc kubenswrapper[4857]: I1128 16:15:27.547668 4857 generic.go:334] "Generic (PLEG): container finished" podID="4ef14e76-3775-4c7b-872f-7557536d531d" containerID="07b6411f490997d11007dd9720e20c90de817e8ee787f71de966460aa9503b3b" exitCode=0 Nov 28 16:15:27 crc kubenswrapper[4857]: I1128 16:15:27.547749 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qhlzc" event={"ID":"4ef14e76-3775-4c7b-872f-7557536d531d","Type":"ContainerDied","Data":"07b6411f490997d11007dd9720e20c90de817e8ee787f71de966460aa9503b3b"} Nov 28 16:15:27 crc kubenswrapper[4857]: I1128 16:15:27.554104 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gm8dh" event={"ID":"ae64e071-e073-423f-bf03-00f05e0b28b2","Type":"ContainerStarted","Data":"0cbee8712f083ad1395b9624637b359245aa3cb723d7d7eabd319d29480c5ab7"} Nov 28 16:15:27 crc kubenswrapper[4857]: I1128 16:15:27.605109 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-gm8dh" podStartSLOduration=2.769093049 podStartE2EDuration="5.605084645s" podCreationTimestamp="2025-11-28 16:15:22 +0000 UTC" firstStartedPulling="2025-11-28 16:15:23.487295068 +0000 UTC m=+9973.611236515" lastFinishedPulling="2025-11-28 16:15:26.323286674 +0000 UTC m=+9976.447228111" observedRunningTime="2025-11-28 16:15:27.595212444 +0000 UTC m=+9977.719153881" watchObservedRunningTime="2025-11-28 16:15:27.605084645 +0000 UTC m=+9977.729026082" Nov 28 16:15:28 crc kubenswrapper[4857]: I1128 16:15:28.229820 4857 scope.go:117] "RemoveContainer" containerID="c74305952f95e4003fa797d662a671565c73d0c150114e268ea397949f69fe95" Nov 28 16:15:28 crc kubenswrapper[4857]: E1128 16:15:28.230187 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:15:28 crc kubenswrapper[4857]: I1128 16:15:28.565393 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qhlzc" event={"ID":"4ef14e76-3775-4c7b-872f-7557536d531d","Type":"ContainerStarted","Data":"caccab5f44648f920b2cdc10bc44c97dd998efe6d918bf429c071f4389406c90"} Nov 28 16:15:28 crc kubenswrapper[4857]: I1128 16:15:28.583045 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qhlzc" podStartSLOduration=2.790912266 podStartE2EDuration="6.58303069s" podCreationTimestamp="2025-11-28 16:15:22 +0000 UTC" firstStartedPulling="2025-11-28 16:15:24.502612891 +0000 UTC m=+9974.626554328" lastFinishedPulling="2025-11-28 16:15:28.294731275 +0000 UTC m=+9978.418672752" observedRunningTime="2025-11-28 16:15:28.580715289 +0000 UTC m=+9978.704656736" 
watchObservedRunningTime="2025-11-28 16:15:28.58303069 +0000 UTC m=+9978.706972117" Nov 28 16:15:32 crc kubenswrapper[4857]: I1128 16:15:32.521300 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-gm8dh" Nov 28 16:15:32 crc kubenswrapper[4857]: I1128 16:15:32.524123 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-gm8dh" Nov 28 16:15:32 crc kubenswrapper[4857]: I1128 16:15:32.581857 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-gm8dh" Nov 28 16:15:32 crc kubenswrapper[4857]: I1128 16:15:32.681665 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-gm8dh" Nov 28 16:15:32 crc kubenswrapper[4857]: I1128 16:15:32.723928 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-qhlzc" Nov 28 16:15:32 crc kubenswrapper[4857]: I1128 16:15:32.724427 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-qhlzc" Nov 28 16:15:32 crc kubenswrapper[4857]: I1128 16:15:32.791029 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-qhlzc" Nov 28 16:15:33 crc kubenswrapper[4857]: I1128 16:15:33.664750 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-qhlzc" Nov 28 16:15:33 crc kubenswrapper[4857]: I1128 16:15:33.704219 4857 scope.go:117] "RemoveContainer" containerID="6c694e2a8a305947d717ce15e15503557041bca4224fb28d45b4e13f2718426a" Nov 28 16:15:34 crc kubenswrapper[4857]: I1128 16:15:34.166445 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-gm8dh"] Nov 28 16:15:34 crc kubenswrapper[4857]: I1128 16:15:34.623210 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-gm8dh" podUID="ae64e071-e073-423f-bf03-00f05e0b28b2" containerName="registry-server" containerID="cri-o://0cbee8712f083ad1395b9624637b359245aa3cb723d7d7eabd319d29480c5ab7" gracePeriod=2 Nov 28 16:15:35 crc kubenswrapper[4857]: I1128 16:15:35.175741 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qhlzc"] Nov 28 16:15:35 crc kubenswrapper[4857]: I1128 16:15:35.639787 4857 generic.go:334] "Generic (PLEG): container finished" podID="ae64e071-e073-423f-bf03-00f05e0b28b2" containerID="0cbee8712f083ad1395b9624637b359245aa3cb723d7d7eabd319d29480c5ab7" exitCode=0 Nov 28 16:15:35 crc kubenswrapper[4857]: I1128 16:15:35.639883 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gm8dh" event={"ID":"ae64e071-e073-423f-bf03-00f05e0b28b2","Type":"ContainerDied","Data":"0cbee8712f083ad1395b9624637b359245aa3cb723d7d7eabd319d29480c5ab7"} Nov 28 16:15:35 crc kubenswrapper[4857]: I1128 16:15:35.640367 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-qhlzc" podUID="4ef14e76-3775-4c7b-872f-7557536d531d" containerName="registry-server" containerID="cri-o://caccab5f44648f920b2cdc10bc44c97dd998efe6d918bf429c071f4389406c90" gracePeriod=2 Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.065025 4857 util.go:48] "No ready sandbox for 
pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-gm8dh" Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.166436 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae64e071-e073-423f-bf03-00f05e0b28b2-utilities\") pod \"ae64e071-e073-423f-bf03-00f05e0b28b2\" (UID: \"ae64e071-e073-423f-bf03-00f05e0b28b2\") " Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.166528 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wmg49\" (UniqueName: \"kubernetes.io/projected/ae64e071-e073-423f-bf03-00f05e0b28b2-kube-api-access-wmg49\") pod \"ae64e071-e073-423f-bf03-00f05e0b28b2\" (UID: \"ae64e071-e073-423f-bf03-00f05e0b28b2\") " Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.166687 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae64e071-e073-423f-bf03-00f05e0b28b2-catalog-content\") pod \"ae64e071-e073-423f-bf03-00f05e0b28b2\" (UID: \"ae64e071-e073-423f-bf03-00f05e0b28b2\") " Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.168185 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ae64e071-e073-423f-bf03-00f05e0b28b2-utilities" (OuterVolumeSpecName: "utilities") pod "ae64e071-e073-423f-bf03-00f05e0b28b2" (UID: "ae64e071-e073-423f-bf03-00f05e0b28b2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.174740 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae64e071-e073-423f-bf03-00f05e0b28b2-kube-api-access-wmg49" (OuterVolumeSpecName: "kube-api-access-wmg49") pod "ae64e071-e073-423f-bf03-00f05e0b28b2" (UID: "ae64e071-e073-423f-bf03-00f05e0b28b2"). InnerVolumeSpecName "kube-api-access-wmg49". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.222590 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ae64e071-e073-423f-bf03-00f05e0b28b2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ae64e071-e073-423f-bf03-00f05e0b28b2" (UID: "ae64e071-e073-423f-bf03-00f05e0b28b2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.263085 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-qhlzc" Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.270466 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae64e071-e073-423f-bf03-00f05e0b28b2-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.270521 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wmg49\" (UniqueName: \"kubernetes.io/projected/ae64e071-e073-423f-bf03-00f05e0b28b2-kube-api-access-wmg49\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.270544 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae64e071-e073-423f-bf03-00f05e0b28b2-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.371790 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ef14e76-3775-4c7b-872f-7557536d531d-utilities\") pod \"4ef14e76-3775-4c7b-872f-7557536d531d\" (UID: \"4ef14e76-3775-4c7b-872f-7557536d531d\") " Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.372657 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ef14e76-3775-4c7b-872f-7557536d531d-utilities" (OuterVolumeSpecName: "utilities") pod "4ef14e76-3775-4c7b-872f-7557536d531d" (UID: "4ef14e76-3775-4c7b-872f-7557536d531d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.373060 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4m8kj\" (UniqueName: \"kubernetes.io/projected/4ef14e76-3775-4c7b-872f-7557536d531d-kube-api-access-4m8kj\") pod \"4ef14e76-3775-4c7b-872f-7557536d531d\" (UID: \"4ef14e76-3775-4c7b-872f-7557536d531d\") " Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.373196 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ef14e76-3775-4c7b-872f-7557536d531d-catalog-content\") pod \"4ef14e76-3775-4c7b-872f-7557536d531d\" (UID: \"4ef14e76-3775-4c7b-872f-7557536d531d\") " Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.374621 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ef14e76-3775-4c7b-872f-7557536d531d-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.377763 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ef14e76-3775-4c7b-872f-7557536d531d-kube-api-access-4m8kj" (OuterVolumeSpecName: "kube-api-access-4m8kj") pod "4ef14e76-3775-4c7b-872f-7557536d531d" (UID: "4ef14e76-3775-4c7b-872f-7557536d531d"). InnerVolumeSpecName "kube-api-access-4m8kj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.428396 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ef14e76-3775-4c7b-872f-7557536d531d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4ef14e76-3775-4c7b-872f-7557536d531d" (UID: "4ef14e76-3775-4c7b-872f-7557536d531d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.476241 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4m8kj\" (UniqueName: \"kubernetes.io/projected/4ef14e76-3775-4c7b-872f-7557536d531d-kube-api-access-4m8kj\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.476267 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ef14e76-3775-4c7b-872f-7557536d531d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.654524 4857 generic.go:334] "Generic (PLEG): container finished" podID="4ef14e76-3775-4c7b-872f-7557536d531d" containerID="caccab5f44648f920b2cdc10bc44c97dd998efe6d918bf429c071f4389406c90" exitCode=0 Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.654772 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qhlzc" event={"ID":"4ef14e76-3775-4c7b-872f-7557536d531d","Type":"ContainerDied","Data":"caccab5f44648f920b2cdc10bc44c97dd998efe6d918bf429c071f4389406c90"} Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.654833 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qhlzc" event={"ID":"4ef14e76-3775-4c7b-872f-7557536d531d","Type":"ContainerDied","Data":"201528b3880917c9e54202fe21c4cf51891865e1cb2e0e1e9d3a925a327952f6"} Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.654865 4857 scope.go:117] "RemoveContainer" containerID="caccab5f44648f920b2cdc10bc44c97dd998efe6d918bf429c071f4389406c90" Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.655171 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qhlzc" Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.658626 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-gm8dh" event={"ID":"ae64e071-e073-423f-bf03-00f05e0b28b2","Type":"ContainerDied","Data":"0a7e09f8d8b91f69f2f5ae828fb3a3a779f5333090884b12317f0d1c3775c76f"} Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.658686 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-gm8dh" Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.689609 4857 scope.go:117] "RemoveContainer" containerID="07b6411f490997d11007dd9720e20c90de817e8ee787f71de966460aa9503b3b" Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.690626 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-gm8dh"] Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.716797 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-gm8dh"] Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.720927 4857 scope.go:117] "RemoveContainer" containerID="fa47d1b011a1e1338ecefb8fdf82b259affa1c95002ad48f34104e2573c36656" Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.737571 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-qhlzc"] Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.753588 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-qhlzc"] Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.782616 4857 scope.go:117] "RemoveContainer" containerID="caccab5f44648f920b2cdc10bc44c97dd998efe6d918bf429c071f4389406c90" Nov 28 16:15:36 crc kubenswrapper[4857]: E1128 16:15:36.783441 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"caccab5f44648f920b2cdc10bc44c97dd998efe6d918bf429c071f4389406c90\": container with ID starting with caccab5f44648f920b2cdc10bc44c97dd998efe6d918bf429c071f4389406c90 not found: ID does not exist" containerID="caccab5f44648f920b2cdc10bc44c97dd998efe6d918bf429c071f4389406c90" Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.783493 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"caccab5f44648f920b2cdc10bc44c97dd998efe6d918bf429c071f4389406c90"} err="failed to get container status \"caccab5f44648f920b2cdc10bc44c97dd998efe6d918bf429c071f4389406c90\": rpc error: code = NotFound desc = could not find container \"caccab5f44648f920b2cdc10bc44c97dd998efe6d918bf429c071f4389406c90\": container with ID starting with caccab5f44648f920b2cdc10bc44c97dd998efe6d918bf429c071f4389406c90 not found: ID does not exist" Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.783526 4857 scope.go:117] "RemoveContainer" containerID="07b6411f490997d11007dd9720e20c90de817e8ee787f71de966460aa9503b3b" Nov 28 16:15:36 crc kubenswrapper[4857]: E1128 16:15:36.785314 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"07b6411f490997d11007dd9720e20c90de817e8ee787f71de966460aa9503b3b\": container with ID starting with 07b6411f490997d11007dd9720e20c90de817e8ee787f71de966460aa9503b3b not found: ID does not exist" containerID="07b6411f490997d11007dd9720e20c90de817e8ee787f71de966460aa9503b3b" Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.785429 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"07b6411f490997d11007dd9720e20c90de817e8ee787f71de966460aa9503b3b"} err="failed to get container status \"07b6411f490997d11007dd9720e20c90de817e8ee787f71de966460aa9503b3b\": rpc error: code = NotFound desc = could not find container \"07b6411f490997d11007dd9720e20c90de817e8ee787f71de966460aa9503b3b\": container with ID starting with 
07b6411f490997d11007dd9720e20c90de817e8ee787f71de966460aa9503b3b not found: ID does not exist" Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.785458 4857 scope.go:117] "RemoveContainer" containerID="fa47d1b011a1e1338ecefb8fdf82b259affa1c95002ad48f34104e2573c36656" Nov 28 16:15:36 crc kubenswrapper[4857]: E1128 16:15:36.785813 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fa47d1b011a1e1338ecefb8fdf82b259affa1c95002ad48f34104e2573c36656\": container with ID starting with fa47d1b011a1e1338ecefb8fdf82b259affa1c95002ad48f34104e2573c36656 not found: ID does not exist" containerID="fa47d1b011a1e1338ecefb8fdf82b259affa1c95002ad48f34104e2573c36656" Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.785849 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fa47d1b011a1e1338ecefb8fdf82b259affa1c95002ad48f34104e2573c36656"} err="failed to get container status \"fa47d1b011a1e1338ecefb8fdf82b259affa1c95002ad48f34104e2573c36656\": rpc error: code = NotFound desc = could not find container \"fa47d1b011a1e1338ecefb8fdf82b259affa1c95002ad48f34104e2573c36656\": container with ID starting with fa47d1b011a1e1338ecefb8fdf82b259affa1c95002ad48f34104e2573c36656 not found: ID does not exist" Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.785870 4857 scope.go:117] "RemoveContainer" containerID="0cbee8712f083ad1395b9624637b359245aa3cb723d7d7eabd319d29480c5ab7" Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.828232 4857 scope.go:117] "RemoveContainer" containerID="7e7eab410bcb31ce0b5861ebd5a27e5c1bcd48376728aed1dc6a4edf02d25d79" Nov 28 16:15:36 crc kubenswrapper[4857]: I1128 16:15:36.862908 4857 scope.go:117] "RemoveContainer" containerID="8747b166680d6787dbaa96b34ad7627f4425d5461023003cc310db7963cf246b" Nov 28 16:15:38 crc kubenswrapper[4857]: I1128 16:15:38.250599 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ef14e76-3775-4c7b-872f-7557536d531d" path="/var/lib/kubelet/pods/4ef14e76-3775-4c7b-872f-7557536d531d/volumes" Nov 28 16:15:38 crc kubenswrapper[4857]: I1128 16:15:38.254404 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae64e071-e073-423f-bf03-00f05e0b28b2" path="/var/lib/kubelet/pods/ae64e071-e073-423f-bf03-00f05e0b28b2/volumes" Nov 28 16:15:39 crc kubenswrapper[4857]: I1128 16:15:39.229547 4857 scope.go:117] "RemoveContainer" containerID="c74305952f95e4003fa797d662a671565c73d0c150114e268ea397949f69fe95" Nov 28 16:15:39 crc kubenswrapper[4857]: E1128 16:15:39.229876 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:15:53 crc kubenswrapper[4857]: I1128 16:15:53.229432 4857 scope.go:117] "RemoveContainer" containerID="c74305952f95e4003fa797d662a671565c73d0c150114e268ea397949f69fe95" Nov 28 16:15:53 crc kubenswrapper[4857]: E1128 16:15:53.230210 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:15:58 crc kubenswrapper[4857]: I1128 16:15:58.950421 4857 generic.go:334] "Generic (PLEG): container finished" podID="6bb7d13b-f954-4298-b9e9-3b5d36f8591c" containerID="b77f5276691e4a0a46c40b5e9fc5a8a5b409f7a86e2a83523204568f179b886c" exitCode=0 Nov 28 16:15:58 crc kubenswrapper[4857]: I1128 16:15:58.950512 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w" event={"ID":"6bb7d13b-f954-4298-b9e9-3b5d36f8591c","Type":"ContainerDied","Data":"b77f5276691e4a0a46c40b5e9fc5a8a5b409f7a86e2a83523204568f179b886c"} Nov 28 16:16:00 crc kubenswrapper[4857]: I1128 16:16:00.421704 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w" Nov 28 16:16:00 crc kubenswrapper[4857]: I1128 16:16:00.431802 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-migration-ssh-key-0\") pod \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " Nov 28 16:16:00 crc kubenswrapper[4857]: I1128 16:16:00.431903 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-ssh-key\") pod \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " Nov 28 16:16:00 crc kubenswrapper[4857]: I1128 16:16:00.431934 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-cells-global-config-1\") pod \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " Nov 28 16:16:00 crc kubenswrapper[4857]: I1128 16:16:00.432043 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-ceph\") pod \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " Nov 28 16:16:00 crc kubenswrapper[4857]: I1128 16:16:00.432077 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-cell1-compute-config-1\") pod \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " Nov 28 16:16:00 crc kubenswrapper[4857]: I1128 16:16:00.432102 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-migration-ssh-key-1\") pod \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " Nov 28 16:16:00 crc kubenswrapper[4857]: I1128 16:16:00.432155 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-cell1-combined-ca-bundle\") pod \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\" (UID: 
\"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " Nov 28 16:16:00 crc kubenswrapper[4857]: I1128 16:16:00.432201 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xx2gq\" (UniqueName: \"kubernetes.io/projected/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-kube-api-access-xx2gq\") pod \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " Nov 28 16:16:00 crc kubenswrapper[4857]: I1128 16:16:00.432249 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-cell1-compute-config-0\") pod \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " Nov 28 16:16:00 crc kubenswrapper[4857]: I1128 16:16:00.432391 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-inventory\") pod \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " Nov 28 16:16:00 crc kubenswrapper[4857]: I1128 16:16:00.432433 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-cells-global-config-0\") pod \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\" (UID: \"6bb7d13b-f954-4298-b9e9-3b5d36f8591c\") " Nov 28 16:16:00 crc kubenswrapper[4857]: I1128 16:16:00.439154 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-ceph" (OuterVolumeSpecName: "ceph") pod "6bb7d13b-f954-4298-b9e9-3b5d36f8591c" (UID: "6bb7d13b-f954-4298-b9e9-3b5d36f8591c"). InnerVolumeSpecName "ceph". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:16:00 crc kubenswrapper[4857]: I1128 16:16:00.441406 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-cell1-combined-ca-bundle" (OuterVolumeSpecName: "nova-cell1-combined-ca-bundle") pod "6bb7d13b-f954-4298-b9e9-3b5d36f8591c" (UID: "6bb7d13b-f954-4298-b9e9-3b5d36f8591c"). InnerVolumeSpecName "nova-cell1-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:16:00 crc kubenswrapper[4857]: I1128 16:16:00.500884 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-kube-api-access-xx2gq" (OuterVolumeSpecName: "kube-api-access-xx2gq") pod "6bb7d13b-f954-4298-b9e9-3b5d36f8591c" (UID: "6bb7d13b-f954-4298-b9e9-3b5d36f8591c"). InnerVolumeSpecName "kube-api-access-xx2gq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:16:00 crc kubenswrapper[4857]: I1128 16:16:00.501404 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-cells-global-config-1" (OuterVolumeSpecName: "nova-cells-global-config-1") pod "6bb7d13b-f954-4298-b9e9-3b5d36f8591c" (UID: "6bb7d13b-f954-4298-b9e9-3b5d36f8591c"). InnerVolumeSpecName "nova-cells-global-config-1". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:16:00 crc kubenswrapper[4857]: I1128 16:16:00.503581 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "6bb7d13b-f954-4298-b9e9-3b5d36f8591c" (UID: "6bb7d13b-f954-4298-b9e9-3b5d36f8591c"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:16:00 crc kubenswrapper[4857]: I1128 16:16:00.508591 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "6bb7d13b-f954-4298-b9e9-3b5d36f8591c" (UID: "6bb7d13b-f954-4298-b9e9-3b5d36f8591c"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:16:00 crc kubenswrapper[4857]: I1128 16:16:00.510844 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "6bb7d13b-f954-4298-b9e9-3b5d36f8591c" (UID: "6bb7d13b-f954-4298-b9e9-3b5d36f8591c"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:16:00 crc kubenswrapper[4857]: I1128 16:16:00.519184 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-inventory" (OuterVolumeSpecName: "inventory") pod "6bb7d13b-f954-4298-b9e9-3b5d36f8591c" (UID: "6bb7d13b-f954-4298-b9e9-3b5d36f8591c"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:16:00 crc kubenswrapper[4857]: I1128 16:16:00.520166 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-cells-global-config-0" (OuterVolumeSpecName: "nova-cells-global-config-0") pod "6bb7d13b-f954-4298-b9e9-3b5d36f8591c" (UID: "6bb7d13b-f954-4298-b9e9-3b5d36f8591c"). InnerVolumeSpecName "nova-cells-global-config-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:16:00 crc kubenswrapper[4857]: I1128 16:16:00.536762 4857 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:00 crc kubenswrapper[4857]: I1128 16:16:00.537208 4857 reconciler_common.go:293] "Volume detached for volume \"nova-cells-global-config-1\" (UniqueName: \"kubernetes.io/configmap/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-cells-global-config-1\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:00 crc kubenswrapper[4857]: I1128 16:16:00.537225 4857 reconciler_common.go:293] "Volume detached for volume \"ceph\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-ceph\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:00 crc kubenswrapper[4857]: I1128 16:16:00.537235 4857 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:00 crc kubenswrapper[4857]: I1128 16:16:00.537244 4857 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-cell1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:00 crc kubenswrapper[4857]: I1128 16:16:00.537253 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xx2gq\" (UniqueName: \"kubernetes.io/projected/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-kube-api-access-xx2gq\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:00 crc kubenswrapper[4857]: I1128 16:16:00.537264 4857 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-inventory\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:00 crc kubenswrapper[4857]: I1128 16:16:00.537273 4857 reconciler_common.go:293] "Volume detached for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-cells-global-config-0\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:00 crc kubenswrapper[4857]: I1128 16:16:00.537282 4857 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:00 crc kubenswrapper[4857]: I1128 16:16:00.541668 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "6bb7d13b-f954-4298-b9e9-3b5d36f8591c" (UID: "6bb7d13b-f954-4298-b9e9-3b5d36f8591c"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:16:00 crc kubenswrapper[4857]: I1128 16:16:00.544088 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "6bb7d13b-f954-4298-b9e9-3b5d36f8591c" (UID: "6bb7d13b-f954-4298-b9e9-3b5d36f8591c"). InnerVolumeSpecName "nova-cell1-compute-config-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:16:00 crc kubenswrapper[4857]: I1128 16:16:00.638219 4857 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:00 crc kubenswrapper[4857]: I1128 16:16:00.638254 4857 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/6bb7d13b-f954-4298-b9e9-3b5d36f8591c-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Nov 28 16:16:00 crc kubenswrapper[4857]: I1128 16:16:00.972744 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w" event={"ID":"6bb7d13b-f954-4298-b9e9-3b5d36f8591c","Type":"ContainerDied","Data":"27a90a2a3485b85a2ec9f00ce07574665aa9bb033a58cb96bfe28c7cd8c25210"} Nov 28 16:16:00 crc kubenswrapper[4857]: I1128 16:16:00.973010 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="27a90a2a3485b85a2ec9f00ce07574665aa9bb033a58cb96bfe28c7cd8c25210" Nov 28 16:16:00 crc kubenswrapper[4857]: I1128 16:16:00.972828 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w" Nov 28 16:16:06 crc kubenswrapper[4857]: I1128 16:16:06.228664 4857 scope.go:117] "RemoveContainer" containerID="c74305952f95e4003fa797d662a671565c73d0c150114e268ea397949f69fe95" Nov 28 16:16:06 crc kubenswrapper[4857]: E1128 16:16:06.229502 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:16:21 crc kubenswrapper[4857]: I1128 16:16:21.228584 4857 scope.go:117] "RemoveContainer" containerID="c74305952f95e4003fa797d662a671565c73d0c150114e268ea397949f69fe95" Nov 28 16:16:21 crc kubenswrapper[4857]: E1128 16:16:21.229781 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:16:35 crc kubenswrapper[4857]: I1128 16:16:35.228292 4857 scope.go:117] "RemoveContainer" containerID="c74305952f95e4003fa797d662a671565c73d0c150114e268ea397949f69fe95" Nov 28 16:16:35 crc kubenswrapper[4857]: E1128 16:16:35.229003 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:16:48 crc kubenswrapper[4857]: I1128 16:16:48.228751 4857 scope.go:117] "RemoveContainer" 
containerID="c74305952f95e4003fa797d662a671565c73d0c150114e268ea397949f69fe95" Nov 28 16:16:48 crc kubenswrapper[4857]: E1128 16:16:48.229609 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:17:01 crc kubenswrapper[4857]: I1128 16:17:01.229572 4857 scope.go:117] "RemoveContainer" containerID="c74305952f95e4003fa797d662a671565c73d0c150114e268ea397949f69fe95" Nov 28 16:17:01 crc kubenswrapper[4857]: E1128 16:17:01.230593 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:17:14 crc kubenswrapper[4857]: I1128 16:17:14.230268 4857 scope.go:117] "RemoveContainer" containerID="c74305952f95e4003fa797d662a671565c73d0c150114e268ea397949f69fe95" Nov 28 16:17:14 crc kubenswrapper[4857]: E1128 16:17:14.230975 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:17:23 crc kubenswrapper[4857]: I1128 16:17:23.920173 4857 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/swift-operator-controller-manager-d77b94747-8f8nb" podUID="ea510000-70d9-4371-b791-6872e8d6905c" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.91:8081/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 28 16:17:29 crc kubenswrapper[4857]: I1128 16:17:29.230394 4857 scope.go:117] "RemoveContainer" containerID="c74305952f95e4003fa797d662a671565c73d0c150114e268ea397949f69fe95" Nov 28 16:17:29 crc kubenswrapper[4857]: E1128 16:17:29.231185 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:17:44 crc kubenswrapper[4857]: I1128 16:17:44.229695 4857 scope.go:117] "RemoveContainer" containerID="c74305952f95e4003fa797d662a671565c73d0c150114e268ea397949f69fe95" Nov 28 16:17:44 crc kubenswrapper[4857]: E1128 16:17:44.231303 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
Nov 28 16:17:44 crc kubenswrapper[4857]: E1128 16:17:44.231303    4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 16:17:58 crc kubenswrapper[4857]: I1128 16:17:58.230109    4857 scope.go:117] "RemoveContainer" containerID="c74305952f95e4003fa797d662a671565c73d0c150114e268ea397949f69fe95"
Nov 28 16:17:58 crc kubenswrapper[4857]: E1128 16:17:58.231414    4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 16:18:09 crc kubenswrapper[4857]: I1128 16:18:09.228973    4857 scope.go:117] "RemoveContainer" containerID="c74305952f95e4003fa797d662a671565c73d0c150114e268ea397949f69fe95"
Nov 28 16:18:09 crc kubenswrapper[4857]: E1128 16:18:09.229885    4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 16:18:11 crc kubenswrapper[4857]: I1128 16:18:11.511411    4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-copy-data"]
Nov 28 16:18:11 crc kubenswrapper[4857]: I1128 16:18:11.512455    4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/mariadb-copy-data" podUID="ccc6a333-9b75-487e-8d3a-740ec87a9136" containerName="adoption" containerID="cri-o://d91334afb40a193be62366514e6b8f1c461c9d16204c8da88bfce65c30e349ca" gracePeriod=30
Nov 28 16:18:11 crc kubenswrapper[4857]: I1128 16:18:11.637679    4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-8cmz4"]
Nov 28 16:18:11 crc kubenswrapper[4857]: E1128 16:18:11.638194    4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae64e071-e073-423f-bf03-00f05e0b28b2" containerName="registry-server"
Nov 28 16:18:11 crc kubenswrapper[4857]: I1128 16:18:11.638219    4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae64e071-e073-423f-bf03-00f05e0b28b2" containerName="registry-server"
Nov 28 16:18:11 crc kubenswrapper[4857]: E1128 16:18:11.638246    4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ef14e76-3775-4c7b-872f-7557536d531d" containerName="extract-content"
Nov 28 16:18:11 crc kubenswrapper[4857]: I1128 16:18:11.638257    4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ef14e76-3775-4c7b-872f-7557536d531d" containerName="extract-content"
Nov 28 16:18:11 crc kubenswrapper[4857]: E1128 16:18:11.638279    4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6bb7d13b-f954-4298-b9e9-3b5d36f8591c" containerName="nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell1"
Nov 28 16:18:11 crc kubenswrapper[4857]: I1128 16:18:11.638289    4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="6bb7d13b-f954-4298-b9e9-3b5d36f8591c" containerName="nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell1"
Nov 28 16:18:11 crc kubenswrapper[4857]: E1128 16:18:11.638309    4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae64e071-e073-423f-bf03-00f05e0b28b2" containerName="extract-utilities"
Nov 28 16:18:11 crc kubenswrapper[4857]: I1128 16:18:11.638318    4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae64e071-e073-423f-bf03-00f05e0b28b2" containerName="extract-utilities"
Nov 28 16:18:11 crc kubenswrapper[4857]: E1128 16:18:11.638334    4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ef14e76-3775-4c7b-872f-7557536d531d" containerName="registry-server"
Nov 28 16:18:11 crc kubenswrapper[4857]: I1128 16:18:11.638343    4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ef14e76-3775-4c7b-872f-7557536d531d" containerName="registry-server"
Nov 28 16:18:11 crc kubenswrapper[4857]: E1128 16:18:11.638365    4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ef14e76-3775-4c7b-872f-7557536d531d" containerName="extract-utilities"
Nov 28 16:18:11 crc kubenswrapper[4857]: I1128 16:18:11.638374    4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ef14e76-3775-4c7b-872f-7557536d531d" containerName="extract-utilities"
Nov 28 16:18:11 crc kubenswrapper[4857]: E1128 16:18:11.638395    4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae64e071-e073-423f-bf03-00f05e0b28b2" containerName="extract-content"
Nov 28 16:18:11 crc kubenswrapper[4857]: I1128 16:18:11.638403    4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae64e071-e073-423f-bf03-00f05e0b28b2" containerName="extract-content"
Nov 28 16:18:11 crc kubenswrapper[4857]: I1128 16:18:11.638632    4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ef14e76-3775-4c7b-872f-7557536d531d" containerName="registry-server"
Nov 28 16:18:11 crc kubenswrapper[4857]: I1128 16:18:11.638651    4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="6bb7d13b-f954-4298-b9e9-3b5d36f8591c" containerName="nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell1"
Nov 28 16:18:11 crc kubenswrapper[4857]: I1128 16:18:11.638686    4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae64e071-e073-423f-bf03-00f05e0b28b2" containerName="registry-server"
Nov 28 16:18:11 crc kubenswrapper[4857]: I1128 16:18:11.643756    4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8cmz4"
Nov 28 16:18:11 crc kubenswrapper[4857]: I1128 16:18:11.648393    4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8cmz4"]
Nov 28 16:18:11 crc kubenswrapper[4857]: I1128 16:18:11.835796    4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e071ccad-5d7b-4139-9ec9-96e277495b06-utilities\") pod \"redhat-operators-8cmz4\" (UID: \"e071ccad-5d7b-4139-9ec9-96e277495b06\") " pod="openshift-marketplace/redhat-operators-8cmz4"
Nov 28 16:18:11 crc kubenswrapper[4857]: I1128 16:18:11.836065    4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e071ccad-5d7b-4139-9ec9-96e277495b06-catalog-content\") pod \"redhat-operators-8cmz4\" (UID: \"e071ccad-5d7b-4139-9ec9-96e277495b06\") " pod="openshift-marketplace/redhat-operators-8cmz4"
Nov 28 16:18:11 crc kubenswrapper[4857]: I1128 16:18:11.836100    4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqx6w\" (UniqueName: \"kubernetes.io/projected/e071ccad-5d7b-4139-9ec9-96e277495b06-kube-api-access-mqx6w\") pod \"redhat-operators-8cmz4\" (UID: \"e071ccad-5d7b-4139-9ec9-96e277495b06\") " pod="openshift-marketplace/redhat-operators-8cmz4"
Nov 28 16:18:11 crc kubenswrapper[4857]: I1128 16:18:11.937890    4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e071ccad-5d7b-4139-9ec9-96e277495b06-catalog-content\") pod \"redhat-operators-8cmz4\" (UID: \"e071ccad-5d7b-4139-9ec9-96e277495b06\") " pod="openshift-marketplace/redhat-operators-8cmz4"
Nov 28 16:18:11 crc kubenswrapper[4857]: I1128 16:18:11.937958    4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqx6w\" (UniqueName: \"kubernetes.io/projected/e071ccad-5d7b-4139-9ec9-96e277495b06-kube-api-access-mqx6w\") pod \"redhat-operators-8cmz4\" (UID: \"e071ccad-5d7b-4139-9ec9-96e277495b06\") " pod="openshift-marketplace/redhat-operators-8cmz4"
Nov 28 16:18:11 crc kubenswrapper[4857]: I1128 16:18:11.938059    4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e071ccad-5d7b-4139-9ec9-96e277495b06-utilities\") pod \"redhat-operators-8cmz4\" (UID: \"e071ccad-5d7b-4139-9ec9-96e277495b06\") " pod="openshift-marketplace/redhat-operators-8cmz4"
Nov 28 16:18:11 crc kubenswrapper[4857]: I1128 16:18:11.938498    4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e071ccad-5d7b-4139-9ec9-96e277495b06-catalog-content\") pod \"redhat-operators-8cmz4\" (UID: \"e071ccad-5d7b-4139-9ec9-96e277495b06\") " pod="openshift-marketplace/redhat-operators-8cmz4"
Nov 28 16:18:11 crc kubenswrapper[4857]: I1128 16:18:11.938535    4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e071ccad-5d7b-4139-9ec9-96e277495b06-utilities\") pod \"redhat-operators-8cmz4\" (UID: \"e071ccad-5d7b-4139-9ec9-96e277495b06\") " pod="openshift-marketplace/redhat-operators-8cmz4"
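The reconciler entries above walk each of the pod's three volumes through the same progression: VerifyControllerAttachedVolume, then MountVolume started, then MountVolume.SetUp succeeded. A compressed, hypothetical reduction of that per-volume state machine (the real logic lives in kubelet's volume manager; the types and function here are illustrative only):

```go
package main

import "fmt"

// volumeState is a hypothetical reduction of the kubelet volume
// manager's per-volume reconciliation states.
type volumeState int

const (
	attachedVerified volumeState = iota // VerifyControllerAttachedVolume done
	mountStarted                        // operationExecutor.MountVolume started
	setUpSucceeded                      // MountVolume.SetUp succeeded
)

// reconcile advances every volume one step per pass, the way the
// log shows all three volumes moving in lockstep.
func reconcile(vols map[string]volumeState) {
	for name, st := range vols {
		switch st {
		case attachedVerified:
			fmt.Printf("MountVolume started for %q\n", name)
			vols[name] = mountStarted
		case mountStarted:
			fmt.Printf("MountVolume.SetUp succeeded for %q\n", name)
			vols[name] = setUpSucceeded
		}
	}
}

func main() {
	vols := map[string]volumeState{
		"utilities":             attachedVerified,
		"catalog-content":       attachedVerified,
		"kube-api-access-mqx6w": attachedVerified,
	}
	reconcile(vols) // first pass: all three report "MountVolume started"
	reconcile(vols) // second pass: SetUp succeeds, as in the log
}
```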
\"kube-api-access-mqx6w\" (UniqueName: \"kubernetes.io/projected/e071ccad-5d7b-4139-9ec9-96e277495b06-kube-api-access-mqx6w\") pod \"redhat-operators-8cmz4\" (UID: \"e071ccad-5d7b-4139-9ec9-96e277495b06\") " pod="openshift-marketplace/redhat-operators-8cmz4" Nov 28 16:18:11 crc kubenswrapper[4857]: I1128 16:18:11.971574 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8cmz4" Nov 28 16:18:12 crc kubenswrapper[4857]: I1128 16:18:12.639877 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8cmz4"] Nov 28 16:18:12 crc kubenswrapper[4857]: I1128 16:18:12.714775 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8cmz4" event={"ID":"e071ccad-5d7b-4139-9ec9-96e277495b06","Type":"ContainerStarted","Data":"26c31c4d8f6275e59124002380c53ec013f81e0b49d5194666ec51b05ab8e855"} Nov 28 16:18:13 crc kubenswrapper[4857]: I1128 16:18:13.728468 4857 generic.go:334] "Generic (PLEG): container finished" podID="e071ccad-5d7b-4139-9ec9-96e277495b06" containerID="5e8363d8bc4e259a8cb1b03c92ae718a4a72cdc96a9c860ac7b88c188d517f15" exitCode=0 Nov 28 16:18:13 crc kubenswrapper[4857]: I1128 16:18:13.728587 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8cmz4" event={"ID":"e071ccad-5d7b-4139-9ec9-96e277495b06","Type":"ContainerDied","Data":"5e8363d8bc4e259a8cb1b03c92ae718a4a72cdc96a9c860ac7b88c188d517f15"} Nov 28 16:18:15 crc kubenswrapper[4857]: I1128 16:18:15.756685 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8cmz4" event={"ID":"e071ccad-5d7b-4139-9ec9-96e277495b06","Type":"ContainerStarted","Data":"4bcc4468e1ca6687a978538573309b090c54a90bb350b2dadfaa891a85aee5f6"} Nov 28 16:18:19 crc kubenswrapper[4857]: I1128 16:18:19.808392 4857 generic.go:334] "Generic (PLEG): container finished" podID="e071ccad-5d7b-4139-9ec9-96e277495b06" containerID="4bcc4468e1ca6687a978538573309b090c54a90bb350b2dadfaa891a85aee5f6" exitCode=0 Nov 28 16:18:19 crc kubenswrapper[4857]: I1128 16:18:19.808493 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8cmz4" event={"ID":"e071ccad-5d7b-4139-9ec9-96e277495b06","Type":"ContainerDied","Data":"4bcc4468e1ca6687a978538573309b090c54a90bb350b2dadfaa891a85aee5f6"} Nov 28 16:18:20 crc kubenswrapper[4857]: I1128 16:18:20.820620 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8cmz4" event={"ID":"e071ccad-5d7b-4139-9ec9-96e277495b06","Type":"ContainerStarted","Data":"9b2d510cbb09c8b131b472a3f8f9cb1d261c3b0aecfa9505467fe75908b56c38"} Nov 28 16:18:20 crc kubenswrapper[4857]: I1128 16:18:20.852333 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-8cmz4" podStartSLOduration=3.284285699 podStartE2EDuration="9.85230524s" podCreationTimestamp="2025-11-28 16:18:11 +0000 UTC" firstStartedPulling="2025-11-28 16:18:13.731299973 +0000 UTC m=+10143.855241410" lastFinishedPulling="2025-11-28 16:18:20.299319514 +0000 UTC m=+10150.423260951" observedRunningTime="2025-11-28 16:18:20.838017302 +0000 UTC m=+10150.961958759" watchObservedRunningTime="2025-11-28 16:18:20.85230524 +0000 UTC m=+10150.976246687" Nov 28 16:18:21 crc kubenswrapper[4857]: I1128 16:18:21.971702 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-8cmz4" Nov 28 
Nov 28 16:18:21 crc kubenswrapper[4857]: I1128 16:18:21.972205    4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-8cmz4"
Nov 28 16:18:23 crc kubenswrapper[4857]: I1128 16:18:23.024410    4857 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-8cmz4" podUID="e071ccad-5d7b-4139-9ec9-96e277495b06" containerName="registry-server" probeResult="failure" output=<
Nov 28 16:18:23 crc kubenswrapper[4857]: timeout: failed to connect service ":50051" within 1s
Nov 28 16:18:23 crc kubenswrapper[4857]: >
Nov 28 16:18:23 crc kubenswrapper[4857]: I1128 16:18:23.228667    4857 scope.go:117] "RemoveContainer" containerID="c74305952f95e4003fa797d662a671565c73d0c150114e268ea397949f69fe95"
Nov 28 16:18:23 crc kubenswrapper[4857]: E1128 16:18:23.229099    4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 16:18:32 crc kubenswrapper[4857]: I1128 16:18:32.034298    4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-8cmz4"
Nov 28 16:18:32 crc kubenswrapper[4857]: I1128 16:18:32.101658    4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-8cmz4"
Nov 28 16:18:32 crc kubenswrapper[4857]: I1128 16:18:32.276961    4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8cmz4"]
Nov 28 16:18:33 crc kubenswrapper[4857]: I1128 16:18:33.976177    4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-8cmz4" podUID="e071ccad-5d7b-4139-9ec9-96e277495b06" containerName="registry-server" containerID="cri-o://9b2d510cbb09c8b131b472a3f8f9cb1d261c3b0aecfa9505467fe75908b56c38" gracePeriod=2
Nov 28 16:18:34 crc kubenswrapper[4857]: I1128 16:18:34.228916    4857 scope.go:117] "RemoveContainer" containerID="c74305952f95e4003fa797d662a671565c73d0c150114e268ea397949f69fe95"
Nov 28 16:18:34 crc kubenswrapper[4857]: E1128 16:18:34.229238    4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
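The startup-probe failure above means the registry-server endpoint on port 50051 was not reachable within the probe's 1s budget while the freshly pulled catalog was still warming up; nine seconds later the same probe reports "started". The real probe speaks the gRPC health protocol; a plain TCP dial with the same 1s timeout is shown here as a rough stand-in (the address is a placeholder):

```go
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// Same budget the kubelet probe reports: 1s to reach :50051.
	// 127.0.0.1 is a placeholder; the probe targets the pod IP.
	conn, err := net.DialTimeout("tcp", "127.0.0.1:50051", time.Second)
	if err != nil {
		fmt.Println("probe failure:", err) // analogous to the log output above
		return
	}
	conn.Close()
	fmt.Println("probe success: port open")
}
```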
Nov 28 16:18:34 crc kubenswrapper[4857]: I1128 16:18:34.707912    4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8cmz4"
Nov 28 16:18:34 crc kubenswrapper[4857]: I1128 16:18:34.831860    4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e071ccad-5d7b-4139-9ec9-96e277495b06-utilities\") pod \"e071ccad-5d7b-4139-9ec9-96e277495b06\" (UID: \"e071ccad-5d7b-4139-9ec9-96e277495b06\") "
Nov 28 16:18:34 crc kubenswrapper[4857]: I1128 16:18:34.832386    4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mqx6w\" (UniqueName: \"kubernetes.io/projected/e071ccad-5d7b-4139-9ec9-96e277495b06-kube-api-access-mqx6w\") pod \"e071ccad-5d7b-4139-9ec9-96e277495b06\" (UID: \"e071ccad-5d7b-4139-9ec9-96e277495b06\") "
Nov 28 16:18:34 crc kubenswrapper[4857]: I1128 16:18:34.832773    4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e071ccad-5d7b-4139-9ec9-96e277495b06-catalog-content\") pod \"e071ccad-5d7b-4139-9ec9-96e277495b06\" (UID: \"e071ccad-5d7b-4139-9ec9-96e277495b06\") "
Nov 28 16:18:34 crc kubenswrapper[4857]: I1128 16:18:34.835446    4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e071ccad-5d7b-4139-9ec9-96e277495b06-utilities" (OuterVolumeSpecName: "utilities") pod "e071ccad-5d7b-4139-9ec9-96e277495b06" (UID: "e071ccad-5d7b-4139-9ec9-96e277495b06"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:18:34 crc kubenswrapper[4857]: I1128 16:18:34.840898    4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e071ccad-5d7b-4139-9ec9-96e277495b06-kube-api-access-mqx6w" (OuterVolumeSpecName: "kube-api-access-mqx6w") pod "e071ccad-5d7b-4139-9ec9-96e277495b06" (UID: "e071ccad-5d7b-4139-9ec9-96e277495b06"). InnerVolumeSpecName "kube-api-access-mqx6w". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:18:34 crc kubenswrapper[4857]: I1128 16:18:34.936513    4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e071ccad-5d7b-4139-9ec9-96e277495b06-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 16:18:34 crc kubenswrapper[4857]: I1128 16:18:34.936622    4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mqx6w\" (UniqueName: \"kubernetes.io/projected/e071ccad-5d7b-4139-9ec9-96e277495b06-kube-api-access-mqx6w\") on node \"crc\" DevicePath \"\""
Nov 28 16:18:34 crc kubenswrapper[4857]: I1128 16:18:34.957385    4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e071ccad-5d7b-4139-9ec9-96e277495b06-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e071ccad-5d7b-4139-9ec9-96e277495b06" (UID: "e071ccad-5d7b-4139-9ec9-96e277495b06"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:18:34 crc kubenswrapper[4857]: I1128 16:18:34.991393    4857 generic.go:334] "Generic (PLEG): container finished" podID="e071ccad-5d7b-4139-9ec9-96e277495b06" containerID="9b2d510cbb09c8b131b472a3f8f9cb1d261c3b0aecfa9505467fe75908b56c38" exitCode=0
Nov 28 16:18:34 crc kubenswrapper[4857]: I1128 16:18:34.991434    4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8cmz4" event={"ID":"e071ccad-5d7b-4139-9ec9-96e277495b06","Type":"ContainerDied","Data":"9b2d510cbb09c8b131b472a3f8f9cb1d261c3b0aecfa9505467fe75908b56c38"}
Nov 28 16:18:34 crc kubenswrapper[4857]: I1128 16:18:34.991462    4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8cmz4" event={"ID":"e071ccad-5d7b-4139-9ec9-96e277495b06","Type":"ContainerDied","Data":"26c31c4d8f6275e59124002380c53ec013f81e0b49d5194666ec51b05ab8e855"}
Nov 28 16:18:34 crc kubenswrapper[4857]: I1128 16:18:34.991482    4857 scope.go:117] "RemoveContainer" containerID="9b2d510cbb09c8b131b472a3f8f9cb1d261c3b0aecfa9505467fe75908b56c38"
Nov 28 16:18:34 crc kubenswrapper[4857]: I1128 16:18:34.991476    4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8cmz4"
Nov 28 16:18:35 crc kubenswrapper[4857]: I1128 16:18:35.014728    4857 scope.go:117] "RemoveContainer" containerID="4bcc4468e1ca6687a978538573309b090c54a90bb350b2dadfaa891a85aee5f6"
Nov 28 16:18:35 crc kubenswrapper[4857]: I1128 16:18:35.032974    4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8cmz4"]
Nov 28 16:18:35 crc kubenswrapper[4857]: I1128 16:18:35.038924    4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e071ccad-5d7b-4139-9ec9-96e277495b06-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 16:18:35 crc kubenswrapper[4857]: I1128 16:18:35.043316    4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-8cmz4"]
Nov 28 16:18:35 crc kubenswrapper[4857]: I1128 16:18:35.057589    4857 scope.go:117] "RemoveContainer" containerID="5e8363d8bc4e259a8cb1b03c92ae718a4a72cdc96a9c860ac7b88c188d517f15"
Nov 28 16:18:35 crc kubenswrapper[4857]: I1128 16:18:35.104425    4857 scope.go:117] "RemoveContainer" containerID="9b2d510cbb09c8b131b472a3f8f9cb1d261c3b0aecfa9505467fe75908b56c38"
Nov 28 16:18:35 crc kubenswrapper[4857]: E1128 16:18:35.105009    4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9b2d510cbb09c8b131b472a3f8f9cb1d261c3b0aecfa9505467fe75908b56c38\": container with ID starting with 9b2d510cbb09c8b131b472a3f8f9cb1d261c3b0aecfa9505467fe75908b56c38 not found: ID does not exist" containerID="9b2d510cbb09c8b131b472a3f8f9cb1d261c3b0aecfa9505467fe75908b56c38"
Nov 28 16:18:35 crc kubenswrapper[4857]: I1128 16:18:35.105068    4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b2d510cbb09c8b131b472a3f8f9cb1d261c3b0aecfa9505467fe75908b56c38"} err="failed to get container status \"9b2d510cbb09c8b131b472a3f8f9cb1d261c3b0aecfa9505467fe75908b56c38\": rpc error: code = NotFound desc = could not find container \"9b2d510cbb09c8b131b472a3f8f9cb1d261c3b0aecfa9505467fe75908b56c38\": container with ID starting with 9b2d510cbb09c8b131b472a3f8f9cb1d261c3b0aecfa9505467fe75908b56c38 not found: ID does not exist"
Nov 28 16:18:35 crc kubenswrapper[4857]: I1128 16:18:35.105104    4857 scope.go:117] "RemoveContainer" containerID="4bcc4468e1ca6687a978538573309b090c54a90bb350b2dadfaa891a85aee5f6"
Nov 28 16:18:35 crc kubenswrapper[4857]: E1128 16:18:35.105606    4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4bcc4468e1ca6687a978538573309b090c54a90bb350b2dadfaa891a85aee5f6\": container with ID starting with 4bcc4468e1ca6687a978538573309b090c54a90bb350b2dadfaa891a85aee5f6 not found: ID does not exist" containerID="4bcc4468e1ca6687a978538573309b090c54a90bb350b2dadfaa891a85aee5f6"
Nov 28 16:18:35 crc kubenswrapper[4857]: I1128 16:18:35.105652    4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4bcc4468e1ca6687a978538573309b090c54a90bb350b2dadfaa891a85aee5f6"} err="failed to get container status \"4bcc4468e1ca6687a978538573309b090c54a90bb350b2dadfaa891a85aee5f6\": rpc error: code = NotFound desc = could not find container \"4bcc4468e1ca6687a978538573309b090c54a90bb350b2dadfaa891a85aee5f6\": container with ID starting with 4bcc4468e1ca6687a978538573309b090c54a90bb350b2dadfaa891a85aee5f6 not found: ID does not exist"
Nov 28 16:18:35 crc kubenswrapper[4857]: I1128 16:18:35.105676    4857 scope.go:117] "RemoveContainer" containerID="5e8363d8bc4e259a8cb1b03c92ae718a4a72cdc96a9c860ac7b88c188d517f15"
Nov 28 16:18:35 crc kubenswrapper[4857]: E1128 16:18:35.105931    4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5e8363d8bc4e259a8cb1b03c92ae718a4a72cdc96a9c860ac7b88c188d517f15\": container with ID starting with 5e8363d8bc4e259a8cb1b03c92ae718a4a72cdc96a9c860ac7b88c188d517f15 not found: ID does not exist" containerID="5e8363d8bc4e259a8cb1b03c92ae718a4a72cdc96a9c860ac7b88c188d517f15"
Nov 28 16:18:35 crc kubenswrapper[4857]: I1128 16:18:35.105993    4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5e8363d8bc4e259a8cb1b03c92ae718a4a72cdc96a9c860ac7b88c188d517f15"} err="failed to get container status \"5e8363d8bc4e259a8cb1b03c92ae718a4a72cdc96a9c860ac7b88c188d517f15\": rpc error: code = NotFound desc = could not find container \"5e8363d8bc4e259a8cb1b03c92ae718a4a72cdc96a9c860ac7b88c188d517f15\": container with ID starting with 5e8363d8bc4e259a8cb1b03c92ae718a4a72cdc96a9c860ac7b88c188d517f15 not found: ID does not exist"
Nov 28 16:18:36 crc kubenswrapper[4857]: I1128 16:18:36.242409    4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e071ccad-5d7b-4139-9ec9-96e277495b06" path="/var/lib/kubelet/pods/e071ccad-5d7b-4139-9ec9-96e277495b06/volumes"
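The three ContainerStatus NotFound / "DeleteContainer returned error" triples above are benign races: by the time the cleanup path re-issues RemoveContainer, CRI-O has already dropped the containers. The usual pattern is to treat NotFound as already-removed; a sketch against a CRI-style gRPC error (google.golang.org/grpc/status and codes are the real packages; removeContainer is a hypothetical stand-in for the runtime call):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// removeContainer is a stand-in for a CRI RemoveContainer call that
// here always reports the container as missing, like the log above.
func removeContainer(id string) error {
	return status.Errorf(codes.NotFound,
		"could not find container %q: ID does not exist", id)
}

func main() {
	err := removeContainer("9b2d510cbb09c8b131b472a3f8f9cb1d261c3b0aecfa9505467fe75908b56c38")
	if status.Code(err) == codes.NotFound {
		// Already deleted elsewhere: log and move on. This is the
		// non-fatal "DeleteContainer returned error" path in the log.
		fmt.Println("already removed:", err)
		return
	}
	if err != nil {
		fmt.Println("real failure:", err)
	}
}
```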
Nov 28 16:18:42 crc kubenswrapper[4857]: I1128 16:18:42.057779    4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-copy-data"
Nov 28 16:18:42 crc kubenswrapper[4857]: I1128 16:18:42.067709    4857 generic.go:334] "Generic (PLEG): container finished" podID="ccc6a333-9b75-487e-8d3a-740ec87a9136" containerID="d91334afb40a193be62366514e6b8f1c461c9d16204c8da88bfce65c30e349ca" exitCode=137
Nov 28 16:18:42 crc kubenswrapper[4857]: I1128 16:18:42.067764    4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"ccc6a333-9b75-487e-8d3a-740ec87a9136","Type":"ContainerDied","Data":"d91334afb40a193be62366514e6b8f1c461c9d16204c8da88bfce65c30e349ca"}
Nov 28 16:18:42 crc kubenswrapper[4857]: I1128 16:18:42.067796    4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"ccc6a333-9b75-487e-8d3a-740ec87a9136","Type":"ContainerDied","Data":"7d2d8776a59be36366f96fc3dfed0523a27d8592f54fa04b4cfc839553db52fd"}
Nov 28 16:18:42 crc kubenswrapper[4857]: I1128 16:18:42.067816    4857 scope.go:117] "RemoveContainer" containerID="d91334afb40a193be62366514e6b8f1c461c9d16204c8da88bfce65c30e349ca"
Nov 28 16:18:42 crc kubenswrapper[4857]: I1128 16:18:42.067968    4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-copy-data"
Nov 28 16:18:42 crc kubenswrapper[4857]: I1128 16:18:42.092028    4857 scope.go:117] "RemoveContainer" containerID="d91334afb40a193be62366514e6b8f1c461c9d16204c8da88bfce65c30e349ca"
Nov 28 16:18:42 crc kubenswrapper[4857]: E1128 16:18:42.092740    4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d91334afb40a193be62366514e6b8f1c461c9d16204c8da88bfce65c30e349ca\": container with ID starting with d91334afb40a193be62366514e6b8f1c461c9d16204c8da88bfce65c30e349ca not found: ID does not exist" containerID="d91334afb40a193be62366514e6b8f1c461c9d16204c8da88bfce65c30e349ca"
Nov 28 16:18:42 crc kubenswrapper[4857]: I1128 16:18:42.092774    4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d91334afb40a193be62366514e6b8f1c461c9d16204c8da88bfce65c30e349ca"} err="failed to get container status \"d91334afb40a193be62366514e6b8f1c461c9d16204c8da88bfce65c30e349ca\": rpc error: code = NotFound desc = could not find container \"d91334afb40a193be62366514e6b8f1c461c9d16204c8da88bfce65c30e349ca\": container with ID starting with d91334afb40a193be62366514e6b8f1c461c9d16204c8da88bfce65c30e349ca not found: ID does not exist"
Nov 28 16:18:42 crc kubenswrapper[4857]: I1128 16:18:42.211664    4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mariadb-data\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b30d6f06-e654-4101-80a0-1cc8f143281a\") pod \"ccc6a333-9b75-487e-8d3a-740ec87a9136\" (UID: \"ccc6a333-9b75-487e-8d3a-740ec87a9136\") "
Nov 28 16:18:42 crc kubenswrapper[4857]: I1128 16:18:42.211752    4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q7n6g\" (UniqueName: \"kubernetes.io/projected/ccc6a333-9b75-487e-8d3a-740ec87a9136-kube-api-access-q7n6g\") pod \"ccc6a333-9b75-487e-8d3a-740ec87a9136\" (UID: \"ccc6a333-9b75-487e-8d3a-740ec87a9136\") "
Nov 28 16:18:42 crc kubenswrapper[4857]: I1128 16:18:42.218278    4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ccc6a333-9b75-487e-8d3a-740ec87a9136-kube-api-access-q7n6g" (OuterVolumeSpecName: "kube-api-access-q7n6g") pod "ccc6a333-9b75-487e-8d3a-740ec87a9136" (UID: "ccc6a333-9b75-487e-8d3a-740ec87a9136"). InnerVolumeSpecName "kube-api-access-q7n6g". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:18:42 crc kubenswrapper[4857]: I1128 16:18:42.241761    4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b30d6f06-e654-4101-80a0-1cc8f143281a" (OuterVolumeSpecName: "mariadb-data") pod "ccc6a333-9b75-487e-8d3a-740ec87a9136" (UID: "ccc6a333-9b75-487e-8d3a-740ec87a9136"). InnerVolumeSpecName "pvc-b30d6f06-e654-4101-80a0-1cc8f143281a". PluginName "kubernetes.io/csi", VolumeGidValue ""
Nov 28 16:18:42 crc kubenswrapper[4857]: I1128 16:18:42.315140    4857 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-b30d6f06-e654-4101-80a0-1cc8f143281a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b30d6f06-e654-4101-80a0-1cc8f143281a\") on node \"crc\" "
Nov 28 16:18:42 crc kubenswrapper[4857]: I1128 16:18:42.315170    4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q7n6g\" (UniqueName: \"kubernetes.io/projected/ccc6a333-9b75-487e-8d3a-740ec87a9136-kube-api-access-q7n6g\") on node \"crc\" DevicePath \"\""
Nov 28 16:18:42 crc kubenswrapper[4857]: I1128 16:18:42.395079    4857 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice...
Nov 28 16:18:42 crc kubenswrapper[4857]: I1128 16:18:42.395217    4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-copy-data"]
Nov 28 16:18:42 crc kubenswrapper[4857]: I1128 16:18:42.395238    4857 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-b30d6f06-e654-4101-80a0-1cc8f143281a" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b30d6f06-e654-4101-80a0-1cc8f143281a") on node "crc"
Nov 28 16:18:42 crc kubenswrapper[4857]: I1128 16:18:42.408090    4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-copy-data"]
Nov 28 16:18:42 crc kubenswrapper[4857]: I1128 16:18:42.416816    4857 reconciler_common.go:293] "Volume detached for volume \"pvc-b30d6f06-e654-4101-80a0-1cc8f143281a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-b30d6f06-e654-4101-80a0-1cc8f143281a\") on node \"crc\" DevicePath \"\""
Nov 28 16:18:43 crc kubenswrapper[4857]: I1128 16:18:43.031387    4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-copy-data"]
Nov 28 16:18:43 crc kubenswrapper[4857]: I1128 16:18:43.031830    4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-copy-data" podUID="f83a1a23-ed01-4d84-b3bf-e55e4268e093" containerName="adoption" containerID="cri-o://608698b4649c31cd4fbda1000524a37011504be56433b2e00e645a65704dcdef" gracePeriod=30
Nov 28 16:18:44 crc kubenswrapper[4857]: I1128 16:18:44.244749    4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ccc6a333-9b75-487e-8d3a-740ec87a9136" path="/var/lib/kubelet/pods/ccc6a333-9b75-487e-8d3a-740ec87a9136/volumes"
Nov 28 16:18:47 crc kubenswrapper[4857]: I1128 16:18:47.229760    4857 scope.go:117] "RemoveContainer" containerID="c74305952f95e4003fa797d662a671565c73d0c150114e268ea397949f69fe95"
Nov 28 16:18:47 crc kubenswrapper[4857]: E1128 16:18:47.230843    4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 16:19:00 crc kubenswrapper[4857]: I1128 16:19:00.238090    4857 scope.go:117] "RemoveContainer" containerID="c74305952f95e4003fa797d662a671565c73d0c150114e268ea397949f69fe95"
Nov 28 16:19:00 crc kubenswrapper[4857]: E1128 16:19:00.239266    4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 16:19:13 crc kubenswrapper[4857]: I1128 16:19:13.229159    4857 scope.go:117] "RemoveContainer" containerID="c74305952f95e4003fa797d662a671565c73d0c150114e268ea397949f69fe95"
Nov 28 16:19:13 crc kubenswrapper[4857]: E1128 16:19:13.230247    4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 16:19:13 crc kubenswrapper[4857]: I1128 16:19:13.466842    4857 generic.go:334] "Generic (PLEG): container finished" podID="f83a1a23-ed01-4d84-b3bf-e55e4268e093" containerID="608698b4649c31cd4fbda1000524a37011504be56433b2e00e645a65704dcdef" exitCode=137
Nov 28 16:19:13 crc kubenswrapper[4857]: I1128 16:19:13.466879    4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"f83a1a23-ed01-4d84-b3bf-e55e4268e093","Type":"ContainerDied","Data":"608698b4649c31cd4fbda1000524a37011504be56433b2e00e645a65704dcdef"}
Nov 28 16:19:13 crc kubenswrapper[4857]: I1128 16:19:13.466903    4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"f83a1a23-ed01-4d84-b3bf-e55e4268e093","Type":"ContainerDied","Data":"006d1339d7b1aec88d4c011b40ad08dabc59f6a5df0a206301b9bb7d530ae4af"}
Nov 28 16:19:13 crc kubenswrapper[4857]: I1128 16:19:13.466914    4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="006d1339d7b1aec88d4c011b40ad08dabc59f6a5df0a206301b9bb7d530ae4af"
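Both copy-data containers exit with code 137 almost exactly 30 seconds after their "Killing container with a grace period" gracePeriod=30 lines (16:18:11 to 16:18:42 for mariadb, 16:18:43 to 16:19:13 for ovn): they ignored SIGTERM for the whole grace window and were SIGKILLed, and 137 follows the 128-plus-signal exit-status convention:

```go
package main

import (
	"fmt"
	"syscall"
)

func main() {
	// 128 + SIGKILL(9) = 137, the exitCode reported above for both
	// mariadb-copy-data and ovn-copy-data after the 30s grace period.
	fmt.Println(128 + int(syscall.SIGKILL)) // prints 137 on Linux
}
```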
Nov 28 16:19:13 crc kubenswrapper[4857]: I1128 16:19:13.583977    4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-copy-data"
Nov 28 16:19:13 crc kubenswrapper[4857]: I1128 16:19:13.695338    4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gk44g\" (UniqueName: \"kubernetes.io/projected/f83a1a23-ed01-4d84-b3bf-e55e4268e093-kube-api-access-gk44g\") pod \"f83a1a23-ed01-4d84-b3bf-e55e4268e093\" (UID: \"f83a1a23-ed01-4d84-b3bf-e55e4268e093\") "
Nov 28 16:19:13 crc kubenswrapper[4857]: I1128 16:19:13.696048    4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-data\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1e50f717-2312-4a7f-b55c-3f7fbaa30c3f\") pod \"f83a1a23-ed01-4d84-b3bf-e55e4268e093\" (UID: \"f83a1a23-ed01-4d84-b3bf-e55e4268e093\") "
Nov 28 16:19:13 crc kubenswrapper[4857]: I1128 16:19:13.696223    4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/f83a1a23-ed01-4d84-b3bf-e55e4268e093-ovn-data-cert\") pod \"f83a1a23-ed01-4d84-b3bf-e55e4268e093\" (UID: \"f83a1a23-ed01-4d84-b3bf-e55e4268e093\") "
Nov 28 16:19:13 crc kubenswrapper[4857]: I1128 16:19:13.703621    4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f83a1a23-ed01-4d84-b3bf-e55e4268e093-kube-api-access-gk44g" (OuterVolumeSpecName: "kube-api-access-gk44g") pod "f83a1a23-ed01-4d84-b3bf-e55e4268e093" (UID: "f83a1a23-ed01-4d84-b3bf-e55e4268e093"). InnerVolumeSpecName "kube-api-access-gk44g". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:19:13 crc kubenswrapper[4857]: I1128 16:19:13.710170    4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f83a1a23-ed01-4d84-b3bf-e55e4268e093-ovn-data-cert" (OuterVolumeSpecName: "ovn-data-cert") pod "f83a1a23-ed01-4d84-b3bf-e55e4268e093" (UID: "f83a1a23-ed01-4d84-b3bf-e55e4268e093"). InnerVolumeSpecName "ovn-data-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 28 16:19:13 crc kubenswrapper[4857]: I1128 16:19:13.721727    4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1e50f717-2312-4a7f-b55c-3f7fbaa30c3f" (OuterVolumeSpecName: "ovn-data") pod "f83a1a23-ed01-4d84-b3bf-e55e4268e093" (UID: "f83a1a23-ed01-4d84-b3bf-e55e4268e093"). InnerVolumeSpecName "pvc-1e50f717-2312-4a7f-b55c-3f7fbaa30c3f". PluginName "kubernetes.io/csi", VolumeGidValue ""
Nov 28 16:19:13 crc kubenswrapper[4857]: I1128 16:19:13.798910    4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gk44g\" (UniqueName: \"kubernetes.io/projected/f83a1a23-ed01-4d84-b3bf-e55e4268e093-kube-api-access-gk44g\") on node \"crc\" DevicePath \"\""
Nov 28 16:19:13 crc kubenswrapper[4857]: I1128 16:19:13.799018    4857 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-1e50f717-2312-4a7f-b55c-3f7fbaa30c3f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1e50f717-2312-4a7f-b55c-3f7fbaa30c3f\") on node \"crc\" "
Nov 28 16:19:13 crc kubenswrapper[4857]: I1128 16:19:13.799045    4857 reconciler_common.go:293] "Volume detached for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/f83a1a23-ed01-4d84-b3bf-e55e4268e093-ovn-data-cert\") on node \"crc\" DevicePath \"\""
Nov 28 16:19:13 crc kubenswrapper[4857]: I1128 16:19:13.844451    4857 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice...
Nov 28 16:19:13 crc kubenswrapper[4857]: I1128 16:19:13.844795    4857 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-1e50f717-2312-4a7f-b55c-3f7fbaa30c3f" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1e50f717-2312-4a7f-b55c-3f7fbaa30c3f") on node "crc"
Nov 28 16:19:13 crc kubenswrapper[4857]: I1128 16:19:13.900934    4857 reconciler_common.go:293] "Volume detached for volume \"pvc-1e50f717-2312-4a7f-b55c-3f7fbaa30c3f\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-1e50f717-2312-4a7f-b55c-3f7fbaa30c3f\") on node \"crc\" DevicePath \"\""
Nov 28 16:19:14 crc kubenswrapper[4857]: I1128 16:19:14.477231    4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-copy-data"
Nov 28 16:19:14 crc kubenswrapper[4857]: I1128 16:19:14.505197    4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-copy-data"]
Nov 28 16:19:14 crc kubenswrapper[4857]: I1128 16:19:14.517443    4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-copy-data"]
Nov 28 16:19:16 crc kubenswrapper[4857]: I1128 16:19:16.244099    4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f83a1a23-ed01-4d84-b3bf-e55e4268e093" path="/var/lib/kubelet/pods/f83a1a23-ed01-4d84-b3bf-e55e4268e093/volumes"
Nov 28 16:19:24 crc kubenswrapper[4857]: I1128 16:19:24.232851    4857 scope.go:117] "RemoveContainer" containerID="c74305952f95e4003fa797d662a671565c73d0c150114e268ea397949f69fe95"
Nov 28 16:19:24 crc kubenswrapper[4857]: E1128 16:19:24.233754    4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 16:19:33 crc kubenswrapper[4857]: I1128 16:19:33.850275    4857 scope.go:117] "RemoveContainer" containerID="608698b4649c31cd4fbda1000524a37011504be56433b2e00e645a65704dcdef"
Nov 28 16:19:39 crc kubenswrapper[4857]: I1128 16:19:39.229580    4857 scope.go:117] "RemoveContainer" containerID="c74305952f95e4003fa797d662a671565c73d0c150114e268ea397949f69fe95"
Nov 28 16:19:39 crc kubenswrapper[4857]: E1128 16:19:39.230860    4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 16:19:52 crc kubenswrapper[4857]: I1128 16:19:52.229817    4857 scope.go:117] "RemoveContainer" containerID="c74305952f95e4003fa797d662a671565c73d0c150114e268ea397949f69fe95"
Nov 28 16:19:52 crc kubenswrapper[4857]: E1128 16:19:52.230694    4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 16:20:05 crc kubenswrapper[4857]: I1128 16:20:05.228813    4857 scope.go:117] "RemoveContainer" containerID="c74305952f95e4003fa797d662a671565c73d0c150114e268ea397949f69fe95"
Nov 28 16:20:05 crc kubenswrapper[4857]: E1128 16:20:05.230297    4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 16:20:10 crc kubenswrapper[4857]: I1128 16:20:10.779995    4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-swvhw/must-gather-jl8r5"]
Nov 28 16:20:10 crc kubenswrapper[4857]: E1128 16:20:10.781151    4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e071ccad-5d7b-4139-9ec9-96e277495b06" containerName="registry-server"
Nov 28 16:20:10 crc kubenswrapper[4857]: I1128 16:20:10.781170    4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="e071ccad-5d7b-4139-9ec9-96e277495b06" containerName="registry-server"
Nov 28 16:20:10 crc kubenswrapper[4857]: E1128 16:20:10.781198    4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f83a1a23-ed01-4d84-b3bf-e55e4268e093" containerName="adoption"
Nov 28 16:20:10 crc kubenswrapper[4857]: I1128 16:20:10.781205    4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="f83a1a23-ed01-4d84-b3bf-e55e4268e093" containerName="adoption"
Nov 28 16:20:10 crc kubenswrapper[4857]: E1128 16:20:10.781235    4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e071ccad-5d7b-4139-9ec9-96e277495b06" containerName="extract-content"
Nov 28 16:20:10 crc kubenswrapper[4857]: I1128 16:20:10.781244    4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="e071ccad-5d7b-4139-9ec9-96e277495b06" containerName="extract-content"
Nov 28 16:20:10 crc kubenswrapper[4857]: E1128 16:20:10.781262    4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ccc6a333-9b75-487e-8d3a-740ec87a9136" containerName="adoption"
Nov 28 16:20:10 crc kubenswrapper[4857]: I1128 16:20:10.781269    4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="ccc6a333-9b75-487e-8d3a-740ec87a9136" containerName="adoption"
Nov 28 16:20:10 crc kubenswrapper[4857]: E1128 16:20:10.781292    4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e071ccad-5d7b-4139-9ec9-96e277495b06" containerName="extract-utilities"
Nov 28 16:20:10 crc kubenswrapper[4857]: I1128 16:20:10.781300    4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="e071ccad-5d7b-4139-9ec9-96e277495b06" containerName="extract-utilities"
Nov 28 16:20:10 crc kubenswrapper[4857]: I1128 16:20:10.781720    4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="ccc6a333-9b75-487e-8d3a-740ec87a9136" containerName="adoption"
Nov 28 16:20:10 crc kubenswrapper[4857]: I1128 16:20:10.781744    4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="e071ccad-5d7b-4139-9ec9-96e277495b06" containerName="registry-server"
Nov 28 16:20:10 crc kubenswrapper[4857]: I1128 16:20:10.781777    4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="f83a1a23-ed01-4d84-b3bf-e55e4268e093" containerName="adoption"
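As with the catalog pod earlier, admitting must-gather-jl8r5 first triggers RemoveStaleState in the CPU and memory managers: accounting entries keyed by (podUID, containerName) for pods that no longer exist (the catalog pod and both copy-data pods) are pruned before the new pod is accounted. The core is a prune over a per-container assignment map; a hypothetical reduction:

```go
package main

import "fmt"

type key struct{ podUID, container string }

// removeStaleState drops accounting entries whose pod is no longer
// in the active set, analogous to the cpu_manager/memory_manager
// RemoveStaleState lines above (types here are illustrative).
func removeStaleState(assignments map[key]string, active map[string]bool) {
	for k := range assignments {
		if !active[k.podUID] {
			fmt.Printf("RemoveStaleState: removing container %s/%s\n", k.podUID, k.container)
			delete(assignments, k)
		}
	}
}

func main() {
	assignments := map[key]string{
		{"e071ccad-5d7b-4139-9ec9-96e277495b06", "registry-server"}: "cpuset 0-3",
		{"f83a1a23-ed01-4d84-b3bf-e55e4268e093", "adoption"}:        "cpuset 0-3",
	}
	// Only the newly admitted pod is active; both entries are stale.
	active := map[string]bool{"ab9ccee8-48a0-4596-b4dd-575578b5d6fc": true}
	removeStaleState(assignments, active)
}
```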
Nov 28 16:20:10 crc kubenswrapper[4857]: I1128 16:20:10.783277    4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-swvhw/must-gather-jl8r5"
Nov 28 16:20:10 crc kubenswrapper[4857]: I1128 16:20:10.814815    4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-swvhw"/"kube-root-ca.crt"
Nov 28 16:20:10 crc kubenswrapper[4857]: I1128 16:20:10.814988    4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-swvhw"/"default-dockercfg-7lq24"
Nov 28 16:20:10 crc kubenswrapper[4857]: I1128 16:20:10.817230    4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-swvhw"/"openshift-service-ca.crt"
Nov 28 16:20:10 crc kubenswrapper[4857]: I1128 16:20:10.855748    4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ab9ccee8-48a0-4596-b4dd-575578b5d6fc-must-gather-output\") pod \"must-gather-jl8r5\" (UID: \"ab9ccee8-48a0-4596-b4dd-575578b5d6fc\") " pod="openshift-must-gather-swvhw/must-gather-jl8r5"
Nov 28 16:20:10 crc kubenswrapper[4857]: I1128 16:20:10.855829    4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mv4nm\" (UniqueName: \"kubernetes.io/projected/ab9ccee8-48a0-4596-b4dd-575578b5d6fc-kube-api-access-mv4nm\") pod \"must-gather-jl8r5\" (UID: \"ab9ccee8-48a0-4596-b4dd-575578b5d6fc\") " pod="openshift-must-gather-swvhw/must-gather-jl8r5"
Nov 28 16:20:10 crc kubenswrapper[4857]: I1128 16:20:10.906039    4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-swvhw/must-gather-jl8r5"]
Nov 28 16:20:10 crc kubenswrapper[4857]: I1128 16:20:10.958314    4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ab9ccee8-48a0-4596-b4dd-575578b5d6fc-must-gather-output\") pod \"must-gather-jl8r5\" (UID: \"ab9ccee8-48a0-4596-b4dd-575578b5d6fc\") " pod="openshift-must-gather-swvhw/must-gather-jl8r5"
Nov 28 16:20:10 crc kubenswrapper[4857]: I1128 16:20:10.958377    4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mv4nm\" (UniqueName: \"kubernetes.io/projected/ab9ccee8-48a0-4596-b4dd-575578b5d6fc-kube-api-access-mv4nm\") pod \"must-gather-jl8r5\" (UID: \"ab9ccee8-48a0-4596-b4dd-575578b5d6fc\") " pod="openshift-must-gather-swvhw/must-gather-jl8r5"
Nov 28 16:20:10 crc kubenswrapper[4857]: I1128 16:20:10.958891    4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ab9ccee8-48a0-4596-b4dd-575578b5d6fc-must-gather-output\") pod \"must-gather-jl8r5\" (UID: \"ab9ccee8-48a0-4596-b4dd-575578b5d6fc\") " pod="openshift-must-gather-swvhw/must-gather-jl8r5"
Nov 28 16:20:11 crc kubenswrapper[4857]: I1128 16:20:11.584556    4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mv4nm\" (UniqueName: \"kubernetes.io/projected/ab9ccee8-48a0-4596-b4dd-575578b5d6fc-kube-api-access-mv4nm\") pod \"must-gather-jl8r5\" (UID: \"ab9ccee8-48a0-4596-b4dd-575578b5d6fc\") " pod="openshift-must-gather-swvhw/must-gather-jl8r5"
Nov 28 16:20:11 crc kubenswrapper[4857]: I1128 16:20:11.751758    4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-swvhw/must-gather-jl8r5"
Nov 28 16:20:12 crc kubenswrapper[4857]: W1128 16:20:12.231590    4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podab9ccee8_48a0_4596_b4dd_575578b5d6fc.slice/crio-ec279bc4050ebeafcd5efac6cd811af7fc7dab0c3ddf60a42b40bec8d0aea785 WatchSource:0}: Error finding container ec279bc4050ebeafcd5efac6cd811af7fc7dab0c3ddf60a42b40bec8d0aea785: Status 404 returned error can't find the container with id ec279bc4050ebeafcd5efac6cd811af7fc7dab0c3ddf60a42b40bec8d0aea785
Nov 28 16:20:12 crc kubenswrapper[4857]: I1128 16:20:12.233389    4857 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 28 16:20:12 crc kubenswrapper[4857]: I1128 16:20:12.247294    4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-swvhw/must-gather-jl8r5"]
Nov 28 16:20:13 crc kubenswrapper[4857]: I1128 16:20:13.123642    4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-swvhw/must-gather-jl8r5" event={"ID":"ab9ccee8-48a0-4596-b4dd-575578b5d6fc","Type":"ContainerStarted","Data":"ec279bc4050ebeafcd5efac6cd811af7fc7dab0c3ddf60a42b40bec8d0aea785"}
Nov 28 16:20:17 crc kubenswrapper[4857]: I1128 16:20:17.183356    4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-swvhw/must-gather-jl8r5" event={"ID":"ab9ccee8-48a0-4596-b4dd-575578b5d6fc","Type":"ContainerStarted","Data":"8928d24d902fc449a4b4aeabaa809a8afdb503dff0a130afe8dfdf71b1ed8fcf"}
Nov 28 16:20:17 crc kubenswrapper[4857]: I1128 16:20:17.183818    4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-swvhw/must-gather-jl8r5" event={"ID":"ab9ccee8-48a0-4596-b4dd-575578b5d6fc","Type":"ContainerStarted","Data":"624a0595ecab4e99cecc273ba5d55dbec6292774988bdd5f378268c5a2d4bdd9"}
Nov 28 16:20:17 crc kubenswrapper[4857]: I1128 16:20:17.209542    4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-swvhw/must-gather-jl8r5" podStartSLOduration=3.531788066 podStartE2EDuration="7.209499914s" podCreationTimestamp="2025-11-28 16:20:10 +0000 UTC" firstStartedPulling="2025-11-28 16:20:12.233062258 +0000 UTC m=+10262.357003715" lastFinishedPulling="2025-11-28 16:20:15.910774126 +0000 UTC m=+10266.034715563" observedRunningTime="2025-11-28 16:20:17.200060105 +0000 UTC m=+10267.324001542" watchObservedRunningTime="2025-11-28 16:20:17.209499914 +0000 UTC m=+10267.333441351"
Nov 28 16:20:17 crc kubenswrapper[4857]: I1128 16:20:17.229454    4857 scope.go:117] "RemoveContainer" containerID="c74305952f95e4003fa797d662a671565c73d0c150114e268ea397949f69fe95"
Nov 28 16:20:18 crc kubenswrapper[4857]: I1128 16:20:18.196629    4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerStarted","Data":"78b43531a5e30b821031568eb8482e23fe9d7dd1f41427c4efa4122a4c8e8ebc"}
Nov 28 16:20:19 crc kubenswrapper[4857]: E1128 16:20:19.254402    4857 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.222:38020->38.102.83.222:43621: write tcp 38.102.83.222:38020->38.102.83.222:43621: write: broken pipe
Nov 28 16:20:20 crc kubenswrapper[4857]: I1128 16:20:20.518880    4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-swvhw/crc-debug-hdjxk"]
Nov 28 16:20:20 crc kubenswrapper[4857]: I1128 16:20:20.521089    4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-swvhw/crc-debug-hdjxk"
Nov 28 16:20:20 crc kubenswrapper[4857]: I1128 16:20:20.590755    4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qmztg\" (UniqueName: \"kubernetes.io/projected/19ad1c4a-fc23-4eb5-ab0a-5ba9dd1a0e02-kube-api-access-qmztg\") pod \"crc-debug-hdjxk\" (UID: \"19ad1c4a-fc23-4eb5-ab0a-5ba9dd1a0e02\") " pod="openshift-must-gather-swvhw/crc-debug-hdjxk"
Nov 28 16:20:20 crc kubenswrapper[4857]: I1128 16:20:20.590976    4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/19ad1c4a-fc23-4eb5-ab0a-5ba9dd1a0e02-host\") pod \"crc-debug-hdjxk\" (UID: \"19ad1c4a-fc23-4eb5-ab0a-5ba9dd1a0e02\") " pod="openshift-must-gather-swvhw/crc-debug-hdjxk"
Nov 28 16:20:20 crc kubenswrapper[4857]: I1128 16:20:20.692713    4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/19ad1c4a-fc23-4eb5-ab0a-5ba9dd1a0e02-host\") pod \"crc-debug-hdjxk\" (UID: \"19ad1c4a-fc23-4eb5-ab0a-5ba9dd1a0e02\") " pod="openshift-must-gather-swvhw/crc-debug-hdjxk"
Nov 28 16:20:20 crc kubenswrapper[4857]: I1128 16:20:20.692916    4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qmztg\" (UniqueName: \"kubernetes.io/projected/19ad1c4a-fc23-4eb5-ab0a-5ba9dd1a0e02-kube-api-access-qmztg\") pod \"crc-debug-hdjxk\" (UID: \"19ad1c4a-fc23-4eb5-ab0a-5ba9dd1a0e02\") " pod="openshift-must-gather-swvhw/crc-debug-hdjxk"
Nov 28 16:20:20 crc kubenswrapper[4857]: I1128 16:20:20.693403    4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/19ad1c4a-fc23-4eb5-ab0a-5ba9dd1a0e02-host\") pod \"crc-debug-hdjxk\" (UID: \"19ad1c4a-fc23-4eb5-ab0a-5ba9dd1a0e02\") " pod="openshift-must-gather-swvhw/crc-debug-hdjxk"
Nov 28 16:20:20 crc kubenswrapper[4857]: I1128 16:20:20.714586    4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qmztg\" (UniqueName: \"kubernetes.io/projected/19ad1c4a-fc23-4eb5-ab0a-5ba9dd1a0e02-kube-api-access-qmztg\") pod \"crc-debug-hdjxk\" (UID: \"19ad1c4a-fc23-4eb5-ab0a-5ba9dd1a0e02\") " pod="openshift-must-gather-swvhw/crc-debug-hdjxk"
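The crc-debug pods mount exactly two volumes: a hostPath volume named "host" and the projected service-account volume (kube-api-access-*). A minimal construction of such a debug pod spec using the real k8s.io/api types; the pod name, namespace, image, and command are placeholders:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	hostPathType := corev1.HostPathDirectory
	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "crc-debug-example", Namespace: "default"},
		Spec: corev1.PodSpec{
			RestartPolicy: corev1.RestartPolicyNever,
			Volumes: []corev1.Volume{{
				Name: "host", // the "host" volume seen in the log
				VolumeSource: corev1.VolumeSource{
					HostPath: &corev1.HostPathVolumeSource{Path: "/", Type: &hostPathType},
				},
			}},
			Containers: []corev1.Container{{
				Name:         "container-00",
				Image:        "registry.example/ose-tools:latest", // placeholder image
				Command:      []string{"chroot", "/host"},
				VolumeMounts: []corev1.VolumeMount{{Name: "host", MountPath: "/host"}},
			}},
		},
	}
	fmt.Println(pod.Name, "mounts hostPath volume", pod.Spec.Volumes[0].Name)
}
```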
Nov 28 16:20:20 crc kubenswrapper[4857]: I1128 16:20:20.845159    4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-swvhw/crc-debug-hdjxk"
Nov 28 16:20:20 crc kubenswrapper[4857]: W1128 16:20:20.892616    4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod19ad1c4a_fc23_4eb5_ab0a_5ba9dd1a0e02.slice/crio-3e43ef800238adad4c737215b4f48949774c7f31abe61389a7e81d9333184a3e WatchSource:0}: Error finding container 3e43ef800238adad4c737215b4f48949774c7f31abe61389a7e81d9333184a3e: Status 404 returned error can't find the container with id 3e43ef800238adad4c737215b4f48949774c7f31abe61389a7e81d9333184a3e
Nov 28 16:20:21 crc kubenswrapper[4857]: I1128 16:20:21.227394    4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-swvhw/crc-debug-hdjxk" event={"ID":"19ad1c4a-fc23-4eb5-ab0a-5ba9dd1a0e02","Type":"ContainerStarted","Data":"3e43ef800238adad4c737215b4f48949774c7f31abe61389a7e81d9333184a3e"}
Nov 28 16:20:34 crc kubenswrapper[4857]: I1128 16:20:34.411856    4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-swvhw/crc-debug-hdjxk" event={"ID":"19ad1c4a-fc23-4eb5-ab0a-5ba9dd1a0e02","Type":"ContainerStarted","Data":"69457fbc5c102af05d12fddfbae3afda2bc3315e5bd7627cfd5855ebecd23fac"}
Nov 28 16:20:34 crc kubenswrapper[4857]: I1128 16:20:34.435328    4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-swvhw/crc-debug-hdjxk" podStartSLOduration=1.540153602 podStartE2EDuration="14.435311543s" podCreationTimestamp="2025-11-28 16:20:20 +0000 UTC" firstStartedPulling="2025-11-28 16:20:20.895555574 +0000 UTC m=+10271.019497011" lastFinishedPulling="2025-11-28 16:20:33.790713515 +0000 UTC m=+10283.914654952" observedRunningTime="2025-11-28 16:20:34.432055756 +0000 UTC m=+10284.555997193" watchObservedRunningTime="2025-11-28 16:20:34.435311543 +0000 UTC m=+10284.559252980"
Nov 28 16:20:56 crc kubenswrapper[4857]: I1128 16:20:56.648614    4857 generic.go:334] "Generic (PLEG): container finished" podID="19ad1c4a-fc23-4eb5-ab0a-5ba9dd1a0e02" containerID="69457fbc5c102af05d12fddfbae3afda2bc3315e5bd7627cfd5855ebecd23fac" exitCode=0
Nov 28 16:20:56 crc kubenswrapper[4857]: I1128 16:20:56.649295    4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-swvhw/crc-debug-hdjxk" event={"ID":"19ad1c4a-fc23-4eb5-ab0a-5ba9dd1a0e02","Type":"ContainerDied","Data":"69457fbc5c102af05d12fddfbae3afda2bc3315e5bd7627cfd5855ebecd23fac"}
Nov 28 16:20:57 crc kubenswrapper[4857]: I1128 16:20:57.780105    4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-swvhw/crc-debug-hdjxk"
Nov 28 16:20:57 crc kubenswrapper[4857]: I1128 16:20:57.822580    4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-swvhw/crc-debug-hdjxk"]
Nov 28 16:20:57 crc kubenswrapper[4857]: I1128 16:20:57.839113    4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-swvhw/crc-debug-hdjxk"]
Nov 28 16:20:57 crc kubenswrapper[4857]: I1128 16:20:57.948539    4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/19ad1c4a-fc23-4eb5-ab0a-5ba9dd1a0e02-host\") pod \"19ad1c4a-fc23-4eb5-ab0a-5ba9dd1a0e02\" (UID: \"19ad1c4a-fc23-4eb5-ab0a-5ba9dd1a0e02\") "
Nov 28 16:20:57 crc kubenswrapper[4857]: I1128 16:20:57.948625    4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qmztg\" (UniqueName: \"kubernetes.io/projected/19ad1c4a-fc23-4eb5-ab0a-5ba9dd1a0e02-kube-api-access-qmztg\") pod \"19ad1c4a-fc23-4eb5-ab0a-5ba9dd1a0e02\" (UID: \"19ad1c4a-fc23-4eb5-ab0a-5ba9dd1a0e02\") "
Nov 28 16:20:57 crc kubenswrapper[4857]: I1128 16:20:57.948689    4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/19ad1c4a-fc23-4eb5-ab0a-5ba9dd1a0e02-host" (OuterVolumeSpecName: "host") pod "19ad1c4a-fc23-4eb5-ab0a-5ba9dd1a0e02" (UID: "19ad1c4a-fc23-4eb5-ab0a-5ba9dd1a0e02"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 16:20:57 crc kubenswrapper[4857]: I1128 16:20:57.949182    4857 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/19ad1c4a-fc23-4eb5-ab0a-5ba9dd1a0e02-host\") on node \"crc\" DevicePath \"\""
Nov 28 16:20:57 crc kubenswrapper[4857]: I1128 16:20:57.959118    4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19ad1c4a-fc23-4eb5-ab0a-5ba9dd1a0e02-kube-api-access-qmztg" (OuterVolumeSpecName: "kube-api-access-qmztg") pod "19ad1c4a-fc23-4eb5-ab0a-5ba9dd1a0e02" (UID: "19ad1c4a-fc23-4eb5-ab0a-5ba9dd1a0e02"). InnerVolumeSpecName "kube-api-access-qmztg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:20:58 crc kubenswrapper[4857]: I1128 16:20:58.050926    4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qmztg\" (UniqueName: \"kubernetes.io/projected/19ad1c4a-fc23-4eb5-ab0a-5ba9dd1a0e02-kube-api-access-qmztg\") on node \"crc\" DevicePath \"\""
Nov 28 16:20:58 crc kubenswrapper[4857]: I1128 16:20:58.240230    4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="19ad1c4a-fc23-4eb5-ab0a-5ba9dd1a0e02" path="/var/lib/kubelet/pods/19ad1c4a-fc23-4eb5-ab0a-5ba9dd1a0e02/volumes"
Nov 28 16:20:58 crc kubenswrapper[4857]: I1128 16:20:58.670473    4857 scope.go:117] "RemoveContainer" containerID="69457fbc5c102af05d12fddfbae3afda2bc3315e5bd7627cfd5855ebecd23fac"
Nov 28 16:20:58 crc kubenswrapper[4857]: I1128 16:20:58.670495    4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-swvhw/crc-debug-hdjxk"
Nov 28 16:20:59 crc kubenswrapper[4857]: I1128 16:20:59.174611    4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-swvhw/crc-debug-29bgj"]
Nov 28 16:20:59 crc kubenswrapper[4857]: E1128 16:20:59.175319    4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19ad1c4a-fc23-4eb5-ab0a-5ba9dd1a0e02" containerName="container-00"
Nov 28 16:20:59 crc kubenswrapper[4857]: I1128 16:20:59.175330    4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="19ad1c4a-fc23-4eb5-ab0a-5ba9dd1a0e02" containerName="container-00"
Nov 28 16:20:59 crc kubenswrapper[4857]: I1128 16:20:59.175541    4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="19ad1c4a-fc23-4eb5-ab0a-5ba9dd1a0e02" containerName="container-00"
Nov 28 16:20:59 crc kubenswrapper[4857]: I1128 16:20:59.176264    4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-swvhw/crc-debug-29bgj"
Nov 28 16:20:59 crc kubenswrapper[4857]: I1128 16:20:59.276774    4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7ssl8\" (UniqueName: \"kubernetes.io/projected/299af1a6-6f2e-4aff-82c6-a4369c0293c3-kube-api-access-7ssl8\") pod \"crc-debug-29bgj\" (UID: \"299af1a6-6f2e-4aff-82c6-a4369c0293c3\") " pod="openshift-must-gather-swvhw/crc-debug-29bgj"
Nov 28 16:20:59 crc kubenswrapper[4857]: I1128 16:20:59.277109    4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/299af1a6-6f2e-4aff-82c6-a4369c0293c3-host\") pod \"crc-debug-29bgj\" (UID: \"299af1a6-6f2e-4aff-82c6-a4369c0293c3\") " pod="openshift-must-gather-swvhw/crc-debug-29bgj"
Nov 28 16:20:59 crc kubenswrapper[4857]: I1128 16:20:59.379508    4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7ssl8\" (UniqueName: \"kubernetes.io/projected/299af1a6-6f2e-4aff-82c6-a4369c0293c3-kube-api-access-7ssl8\") pod \"crc-debug-29bgj\" (UID: \"299af1a6-6f2e-4aff-82c6-a4369c0293c3\") " pod="openshift-must-gather-swvhw/crc-debug-29bgj"
Nov 28 16:20:59 crc kubenswrapper[4857]: I1128 16:20:59.379689    4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/299af1a6-6f2e-4aff-82c6-a4369c0293c3-host\") pod \"crc-debug-29bgj\" (UID: \"299af1a6-6f2e-4aff-82c6-a4369c0293c3\") " pod="openshift-must-gather-swvhw/crc-debug-29bgj"
Nov 28 16:20:59 crc kubenswrapper[4857]: I1128 16:20:59.379835    4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/299af1a6-6f2e-4aff-82c6-a4369c0293c3-host\") pod \"crc-debug-29bgj\" (UID: \"299af1a6-6f2e-4aff-82c6-a4369c0293c3\") " pod="openshift-must-gather-swvhw/crc-debug-29bgj"
Nov 28 16:20:59 crc kubenswrapper[4857]: I1128 16:20:59.397515    4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7ssl8\" (UniqueName: \"kubernetes.io/projected/299af1a6-6f2e-4aff-82c6-a4369c0293c3-kube-api-access-7ssl8\") pod \"crc-debug-29bgj\" (UID: \"299af1a6-6f2e-4aff-82c6-a4369c0293c3\") " pod="openshift-must-gather-swvhw/crc-debug-29bgj"
Nov 28 16:20:59 crc kubenswrapper[4857]: I1128 16:20:59.493804    4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-swvhw/crc-debug-29bgj"
Nov 28 16:20:59 crc kubenswrapper[4857]: I1128 16:20:59.680759    4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-swvhw/crc-debug-29bgj" event={"ID":"299af1a6-6f2e-4aff-82c6-a4369c0293c3","Type":"ContainerStarted","Data":"909249110ab0dca943ae0ca7aba654b2bf2196216d33b55b8fd650c9159cf33e"}
Nov 28 16:21:00 crc kubenswrapper[4857]: I1128 16:21:00.694511    4857 generic.go:334] "Generic (PLEG): container finished" podID="299af1a6-6f2e-4aff-82c6-a4369c0293c3" containerID="81db52dfda2cb654a50096d2e18cf587df6f7b4767459ac55ba0682e12db99c6" exitCode=1
Nov 28 16:21:00 crc kubenswrapper[4857]: I1128 16:21:00.694564    4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-swvhw/crc-debug-29bgj" event={"ID":"299af1a6-6f2e-4aff-82c6-a4369c0293c3","Type":"ContainerDied","Data":"81db52dfda2cb654a50096d2e18cf587df6f7b4767459ac55ba0682e12db99c6"}
Nov 28 16:21:00 crc kubenswrapper[4857]: I1128 16:21:00.736918    4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-swvhw/crc-debug-29bgj"]
Nov 28 16:21:00 crc kubenswrapper[4857]: I1128 16:21:00.748037    4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-swvhw/crc-debug-29bgj"]
Nov 28 16:21:01 crc kubenswrapper[4857]: I1128 16:21:01.832956    4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-swvhw/crc-debug-29bgj"
Nov 28 16:21:01 crc kubenswrapper[4857]: I1128 16:21:01.941163    4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7ssl8\" (UniqueName: \"kubernetes.io/projected/299af1a6-6f2e-4aff-82c6-a4369c0293c3-kube-api-access-7ssl8\") pod \"299af1a6-6f2e-4aff-82c6-a4369c0293c3\" (UID: \"299af1a6-6f2e-4aff-82c6-a4369c0293c3\") "
Nov 28 16:21:01 crc kubenswrapper[4857]: I1128 16:21:01.941488    4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/299af1a6-6f2e-4aff-82c6-a4369c0293c3-host\") pod \"299af1a6-6f2e-4aff-82c6-a4369c0293c3\" (UID: \"299af1a6-6f2e-4aff-82c6-a4369c0293c3\") "
Nov 28 16:21:01 crc kubenswrapper[4857]: I1128 16:21:01.942249    4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/299af1a6-6f2e-4aff-82c6-a4369c0293c3-host" (OuterVolumeSpecName: "host") pod "299af1a6-6f2e-4aff-82c6-a4369c0293c3" (UID: "299af1a6-6f2e-4aff-82c6-a4369c0293c3"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 28 16:21:01 crc kubenswrapper[4857]: I1128 16:21:01.950287    4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/299af1a6-6f2e-4aff-82c6-a4369c0293c3-kube-api-access-7ssl8" (OuterVolumeSpecName: "kube-api-access-7ssl8") pod "299af1a6-6f2e-4aff-82c6-a4369c0293c3" (UID: "299af1a6-6f2e-4aff-82c6-a4369c0293c3"). InnerVolumeSpecName "kube-api-access-7ssl8".
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:21:02 crc kubenswrapper[4857]: I1128 16:21:02.044855 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7ssl8\" (UniqueName: \"kubernetes.io/projected/299af1a6-6f2e-4aff-82c6-a4369c0293c3-kube-api-access-7ssl8\") on node \"crc\" DevicePath \"\"" Nov 28 16:21:02 crc kubenswrapper[4857]: I1128 16:21:02.044890 4857 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/299af1a6-6f2e-4aff-82c6-a4369c0293c3-host\") on node \"crc\" DevicePath \"\"" Nov 28 16:21:02 crc kubenswrapper[4857]: I1128 16:21:02.241931 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="299af1a6-6f2e-4aff-82c6-a4369c0293c3" path="/var/lib/kubelet/pods/299af1a6-6f2e-4aff-82c6-a4369c0293c3/volumes" Nov 28 16:21:02 crc kubenswrapper[4857]: I1128 16:21:02.713114 4857 scope.go:117] "RemoveContainer" containerID="81db52dfda2cb654a50096d2e18cf587df6f7b4767459ac55ba0682e12db99c6" Nov 28 16:21:02 crc kubenswrapper[4857]: I1128 16:21:02.713154 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-swvhw/crc-debug-29bgj" Nov 28 16:22:41 crc kubenswrapper[4857]: I1128 16:22:41.309042 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:22:41 crc kubenswrapper[4857]: I1128 16:22:41.309508 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:23:11 crc kubenswrapper[4857]: I1128 16:23:11.308384 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:23:11 crc kubenswrapper[4857]: I1128 16:23:11.308998 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:23:41 crc kubenswrapper[4857]: I1128 16:23:41.308763 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:23:41 crc kubenswrapper[4857]: I1128 16:23:41.310513 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:23:41 crc kubenswrapper[4857]: I1128 16:23:41.310668 4857 kubelet.go:2542] "SyncLoop (probe)" 
probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 16:23:41 crc kubenswrapper[4857]: I1128 16:23:41.311660 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"78b43531a5e30b821031568eb8482e23fe9d7dd1f41427c4efa4122a4c8e8ebc"} pod="openshift-machine-config-operator/machine-config-daemon-dshsf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:23:41 crc kubenswrapper[4857]: I1128 16:23:41.311830 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" containerID="cri-o://78b43531a5e30b821031568eb8482e23fe9d7dd1f41427c4efa4122a4c8e8ebc" gracePeriod=600 Nov 28 16:23:42 crc kubenswrapper[4857]: I1128 16:23:42.396332 4857 generic.go:334] "Generic (PLEG): container finished" podID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerID="78b43531a5e30b821031568eb8482e23fe9d7dd1f41427c4efa4122a4c8e8ebc" exitCode=0 Nov 28 16:23:42 crc kubenswrapper[4857]: I1128 16:23:42.396429 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerDied","Data":"78b43531a5e30b821031568eb8482e23fe9d7dd1f41427c4efa4122a4c8e8ebc"} Nov 28 16:23:42 crc kubenswrapper[4857]: I1128 16:23:42.397444 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerStarted","Data":"8a6621105ed3b693ac6fbebe439c6724963ff233af722cd9b255685c7859c917"} Nov 28 16:23:42 crc kubenswrapper[4857]: I1128 16:23:42.397476 4857 scope.go:117] "RemoveContainer" containerID="c74305952f95e4003fa797d662a671565c73d0c150114e268ea397949f69fe95" Nov 28 16:24:03 crc kubenswrapper[4857]: I1128 16:24:03.372268 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_95a5f00a-164f-43ea-8d38-0f9763cedfa4/init-config-reloader/0.log" Nov 28 16:24:03 crc kubenswrapper[4857]: I1128 16:24:03.568035 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_95a5f00a-164f-43ea-8d38-0f9763cedfa4/alertmanager/0.log" Nov 28 16:24:03 crc kubenswrapper[4857]: I1128 16:24:03.588720 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_95a5f00a-164f-43ea-8d38-0f9763cedfa4/init-config-reloader/0.log" Nov 28 16:24:03 crc kubenswrapper[4857]: I1128 16:24:03.625002 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_95a5f00a-164f-43ea-8d38-0f9763cedfa4/config-reloader/0.log" Nov 28 16:24:03 crc kubenswrapper[4857]: I1128 16:24:03.755834 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_f113ffd3-ebde-4e6a-b805-25abf9dc82dc/aodh-api/0.log" Nov 28 16:24:03 crc kubenswrapper[4857]: I1128 16:24:03.821642 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_f113ffd3-ebde-4e6a-b805-25abf9dc82dc/aodh-evaluator/0.log" Nov 28 16:24:03 crc kubenswrapper[4857]: I1128 16:24:03.850841 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_f113ffd3-ebde-4e6a-b805-25abf9dc82dc/aodh-listener/0.log" 
Nov 28 16:24:03 crc kubenswrapper[4857]: I1128 16:24:03.973751 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_f113ffd3-ebde-4e6a-b805-25abf9dc82dc/aodh-notifier/0.log" Nov 28 16:24:04 crc kubenswrapper[4857]: I1128 16:24:04.024704 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-5865985776-dd6dg_d7430258-c1e6-4046-8da2-e35900b3647d/barbican-api/0.log" Nov 28 16:24:04 crc kubenswrapper[4857]: I1128 16:24:04.066248 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-5865985776-dd6dg_d7430258-c1e6-4046-8da2-e35900b3647d/barbican-api-log/0.log" Nov 28 16:24:04 crc kubenswrapper[4857]: I1128 16:24:04.231373 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-6c66ddbb4-c47hg_fd23dc94-d5ca-4419-a180-56bb75922c4b/barbican-keystone-listener/0.log" Nov 28 16:24:04 crc kubenswrapper[4857]: I1128 16:24:04.321021 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-6c66ddbb4-c47hg_fd23dc94-d5ca-4419-a180-56bb75922c4b/barbican-keystone-listener-log/0.log" Nov 28 16:24:04 crc kubenswrapper[4857]: I1128 16:24:04.430920 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-6cf5b4dc75-h9p5p_ad527350-39bf-416d-9b8b-0896ac4012d6/barbican-worker/0.log" Nov 28 16:24:04 crc kubenswrapper[4857]: I1128 16:24:04.474506 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-6cf5b4dc75-h9p5p_ad527350-39bf-416d-9b8b-0896ac4012d6/barbican-worker-log/0.log" Nov 28 16:24:04 crc kubenswrapper[4857]: I1128 16:24:04.617389 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-openstack-openstack-cell1-9gtn8_d2abab34-1dd6-4fd0-9799-c1a9ed22c5cb/bootstrap-openstack-openstack-cell1/0.log" Nov 28 16:24:04 crc kubenswrapper[4857]: I1128 16:24:04.706734 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_32ca0f07-a0bc-43f3-8095-256d0b40e335/ceilometer-central-agent/0.log" Nov 28 16:24:04 crc kubenswrapper[4857]: I1128 16:24:04.845533 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_32ca0f07-a0bc-43f3-8095-256d0b40e335/proxy-httpd/0.log" Nov 28 16:24:04 crc kubenswrapper[4857]: I1128 16:24:04.848805 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_32ca0f07-a0bc-43f3-8095-256d0b40e335/ceilometer-notification-agent/0.log" Nov 28 16:24:04 crc kubenswrapper[4857]: I1128 16:24:04.900288 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_32ca0f07-a0bc-43f3-8095-256d0b40e335/sg-core/0.log" Nov 28 16:24:05 crc kubenswrapper[4857]: I1128 16:24:05.073784 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceph-client-openstack-openstack-cell1-ffn9w_3c944ef4-8d62-4a01-8b7c-da8fba5d942f/ceph-client-openstack-openstack-cell1/0.log" Nov 28 16:24:05 crc kubenswrapper[4857]: I1128 16:24:05.208294 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_f4667b41-d1a9-4a7b-9756-1a3d1e8e5170/cinder-api/0.log" Nov 28 16:24:05 crc kubenswrapper[4857]: I1128 16:24:05.226103 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_f4667b41-d1a9-4a7b-9756-1a3d1e8e5170/cinder-api-log/0.log" Nov 28 16:24:05 crc kubenswrapper[4857]: I1128 16:24:05.548251 4857 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_cinder-backup-0_fd47b944-e376-44ce-888f-f5f75474d0d9/cinder-backup/0.log" Nov 28 16:24:05 crc kubenswrapper[4857]: I1128 16:24:05.582919 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-backup-0_fd47b944-e376-44ce-888f-f5f75474d0d9/probe/0.log" Nov 28 16:24:05 crc kubenswrapper[4857]: I1128 16:24:05.670045 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_845ef7c8-2be0-41d1-b61d-9bdcfec6b018/cinder-scheduler/0.log" Nov 28 16:24:05 crc kubenswrapper[4857]: I1128 16:24:05.747977 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_845ef7c8-2be0-41d1-b61d-9bdcfec6b018/probe/0.log" Nov 28 16:24:05 crc kubenswrapper[4857]: I1128 16:24:05.857887 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_77a9c727-7461-4b83-b446-b9958a4de940/cinder-volume/0.log" Nov 28 16:24:05 crc kubenswrapper[4857]: I1128 16:24:05.967325 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-volume-volume1-0_77a9c727-7461-4b83-b446-b9958a4de940/probe/0.log" Nov 28 16:24:06 crc kubenswrapper[4857]: I1128 16:24:06.042632 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-openstack-openstack-cell1-tw87j_957ab666-234a-4cd4-827f-746823e02d5a/configure-network-openstack-openstack-cell1/0.log" Nov 28 16:24:06 crc kubenswrapper[4857]: I1128 16:24:06.190000 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-openstack-openstack-cell1-wlm6l_ab248fa2-2b7f-4571-ba24-ed45686e9d06/configure-os-openstack-openstack-cell1/0.log" Nov 28 16:24:06 crc kubenswrapper[4857]: I1128 16:24:06.334778 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5768f785f9-lgxzj_2895b69d-98a4-41f5-8a13-50954e0d72dd/init/0.log" Nov 28 16:24:06 crc kubenswrapper[4857]: I1128 16:24:06.531401 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5768f785f9-lgxzj_2895b69d-98a4-41f5-8a13-50954e0d72dd/init/0.log" Nov 28 16:24:06 crc kubenswrapper[4857]: I1128 16:24:06.622230 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5768f785f9-lgxzj_2895b69d-98a4-41f5-8a13-50954e0d72dd/dnsmasq-dns/0.log" Nov 28 16:24:06 crc kubenswrapper[4857]: I1128 16:24:06.647628 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-openstack-openstack-cell1-zsc8l_0eb2007a-4a59-43fa-980c-e76277c303c1/download-cache-openstack-openstack-cell1/0.log" Nov 28 16:24:06 crc kubenswrapper[4857]: I1128 16:24:06.833848 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_c9d01547-142e-4829-b702-ba934ad013e9/glance-log/0.log" Nov 28 16:24:07 crc kubenswrapper[4857]: I1128 16:24:07.032786 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_c9d01547-142e-4829-b702-ba934ad013e9/glance-httpd/0.log" Nov 28 16:24:07 crc kubenswrapper[4857]: I1128 16:24:07.114361 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_1db6fc24-0e64-4d28-b4c6-dd02be9f6b53/glance-httpd/0.log" Nov 28 16:24:07 crc kubenswrapper[4857]: I1128 16:24:07.120352 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_1db6fc24-0e64-4d28-b4c6-dd02be9f6b53/glance-log/0.log" Nov 28 16:24:07 crc kubenswrapper[4857]: 
I1128 16:24:07.348135 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-api-55b678f96d-kk66g_3a7b5ab7-2a4b-4b94-bb2c-d4093febdb6c/heat-api/0.log" Nov 28 16:24:07 crc kubenswrapper[4857]: I1128 16:24:07.475170 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-cfnapi-7c69887759-78cdn_9a42c08c-72d5-416f-8ee8-5cecfc36b4ab/heat-cfnapi/0.log" Nov 28 16:24:07 crc kubenswrapper[4857]: I1128 16:24:07.540942 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-engine-8685975d9c-mq499_06b77386-fe69-408f-ba29-f70de0a73e6e/heat-engine/0.log" Nov 28 16:24:07 crc kubenswrapper[4857]: I1128 16:24:07.774153 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-5669786d45-d96bq_90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77/horizon/0.log" Nov 28 16:24:07 crc kubenswrapper[4857]: I1128 16:24:07.800129 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-openstack-openstack-cell1-fwjs4_4608f83d-58f5-4bba-bc64-ba729a74e876/install-certs-openstack-openstack-cell1/0.log" Nov 28 16:24:07 crc kubenswrapper[4857]: I1128 16:24:07.806432 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-5669786d45-d96bq_90c3d1b3-b7b6-4bd3-94c6-f67fbcddee77/horizon-log/0.log" Nov 28 16:24:08 crc kubenswrapper[4857]: I1128 16:24:08.271118 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-openstack-openstack-cell1-csvdf_f6435193-0fdc-46d5-a005-72ac32d221b4/install-os-openstack-openstack-cell1/0.log" Nov 28 16:24:08 crc kubenswrapper[4857]: I1128 16:24:08.404991 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-6bf47649b6-ns6fh_29fc2d85-fb2f-4941-9b48-f90e099492e5/keystone-api/0.log" Nov 28 16:24:08 crc kubenswrapper[4857]: I1128 16:24:08.469989 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29405701-542l7_b2518c7c-8ec1-4d12-bcca-33fa5496fe45/keystone-cron/0.log" Nov 28 16:24:08 crc kubenswrapper[4857]: I1128 16:24:08.545609 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29405761-9p2mh_783defcf-e557-4a27-8e0f-e19bccf6bdf9/keystone-cron/0.log" Nov 28 16:24:08 crc kubenswrapper[4857]: I1128 16:24:08.649823 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_f174e778-1bd6-4685-a6de-af54e6fc3329/kube-state-metrics/0.log" Nov 28 16:24:08 crc kubenswrapper[4857]: I1128 16:24:08.800516 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-openstack-openstack-cell1-6nnr8_72fcedc3-8360-4cff-a0ec-a6e9adc9d54c/libvirt-openstack-openstack-cell1/0.log" Nov 28 16:24:08 crc kubenswrapper[4857]: I1128 16:24:08.949434 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-api-0_1f657912-1eca-43ba-bd70-96ddd9745b78/manila-api-log/0.log" Nov 28 16:24:08 crc kubenswrapper[4857]: I1128 16:24:08.958392 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-api-0_1f657912-1eca-43ba-bd70-96ddd9745b78/manila-api/0.log" Nov 28 16:24:09 crc kubenswrapper[4857]: I1128 16:24:09.142822 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-scheduler-0_93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10/manila-scheduler/0.log" Nov 28 16:24:09 crc kubenswrapper[4857]: I1128 16:24:09.176977 4857 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_manila-scheduler-0_93e5c3ff-7b8f-4a98-93d3-2753bbb5dc10/probe/0.log" Nov 28 16:24:09 crc kubenswrapper[4857]: I1128 16:24:09.253970 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-share-share1-0_904e6cef-0d88-4f3d-8b62-403596acc8b0/manila-share/0.log" Nov 28 16:24:09 crc kubenswrapper[4857]: I1128 16:24:09.290368 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_manila-share-share1-0_904e6cef-0d88-4f3d-8b62-403596acc8b0/probe/0.log" Nov 28 16:24:09 crc kubenswrapper[4857]: I1128 16:24:09.668499 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-86b5dbcd7c-xktg6_214b3bcb-b9dd-4d87-b519-7fd66b84658a/neutron-api/0.log" Nov 28 16:24:09 crc kubenswrapper[4857]: I1128 16:24:09.689878 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-86b5dbcd7c-xktg6_214b3bcb-b9dd-4d87-b519-7fd66b84658a/neutron-httpd/0.log" Nov 28 16:24:10 crc kubenswrapper[4857]: I1128 16:24:10.399874 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-dhcp-openstack-openstack-cell1-hwdxh_d8c62c98-9304-412b-9738-99fb9dad59a6/neutron-dhcp-openstack-openstack-cell1/0.log" Nov 28 16:24:10 crc kubenswrapper[4857]: I1128 16:24:10.400606 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-openstack-openstack-cell1-q4kwz_599f242a-30de-4d34-be24-3695bc92abcf/neutron-metadata-openstack-openstack-cell1/0.log" Nov 28 16:24:10 crc kubenswrapper[4857]: I1128 16:24:10.690613 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-sriov-openstack-openstack-cell1-lwbfw_1143dca0-289a-4a62-9437-07ee877305c2/neutron-sriov-openstack-openstack-cell1/0.log" Nov 28 16:24:10 crc kubenswrapper[4857]: I1128 16:24:10.801206 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_f18de81f-0f74-4296-b62e-f1aea60e4f67/nova-api-api/0.log" Nov 28 16:24:10 crc kubenswrapper[4857]: I1128 16:24:10.897013 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_f18de81f-0f74-4296-b62e-f1aea60e4f67/nova-api-log/0.log" Nov 28 16:24:11 crc kubenswrapper[4857]: I1128 16:24:11.046525 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_2df57cfe-5ac9-4a8c-83c9-f3bad8317f1d/nova-cell0-conductor-conductor/0.log" Nov 28 16:24:11 crc kubenswrapper[4857]: I1128 16:24:11.512610 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_c5b22e7a-87b1-421d-9c28-35afaefb3808/nova-cell1-conductor-conductor/0.log" Nov 28 16:24:11 crc kubenswrapper[4857]: I1128 16:24:11.553287 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_d5abb31a-459c-48f7-bb30-eaee2b73da5a/nova-cell1-novncproxy-novncproxy/0.log" Nov 28 16:24:11 crc kubenswrapper[4857]: I1128 16:24:11.883045 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell5kw9w_6bb7d13b-f954-4298-b9e9-3b5d36f8591c/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell1/0.log" Nov 28 16:24:11 crc kubenswrapper[4857]: I1128 16:24:11.900822 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-openstack-openstack-cell1-fqh98_dbb3905a-b817-4261-88d3-9b8bd0d12548/nova-cell1-openstack-openstack-cell1/0.log" Nov 28 16:24:12 crc kubenswrapper[4857]: I1128 16:24:12.200325 4857 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openstack_nova-metadata-0_a02553cd-561a-4d5a-a986-076342a5430b/nova-metadata-log/0.log" Nov 28 16:24:12 crc kubenswrapper[4857]: I1128 16:24:12.201118 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_a02553cd-561a-4d5a-a986-076342a5430b/nova-metadata-metadata/0.log" Nov 28 16:24:12 crc kubenswrapper[4857]: I1128 16:24:12.368416 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_b4ddd466-5f9f-4920-a84a-63ad934e4e74/nova-scheduler-scheduler/0.log" Nov 28 16:24:12 crc kubenswrapper[4857]: I1128 16:24:12.472218 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-api-85d89c55f6-24c4g_a0be8fcd-dab0-441a-9bab-d934ea59337a/init/0.log" Nov 28 16:24:12 crc kubenswrapper[4857]: I1128 16:24:12.693556 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-api-85d89c55f6-24c4g_a0be8fcd-dab0-441a-9bab-d934ea59337a/init/0.log" Nov 28 16:24:12 crc kubenswrapper[4857]: I1128 16:24:12.710372 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-api-85d89c55f6-24c4g_a0be8fcd-dab0-441a-9bab-d934ea59337a/octavia-api-provider-agent/0.log" Nov 28 16:24:12 crc kubenswrapper[4857]: I1128 16:24:12.955689 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-healthmanager-9sgx4_53410811-937e-4b3e-94fa-e2b2eb63468d/init/0.log" Nov 28 16:24:12 crc kubenswrapper[4857]: I1128 16:24:12.980633 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-api-85d89c55f6-24c4g_a0be8fcd-dab0-441a-9bab-d934ea59337a/octavia-api/0.log" Nov 28 16:24:13 crc kubenswrapper[4857]: I1128 16:24:13.350262 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-healthmanager-9sgx4_53410811-937e-4b3e-94fa-e2b2eb63468d/octavia-healthmanager/0.log" Nov 28 16:24:13 crc kubenswrapper[4857]: I1128 16:24:13.378216 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-housekeeping-lrkh5_3e200ed8-6d6b-454a-9146-5f86e5892e6d/init/0.log" Nov 28 16:24:13 crc kubenswrapper[4857]: I1128 16:24:13.394344 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-healthmanager-9sgx4_53410811-937e-4b3e-94fa-e2b2eb63468d/init/0.log" Nov 28 16:24:13 crc kubenswrapper[4857]: I1128 16:24:13.679691 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-housekeeping-lrkh5_3e200ed8-6d6b-454a-9146-5f86e5892e6d/init/0.log" Nov 28 16:24:13 crc kubenswrapper[4857]: I1128 16:24:13.691416 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-housekeeping-lrkh5_3e200ed8-6d6b-454a-9146-5f86e5892e6d/octavia-housekeeping/0.log" Nov 28 16:24:13 crc kubenswrapper[4857]: I1128 16:24:13.809756 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-image-upload-59f8cff499-xj54b_7d6d5f39-e9b3-4bf1-a47c-e3315e5c456c/init/0.log" Nov 28 16:24:14 crc kubenswrapper[4857]: I1128 16:24:14.033073 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-image-upload-59f8cff499-xj54b_7d6d5f39-e9b3-4bf1-a47c-e3315e5c456c/init/0.log" Nov 28 16:24:14 crc kubenswrapper[4857]: I1128 16:24:14.034064 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-image-upload-59f8cff499-xj54b_7d6d5f39-e9b3-4bf1-a47c-e3315e5c456c/octavia-amphora-httpd/0.log" Nov 28 16:24:14 crc kubenswrapper[4857]: I1128 16:24:14.068522 4857 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openstack_octavia-rsyslog-7w8sh_493b0f4e-047e-4f57-bdc7-fd0e2030a799/init/0.log" Nov 28 16:24:14 crc kubenswrapper[4857]: I1128 16:24:14.294705 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-rsyslog-7w8sh_493b0f4e-047e-4f57-bdc7-fd0e2030a799/octavia-rsyslog/0.log" Nov 28 16:24:14 crc kubenswrapper[4857]: I1128 16:24:14.361042 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-rsyslog-7w8sh_493b0f4e-047e-4f57-bdc7-fd0e2030a799/init/0.log" Nov 28 16:24:14 crc kubenswrapper[4857]: I1128 16:24:14.374044 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-worker-r6h2l_c22b0407-f4df-49cf-9dc4-9b8c78e835eb/init/0.log" Nov 28 16:24:14 crc kubenswrapper[4857]: I1128 16:24:14.544232 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-worker-r6h2l_c22b0407-f4df-49cf-9dc4-9b8c78e835eb/init/0.log" Nov 28 16:24:14 crc kubenswrapper[4857]: I1128 16:24:14.624169 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_30b56f73-5bef-414c-bb50-9b8d7f5afd91/mysql-bootstrap/0.log" Nov 28 16:24:14 crc kubenswrapper[4857]: I1128 16:24:14.723619 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-worker-r6h2l_c22b0407-f4df-49cf-9dc4-9b8c78e835eb/octavia-worker/0.log" Nov 28 16:24:14 crc kubenswrapper[4857]: I1128 16:24:14.823341 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_30b56f73-5bef-414c-bb50-9b8d7f5afd91/mysql-bootstrap/0.log" Nov 28 16:24:14 crc kubenswrapper[4857]: I1128 16:24:14.867256 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_30b56f73-5bef-414c-bb50-9b8d7f5afd91/galera/0.log" Nov 28 16:24:15 crc kubenswrapper[4857]: I1128 16:24:15.009729 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_f952e7bb-8570-4caf-bb87-e84cd31506b9/mysql-bootstrap/0.log" Nov 28 16:24:15 crc kubenswrapper[4857]: I1128 16:24:15.257008 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_f952e7bb-8570-4caf-bb87-e84cd31506b9/mysql-bootstrap/0.log" Nov 28 16:24:15 crc kubenswrapper[4857]: I1128 16:24:15.307195 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_2571cbd5-f5c5-4525-a259-05351f59b9bb/openstackclient/0.log" Nov 28 16:24:15 crc kubenswrapper[4857]: I1128 16:24:15.366506 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_f952e7bb-8570-4caf-bb87-e84cd31506b9/galera/0.log" Nov 28 16:24:15 crc kubenswrapper[4857]: I1128 16:24:15.508855 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-hg2vm_5a653e57-11d1-4d14-9d0d-d8afbbfcffbc/ovn-controller/0.log" Nov 28 16:24:15 crc kubenswrapper[4857]: I1128 16:24:15.608622 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-zrjff_39b5a11e-b819-4562-9b61-82421fcabe18/openstack-network-exporter/0.log" Nov 28 16:24:15 crc kubenswrapper[4857]: I1128 16:24:15.834651 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-nv5l9_2701173a-d9ca-4d1b-8e1a-a0ef8102f92d/ovsdb-server-init/0.log" Nov 28 16:24:16 crc kubenswrapper[4857]: I1128 16:24:16.028135 4857 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ovn-controller-ovs-nv5l9_2701173a-d9ca-4d1b-8e1a-a0ef8102f92d/ovs-vswitchd/0.log" Nov 28 16:24:16 crc kubenswrapper[4857]: I1128 16:24:16.060590 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-nv5l9_2701173a-d9ca-4d1b-8e1a-a0ef8102f92d/ovsdb-server/0.log" Nov 28 16:24:16 crc kubenswrapper[4857]: I1128 16:24:16.061057 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-nv5l9_2701173a-d9ca-4d1b-8e1a-a0ef8102f92d/ovsdb-server-init/0.log" Nov 28 16:24:16 crc kubenswrapper[4857]: I1128 16:24:16.273326 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_09121b50-2dda-44c5-8ba4-3f9a1f55e8e6/openstack-network-exporter/0.log" Nov 28 16:24:16 crc kubenswrapper[4857]: I1128 16:24:16.328625 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_09121b50-2dda-44c5-8ba4-3f9a1f55e8e6/ovn-northd/0.log" Nov 28 16:24:16 crc kubenswrapper[4857]: I1128 16:24:16.563110 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-openstack-openstack-cell1-vxtjn_bd5526b8-d3ea-4744-9a5c-ec0c252b9391/ovn-openstack-openstack-cell1/0.log" Nov 28 16:24:16 crc kubenswrapper[4857]: I1128 16:24:16.573699 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c/openstack-network-exporter/0.log" Nov 28 16:24:16 crc kubenswrapper[4857]: I1128 16:24:16.638416 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_87cbdfd2-18b6-45a9-ab8d-e6fd87fb317c/ovsdbserver-nb/0.log" Nov 28 16:24:16 crc kubenswrapper[4857]: I1128 16:24:16.792715 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-1_5526a303-a350-4271-8e45-36881affbd04/openstack-network-exporter/0.log" Nov 28 16:24:16 crc kubenswrapper[4857]: I1128 16:24:16.869538 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-1_5526a303-a350-4271-8e45-36881affbd04/ovsdbserver-nb/0.log" Nov 28 16:24:17 crc kubenswrapper[4857]: I1128 16:24:17.039230 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-2_5369c4e6-8532-4d9c-88a7-7fbb5c22d44e/openstack-network-exporter/0.log" Nov 28 16:24:17 crc kubenswrapper[4857]: I1128 16:24:17.149320 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-2_5369c4e6-8532-4d9c-88a7-7fbb5c22d44e/ovsdbserver-nb/0.log" Nov 28 16:24:17 crc kubenswrapper[4857]: I1128 16:24:17.251279 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_4b81238d-98a7-4c18-930f-6bcce93525e9/openstack-network-exporter/0.log" Nov 28 16:24:17 crc kubenswrapper[4857]: I1128 16:24:17.337500 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_4b81238d-98a7-4c18-930f-6bcce93525e9/ovsdbserver-sb/0.log" Nov 28 16:24:17 crc kubenswrapper[4857]: I1128 16:24:17.485527 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-1_215aa7d0-2437-4c1a-ab37-80be33b6fca0/ovsdbserver-sb/0.log" Nov 28 16:24:17 crc kubenswrapper[4857]: I1128 16:24:17.522635 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-1_215aa7d0-2437-4c1a-ab37-80be33b6fca0/openstack-network-exporter/0.log" Nov 28 16:24:17 crc kubenswrapper[4857]: I1128 16:24:17.681153 4857 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ovsdbserver-sb-2_d4f36444-104b-4cb9-befb-02e72742474b/openstack-network-exporter/0.log" Nov 28 16:24:17 crc kubenswrapper[4857]: I1128 16:24:17.740294 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-2_d4f36444-104b-4cb9-befb-02e72742474b/ovsdbserver-sb/0.log" Nov 28 16:24:17 crc kubenswrapper[4857]: I1128 16:24:17.952247 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-7c8bb4b4b8-6cp59_31f48ac3-8aca-4d6e-887f-d75982095216/placement-api/0.log" Nov 28 16:24:18 crc kubenswrapper[4857]: I1128 16:24:18.006283 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-7c8bb4b4b8-6cp59_31f48ac3-8aca-4d6e-887f-d75982095216/placement-log/0.log" Nov 28 16:24:18 crc kubenswrapper[4857]: I1128 16:24:18.481849 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_04369fad-6e83-4f50-be3e-e37f8c2d6b60/init-config-reloader/0.log" Nov 28 16:24:18 crc kubenswrapper[4857]: I1128 16:24:18.533710 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_pre-adoption-validation-openstack-pre-adoption-openstack-cz8fl2_28b59422-5490-4fcc-a7cd-a4e11842e1d4/pre-adoption-validation-openstack-pre-adoption-openstack-cell1/0.log" Nov 28 16:24:18 crc kubenswrapper[4857]: I1128 16:24:18.733656 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_04369fad-6e83-4f50-be3e-e37f8c2d6b60/init-config-reloader/0.log" Nov 28 16:24:18 crc kubenswrapper[4857]: I1128 16:24:18.745649 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_04369fad-6e83-4f50-be3e-e37f8c2d6b60/config-reloader/0.log" Nov 28 16:24:18 crc kubenswrapper[4857]: I1128 16:24:18.753722 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_04369fad-6e83-4f50-be3e-e37f8c2d6b60/prometheus/0.log" Nov 28 16:24:18 crc kubenswrapper[4857]: I1128 16:24:18.833428 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_04369fad-6e83-4f50-be3e-e37f8c2d6b60/thanos-sidecar/0.log" Nov 28 16:24:18 crc kubenswrapper[4857]: I1128 16:24:18.977188 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_bf2f5ade-a981-4a9f-891c-83fd9bb77414/setup-container/0.log" Nov 28 16:24:19 crc kubenswrapper[4857]: I1128 16:24:19.030983 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-nj5dx"] Nov 28 16:24:19 crc kubenswrapper[4857]: E1128 16:24:19.031735 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="299af1a6-6f2e-4aff-82c6-a4369c0293c3" containerName="container-00" Nov 28 16:24:19 crc kubenswrapper[4857]: I1128 16:24:19.031826 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="299af1a6-6f2e-4aff-82c6-a4369c0293c3" containerName="container-00" Nov 28 16:24:19 crc kubenswrapper[4857]: I1128 16:24:19.032127 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="299af1a6-6f2e-4aff-82c6-a4369c0293c3" containerName="container-00" Nov 28 16:24:19 crc kubenswrapper[4857]: I1128 16:24:19.033814 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nj5dx" Nov 28 16:24:19 crc kubenswrapper[4857]: I1128 16:24:19.051007 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nj5dx"] Nov 28 16:24:19 crc kubenswrapper[4857]: I1128 16:24:19.232940 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wsch4\" (UniqueName: \"kubernetes.io/projected/6f3ce569-6d83-4e40-b471-21f78f28a1cc-kube-api-access-wsch4\") pod \"redhat-marketplace-nj5dx\" (UID: \"6f3ce569-6d83-4e40-b471-21f78f28a1cc\") " pod="openshift-marketplace/redhat-marketplace-nj5dx" Nov 28 16:24:19 crc kubenswrapper[4857]: I1128 16:24:19.233469 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f3ce569-6d83-4e40-b471-21f78f28a1cc-catalog-content\") pod \"redhat-marketplace-nj5dx\" (UID: \"6f3ce569-6d83-4e40-b471-21f78f28a1cc\") " pod="openshift-marketplace/redhat-marketplace-nj5dx" Nov 28 16:24:19 crc kubenswrapper[4857]: I1128 16:24:19.233536 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f3ce569-6d83-4e40-b471-21f78f28a1cc-utilities\") pod \"redhat-marketplace-nj5dx\" (UID: \"6f3ce569-6d83-4e40-b471-21f78f28a1cc\") " pod="openshift-marketplace/redhat-marketplace-nj5dx" Nov 28 16:24:19 crc kubenswrapper[4857]: I1128 16:24:19.318045 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_bf2f5ade-a981-4a9f-891c-83fd9bb77414/setup-container/0.log" Nov 28 16:24:19 crc kubenswrapper[4857]: I1128 16:24:19.335411 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wsch4\" (UniqueName: \"kubernetes.io/projected/6f3ce569-6d83-4e40-b471-21f78f28a1cc-kube-api-access-wsch4\") pod \"redhat-marketplace-nj5dx\" (UID: \"6f3ce569-6d83-4e40-b471-21f78f28a1cc\") " pod="openshift-marketplace/redhat-marketplace-nj5dx" Nov 28 16:24:19 crc kubenswrapper[4857]: I1128 16:24:19.335510 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f3ce569-6d83-4e40-b471-21f78f28a1cc-catalog-content\") pod \"redhat-marketplace-nj5dx\" (UID: \"6f3ce569-6d83-4e40-b471-21f78f28a1cc\") " pod="openshift-marketplace/redhat-marketplace-nj5dx" Nov 28 16:24:19 crc kubenswrapper[4857]: I1128 16:24:19.335626 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f3ce569-6d83-4e40-b471-21f78f28a1cc-utilities\") pod \"redhat-marketplace-nj5dx\" (UID: \"6f3ce569-6d83-4e40-b471-21f78f28a1cc\") " pod="openshift-marketplace/redhat-marketplace-nj5dx" Nov 28 16:24:19 crc kubenswrapper[4857]: I1128 16:24:19.336407 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f3ce569-6d83-4e40-b471-21f78f28a1cc-catalog-content\") pod \"redhat-marketplace-nj5dx\" (UID: \"6f3ce569-6d83-4e40-b471-21f78f28a1cc\") " pod="openshift-marketplace/redhat-marketplace-nj5dx" Nov 28 16:24:19 crc kubenswrapper[4857]: I1128 16:24:19.336456 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f3ce569-6d83-4e40-b471-21f78f28a1cc-utilities\") pod 
\"redhat-marketplace-nj5dx\" (UID: \"6f3ce569-6d83-4e40-b471-21f78f28a1cc\") " pod="openshift-marketplace/redhat-marketplace-nj5dx" Nov 28 16:24:19 crc kubenswrapper[4857]: I1128 16:24:19.360527 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_bf2f5ade-a981-4a9f-891c-83fd9bb77414/rabbitmq/0.log" Nov 28 16:24:19 crc kubenswrapper[4857]: I1128 16:24:19.360530 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wsch4\" (UniqueName: \"kubernetes.io/projected/6f3ce569-6d83-4e40-b471-21f78f28a1cc-kube-api-access-wsch4\") pod \"redhat-marketplace-nj5dx\" (UID: \"6f3ce569-6d83-4e40-b471-21f78f28a1cc\") " pod="openshift-marketplace/redhat-marketplace-nj5dx" Nov 28 16:24:19 crc kubenswrapper[4857]: I1128 16:24:19.366761 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nj5dx" Nov 28 16:24:19 crc kubenswrapper[4857]: I1128 16:24:19.380738 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f/setup-container/0.log" Nov 28 16:24:19 crc kubenswrapper[4857]: I1128 16:24:19.703279 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f/setup-container/0.log" Nov 28 16:24:19 crc kubenswrapper[4857]: I1128 16:24:19.765866 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-openstack-openstack-cell1-st7gd_e63beefb-f92b-4109-890b-209999158ac7/reboot-os-openstack-openstack-cell1/0.log" Nov 28 16:24:19 crc kubenswrapper[4857]: I1128 16:24:19.878629 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-nj5dx"] Nov 28 16:24:19 crc kubenswrapper[4857]: I1128 16:24:19.924765 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_28c5e6ac-e77c-4bc0-8d28-5f323f7fcb6f/rabbitmq/0.log" Nov 28 16:24:20 crc kubenswrapper[4857]: I1128 16:24:20.594638 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-openstack-6mlkc_4f163911-5f98-47d3-9771-8e1aa7175cff/ssh-known-hosts-openstack/0.log" Nov 28 16:24:20 crc kubenswrapper[4857]: I1128 16:24:20.606504 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_55f0c5ec-efb9-4049-82fb-2b33d503e84e/memcached/0.log" Nov 28 16:24:20 crc kubenswrapper[4857]: I1128 16:24:20.628149 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-openstack-openstack-cell1-cn6q8_4f4a22c2-c87d-46d3-a6b0-72968f2661af/run-os-openstack-openstack-cell1/0.log" Nov 28 16:24:20 crc kubenswrapper[4857]: I1128 16:24:20.773843 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nj5dx" event={"ID":"6f3ce569-6d83-4e40-b471-21f78f28a1cc","Type":"ContainerStarted","Data":"73b84d60e8622793cc00e0cdb1ee79960e9dc13af859527b4e3ab6a97ba85b53"} Nov 28 16:24:20 crc kubenswrapper[4857]: I1128 16:24:20.803623 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-openstack-openstack-cell1-xdxr5_afa81231-caa2-4a60-b28f-77409c0837e2/telemetry-openstack-openstack-cell1/0.log" Nov 28 16:24:20 crc kubenswrapper[4857]: I1128 16:24:20.852288 4857 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_tripleo-cleanup-tripleo-cleanup-openstack-cell1-9885f_32fb95d9-2633-4efe-bcc4-e74e6fb9bd12/tripleo-cleanup-tripleo-cleanup-openstack-cell1/0.log" Nov 28 16:24:21 crc kubenswrapper[4857]: I1128 16:24:21.068171 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-openstack-openstack-cell1-xt5v2_64695c5c-02dd-4c82-bbc7-aa7262c16397/validate-network-openstack-openstack-cell1/0.log" Nov 28 16:24:21 crc kubenswrapper[4857]: I1128 16:24:21.784412 4857 generic.go:334] "Generic (PLEG): container finished" podID="6f3ce569-6d83-4e40-b471-21f78f28a1cc" containerID="0ff5a64120f77c80d6a002c9b88c5181e4ec1f1c54843007ea40d2f2476dc048" exitCode=0 Nov 28 16:24:21 crc kubenswrapper[4857]: I1128 16:24:21.784517 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nj5dx" event={"ID":"6f3ce569-6d83-4e40-b471-21f78f28a1cc","Type":"ContainerDied","Data":"0ff5a64120f77c80d6a002c9b88c5181e4ec1f1c54843007ea40d2f2476dc048"} Nov 28 16:24:24 crc kubenswrapper[4857]: I1128 16:24:24.898310 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nj5dx" event={"ID":"6f3ce569-6d83-4e40-b471-21f78f28a1cc","Type":"ContainerStarted","Data":"96440837c7e2711316fdeb588b5fd834ae6bece06a23310604fe9eabd3393558"} Nov 28 16:24:25 crc kubenswrapper[4857]: I1128 16:24:25.910707 4857 generic.go:334] "Generic (PLEG): container finished" podID="6f3ce569-6d83-4e40-b471-21f78f28a1cc" containerID="96440837c7e2711316fdeb588b5fd834ae6bece06a23310604fe9eabd3393558" exitCode=0 Nov 28 16:24:25 crc kubenswrapper[4857]: I1128 16:24:25.911148 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nj5dx" event={"ID":"6f3ce569-6d83-4e40-b471-21f78f28a1cc","Type":"ContainerDied","Data":"96440837c7e2711316fdeb588b5fd834ae6bece06a23310604fe9eabd3393558"} Nov 28 16:24:26 crc kubenswrapper[4857]: I1128 16:24:26.923630 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nj5dx" event={"ID":"6f3ce569-6d83-4e40-b471-21f78f28a1cc","Type":"ContainerStarted","Data":"c10b67dea21278b2edbb21505774d95f52f233d43ab25eae5688c7a73a86fceb"} Nov 28 16:24:26 crc kubenswrapper[4857]: I1128 16:24:26.949591 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-nj5dx" podStartSLOduration=4.259208786 podStartE2EDuration="8.949566045s" podCreationTimestamp="2025-11-28 16:24:18 +0000 UTC" firstStartedPulling="2025-11-28 16:24:21.786302824 +0000 UTC m=+10511.910244261" lastFinishedPulling="2025-11-28 16:24:26.476660083 +0000 UTC m=+10516.600601520" observedRunningTime="2025-11-28 16:24:26.938890923 +0000 UTC m=+10517.062832360" watchObservedRunningTime="2025-11-28 16:24:26.949566045 +0000 UTC m=+10517.073507482" Nov 28 16:24:29 crc kubenswrapper[4857]: I1128 16:24:29.367595 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-nj5dx" Nov 28 16:24:29 crc kubenswrapper[4857]: I1128 16:24:29.367987 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-nj5dx" Nov 28 16:24:29 crc kubenswrapper[4857]: I1128 16:24:29.427093 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-nj5dx" Nov 28 16:24:39 crc kubenswrapper[4857]: I1128 16:24:39.423043 4857 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-nj5dx" Nov 28 16:24:39 crc kubenswrapper[4857]: I1128 16:24:39.495310 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nj5dx"] Nov 28 16:24:40 crc kubenswrapper[4857]: I1128 16:24:40.053092 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-nj5dx" podUID="6f3ce569-6d83-4e40-b471-21f78f28a1cc" containerName="registry-server" containerID="cri-o://c10b67dea21278b2edbb21505774d95f52f233d43ab25eae5688c7a73a86fceb" gracePeriod=2 Nov 28 16:24:40 crc kubenswrapper[4857]: I1128 16:24:40.554280 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nj5dx" Nov 28 16:24:40 crc kubenswrapper[4857]: I1128 16:24:40.637213 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f3ce569-6d83-4e40-b471-21f78f28a1cc-catalog-content\") pod \"6f3ce569-6d83-4e40-b471-21f78f28a1cc\" (UID: \"6f3ce569-6d83-4e40-b471-21f78f28a1cc\") " Nov 28 16:24:40 crc kubenswrapper[4857]: I1128 16:24:40.637317 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wsch4\" (UniqueName: \"kubernetes.io/projected/6f3ce569-6d83-4e40-b471-21f78f28a1cc-kube-api-access-wsch4\") pod \"6f3ce569-6d83-4e40-b471-21f78f28a1cc\" (UID: \"6f3ce569-6d83-4e40-b471-21f78f28a1cc\") " Nov 28 16:24:40 crc kubenswrapper[4857]: I1128 16:24:40.637542 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f3ce569-6d83-4e40-b471-21f78f28a1cc-utilities\") pod \"6f3ce569-6d83-4e40-b471-21f78f28a1cc\" (UID: \"6f3ce569-6d83-4e40-b471-21f78f28a1cc\") " Nov 28 16:24:40 crc kubenswrapper[4857]: I1128 16:24:40.638547 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6f3ce569-6d83-4e40-b471-21f78f28a1cc-utilities" (OuterVolumeSpecName: "utilities") pod "6f3ce569-6d83-4e40-b471-21f78f28a1cc" (UID: "6f3ce569-6d83-4e40-b471-21f78f28a1cc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:24:40 crc kubenswrapper[4857]: I1128 16:24:40.644063 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f3ce569-6d83-4e40-b471-21f78f28a1cc-kube-api-access-wsch4" (OuterVolumeSpecName: "kube-api-access-wsch4") pod "6f3ce569-6d83-4e40-b471-21f78f28a1cc" (UID: "6f3ce569-6d83-4e40-b471-21f78f28a1cc"). InnerVolumeSpecName "kube-api-access-wsch4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:24:40 crc kubenswrapper[4857]: I1128 16:24:40.660780 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6f3ce569-6d83-4e40-b471-21f78f28a1cc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6f3ce569-6d83-4e40-b471-21f78f28a1cc" (UID: "6f3ce569-6d83-4e40-b471-21f78f28a1cc"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:24:40 crc kubenswrapper[4857]: I1128 16:24:40.739977 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f3ce569-6d83-4e40-b471-21f78f28a1cc-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:40 crc kubenswrapper[4857]: I1128 16:24:40.740015 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f3ce569-6d83-4e40-b471-21f78f28a1cc-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:40 crc kubenswrapper[4857]: I1128 16:24:40.740027 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wsch4\" (UniqueName: \"kubernetes.io/projected/6f3ce569-6d83-4e40-b471-21f78f28a1cc-kube-api-access-wsch4\") on node \"crc\" DevicePath \"\"" Nov 28 16:24:41 crc kubenswrapper[4857]: I1128 16:24:41.064840 4857 generic.go:334] "Generic (PLEG): container finished" podID="6f3ce569-6d83-4e40-b471-21f78f28a1cc" containerID="c10b67dea21278b2edbb21505774d95f52f233d43ab25eae5688c7a73a86fceb" exitCode=0 Nov 28 16:24:41 crc kubenswrapper[4857]: I1128 16:24:41.064889 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nj5dx" event={"ID":"6f3ce569-6d83-4e40-b471-21f78f28a1cc","Type":"ContainerDied","Data":"c10b67dea21278b2edbb21505774d95f52f233d43ab25eae5688c7a73a86fceb"} Nov 28 16:24:41 crc kubenswrapper[4857]: I1128 16:24:41.064910 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-nj5dx" Nov 28 16:24:41 crc kubenswrapper[4857]: I1128 16:24:41.064936 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-nj5dx" event={"ID":"6f3ce569-6d83-4e40-b471-21f78f28a1cc","Type":"ContainerDied","Data":"73b84d60e8622793cc00e0cdb1ee79960e9dc13af859527b4e3ab6a97ba85b53"} Nov 28 16:24:41 crc kubenswrapper[4857]: I1128 16:24:41.064978 4857 scope.go:117] "RemoveContainer" containerID="c10b67dea21278b2edbb21505774d95f52f233d43ab25eae5688c7a73a86fceb" Nov 28 16:24:41 crc kubenswrapper[4857]: I1128 16:24:41.105289 4857 scope.go:117] "RemoveContainer" containerID="96440837c7e2711316fdeb588b5fd834ae6bece06a23310604fe9eabd3393558" Nov 28 16:24:41 crc kubenswrapper[4857]: I1128 16:24:41.107731 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-nj5dx"] Nov 28 16:24:41 crc kubenswrapper[4857]: I1128 16:24:41.118602 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-nj5dx"] Nov 28 16:24:41 crc kubenswrapper[4857]: I1128 16:24:41.133082 4857 scope.go:117] "RemoveContainer" containerID="0ff5a64120f77c80d6a002c9b88c5181e4ec1f1c54843007ea40d2f2476dc048" Nov 28 16:24:41 crc kubenswrapper[4857]: I1128 16:24:41.174622 4857 scope.go:117] "RemoveContainer" containerID="c10b67dea21278b2edbb21505774d95f52f233d43ab25eae5688c7a73a86fceb" Nov 28 16:24:41 crc kubenswrapper[4857]: E1128 16:24:41.174989 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c10b67dea21278b2edbb21505774d95f52f233d43ab25eae5688c7a73a86fceb\": container with ID starting with c10b67dea21278b2edbb21505774d95f52f233d43ab25eae5688c7a73a86fceb not found: ID does not exist" containerID="c10b67dea21278b2edbb21505774d95f52f233d43ab25eae5688c7a73a86fceb" Nov 28 16:24:41 crc kubenswrapper[4857]: I1128 16:24:41.175023 4857 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c10b67dea21278b2edbb21505774d95f52f233d43ab25eae5688c7a73a86fceb"} err="failed to get container status \"c10b67dea21278b2edbb21505774d95f52f233d43ab25eae5688c7a73a86fceb\": rpc error: code = NotFound desc = could not find container \"c10b67dea21278b2edbb21505774d95f52f233d43ab25eae5688c7a73a86fceb\": container with ID starting with c10b67dea21278b2edbb21505774d95f52f233d43ab25eae5688c7a73a86fceb not found: ID does not exist" Nov 28 16:24:41 crc kubenswrapper[4857]: I1128 16:24:41.175043 4857 scope.go:117] "RemoveContainer" containerID="96440837c7e2711316fdeb588b5fd834ae6bece06a23310604fe9eabd3393558" Nov 28 16:24:41 crc kubenswrapper[4857]: E1128 16:24:41.175352 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"96440837c7e2711316fdeb588b5fd834ae6bece06a23310604fe9eabd3393558\": container with ID starting with 96440837c7e2711316fdeb588b5fd834ae6bece06a23310604fe9eabd3393558 not found: ID does not exist" containerID="96440837c7e2711316fdeb588b5fd834ae6bece06a23310604fe9eabd3393558" Nov 28 16:24:41 crc kubenswrapper[4857]: I1128 16:24:41.175401 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"96440837c7e2711316fdeb588b5fd834ae6bece06a23310604fe9eabd3393558"} err="failed to get container status \"96440837c7e2711316fdeb588b5fd834ae6bece06a23310604fe9eabd3393558\": rpc error: code = NotFound desc = could not find container \"96440837c7e2711316fdeb588b5fd834ae6bece06a23310604fe9eabd3393558\": container with ID starting with 96440837c7e2711316fdeb588b5fd834ae6bece06a23310604fe9eabd3393558 not found: ID does not exist" Nov 28 16:24:41 crc kubenswrapper[4857]: I1128 16:24:41.175430 4857 scope.go:117] "RemoveContainer" containerID="0ff5a64120f77c80d6a002c9b88c5181e4ec1f1c54843007ea40d2f2476dc048" Nov 28 16:24:41 crc kubenswrapper[4857]: E1128 16:24:41.175792 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0ff5a64120f77c80d6a002c9b88c5181e4ec1f1c54843007ea40d2f2476dc048\": container with ID starting with 0ff5a64120f77c80d6a002c9b88c5181e4ec1f1c54843007ea40d2f2476dc048 not found: ID does not exist" containerID="0ff5a64120f77c80d6a002c9b88c5181e4ec1f1c54843007ea40d2f2476dc048" Nov 28 16:24:41 crc kubenswrapper[4857]: I1128 16:24:41.175873 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ff5a64120f77c80d6a002c9b88c5181e4ec1f1c54843007ea40d2f2476dc048"} err="failed to get container status \"0ff5a64120f77c80d6a002c9b88c5181e4ec1f1c54843007ea40d2f2476dc048\": rpc error: code = NotFound desc = could not find container \"0ff5a64120f77c80d6a002c9b88c5181e4ec1f1c54843007ea40d2f2476dc048\": container with ID starting with 0ff5a64120f77c80d6a002c9b88c5181e4ec1f1c54843007ea40d2f2476dc048 not found: ID does not exist" Nov 28 16:24:42 crc kubenswrapper[4857]: I1128 16:24:42.243324 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f3ce569-6d83-4e40-b471-21f78f28a1cc" path="/var/lib/kubelet/pods/6f3ce569-6d83-4e40-b471-21f78f28a1cc/volumes" Nov 28 16:24:42 crc kubenswrapper[4857]: I1128 16:24:42.717011 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m_9e893541-5110-452a-9b42-c37bff12861d/util/0.log" Nov 28 16:24:42 crc kubenswrapper[4857]: 
I1128 16:24:42.879896 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m_9e893541-5110-452a-9b42-c37bff12861d/pull/0.log" Nov 28 16:24:42 crc kubenswrapper[4857]: I1128 16:24:42.917565 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m_9e893541-5110-452a-9b42-c37bff12861d/util/0.log" Nov 28 16:24:42 crc kubenswrapper[4857]: I1128 16:24:42.984327 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m_9e893541-5110-452a-9b42-c37bff12861d/pull/0.log" Nov 28 16:24:43 crc kubenswrapper[4857]: I1128 16:24:43.152089 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m_9e893541-5110-452a-9b42-c37bff12861d/pull/0.log" Nov 28 16:24:43 crc kubenswrapper[4857]: I1128 16:24:43.162536 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m_9e893541-5110-452a-9b42-c37bff12861d/util/0.log" Nov 28 16:24:43 crc kubenswrapper[4857]: I1128 16:24:43.164348 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_2891fe87fa846e4beda4a3e9787beaa59d5f0ed474039e949564618069wvs2m_9e893541-5110-452a-9b42-c37bff12861d/extract/0.log" Nov 28 16:24:43 crc kubenswrapper[4857]: I1128 16:24:43.311163 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-w2tjv_5dc1083e-b6ad-4424-982a-e85aeac54c1f/kube-rbac-proxy/0.log" Nov 28 16:24:43 crc kubenswrapper[4857]: I1128 16:24:43.454739 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-w2tjv_5dc1083e-b6ad-4424-982a-e85aeac54c1f/manager/0.log" Nov 28 16:24:43 crc kubenswrapper[4857]: I1128 16:24:43.463258 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-l44sf_77d1fd9b-46fd-4df8-bfd4-2c735e2d7504/kube-rbac-proxy/0.log" Nov 28 16:24:43 crc kubenswrapper[4857]: I1128 16:24:43.626534 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-l44sf_77d1fd9b-46fd-4df8-bfd4-2c735e2d7504/manager/0.log" Nov 28 16:24:43 crc kubenswrapper[4857]: I1128 16:24:43.630851 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-gq64h_c226ccd3-f651-4ad7-91c4-4fa0f194c415/kube-rbac-proxy/0.log" Nov 28 16:24:43 crc kubenswrapper[4857]: I1128 16:24:43.699408 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-gq64h_c226ccd3-f651-4ad7-91c4-4fa0f194c415/manager/0.log" Nov 28 16:24:43 crc kubenswrapper[4857]: I1128 16:24:43.809693 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-589cbd6b5b-bjf4d_057e15a1-c387-4614-b809-003fcbc1053d/kube-rbac-proxy/0.log" Nov 28 16:24:44 crc kubenswrapper[4857]: I1128 16:24:44.019041 4857 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_glance-operator-controller-manager-589cbd6b5b-bjf4d_057e15a1-c387-4614-b809-003fcbc1053d/manager/0.log" Nov 28 16:24:44 crc kubenswrapper[4857]: I1128 16:24:44.062725 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-2hbkq_47d1bb5d-1b83-4ff2-b760-a08e56cce245/manager/0.log" Nov 28 16:24:44 crc kubenswrapper[4857]: I1128 16:24:44.075550 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-2hbkq_47d1bb5d-1b83-4ff2-b760-a08e56cce245/kube-rbac-proxy/0.log" Nov 28 16:24:44 crc kubenswrapper[4857]: I1128 16:24:44.260680 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-f5zhv_0fb31297-9385-4426-a232-463e60388e72/kube-rbac-proxy/0.log" Nov 28 16:24:44 crc kubenswrapper[4857]: I1128 16:24:44.272875 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-f5zhv_0fb31297-9385-4426-a232-463e60388e72/manager/0.log" Nov 28 16:24:44 crc kubenswrapper[4857]: I1128 16:24:44.423045 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-8rl4w_5293402f-6ffd-4df9-853f-1c73a8d8b887/kube-rbac-proxy/0.log" Nov 28 16:24:44 crc kubenswrapper[4857]: I1128 16:24:44.502039 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-fstp8_c62fc075-98ca-4cda-b5a6-be5da222c5c3/kube-rbac-proxy/0.log" Nov 28 16:24:44 crc kubenswrapper[4857]: I1128 16:24:44.666741 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-fstp8_c62fc075-98ca-4cda-b5a6-be5da222c5c3/manager/0.log" Nov 28 16:24:44 crc kubenswrapper[4857]: I1128 16:24:44.774811 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-8rl4w_5293402f-6ffd-4df9-853f-1c73a8d8b887/manager/0.log" Nov 28 16:24:45 crc kubenswrapper[4857]: I1128 16:24:45.189559 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-ntrps_b51da38a-adbe-4c14-86c8-7294f399b971/kube-rbac-proxy/0.log" Nov 28 16:24:45 crc kubenswrapper[4857]: I1128 16:24:45.274726 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-ntrps_b51da38a-adbe-4c14-86c8-7294f399b971/manager/0.log" Nov 28 16:24:45 crc kubenswrapper[4857]: I1128 16:24:45.408804 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5d499bf58b-bff9n_d523105a-fbc2-47fc-ba6d-1738679751bc/kube-rbac-proxy/0.log" Nov 28 16:24:45 crc kubenswrapper[4857]: I1128 16:24:45.454737 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5d499bf58b-bff9n_d523105a-fbc2-47fc-ba6d-1738679751bc/manager/0.log" Nov 28 16:24:45 crc kubenswrapper[4857]: I1128 16:24:45.478110 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-lk8hf_124ac5de-0ba4-4863-a225-750a2fb5570f/kube-rbac-proxy/0.log" Nov 28 16:24:45 crc kubenswrapper[4857]: I1128 16:24:45.651245 4857 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-lk8hf_124ac5de-0ba4-4863-a225-750a2fb5570f/manager/0.log" Nov 28 16:24:45 crc kubenswrapper[4857]: I1128 16:24:45.679266 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-4l8jv_1a693631-7842-449a-98ab-9b3668d8bbf6/kube-rbac-proxy/0.log" Nov 28 16:24:45 crc kubenswrapper[4857]: I1128 16:24:45.749240 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-4l8jv_1a693631-7842-449a-98ab-9b3668d8bbf6/manager/0.log" Nov 28 16:24:45 crc kubenswrapper[4857]: I1128 16:24:45.905823 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-b8hn9_7187e0f3-1feb-401a-bda3-900798b760c1/kube-rbac-proxy/0.log" Nov 28 16:24:46 crc kubenswrapper[4857]: I1128 16:24:46.158369 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-brvlk_ec049f1f-9c56-4593-9953-3f18a7c90887/kube-rbac-proxy/0.log" Nov 28 16:24:46 crc kubenswrapper[4857]: I1128 16:24:46.174043 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-b8hn9_7187e0f3-1feb-401a-bda3-900798b760c1/manager/0.log" Nov 28 16:24:46 crc kubenswrapper[4857]: I1128 16:24:46.228474 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-brvlk_ec049f1f-9c56-4593-9953-3f18a7c90887/manager/0.log" Nov 28 16:24:46 crc kubenswrapper[4857]: I1128 16:24:46.386189 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt_85e2c248-e4f5-4192-bef2-e14c956e16f7/kube-rbac-proxy/0.log" Nov 28 16:24:46 crc kubenswrapper[4857]: I1128 16:24:46.401486 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-5fcdb54b6bfdkpt_85e2c248-e4f5-4192-bef2-e14c956e16f7/manager/0.log" Nov 28 16:24:47 crc kubenswrapper[4857]: I1128 16:24:47.141176 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-78449c6ff-gkhhg_958cdbc4-1aa1-48b1-b84e-04f36c643455/operator/0.log" Nov 28 16:24:47 crc kubenswrapper[4857]: I1128 16:24:47.202417 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-hjxw9_cbdbcd68-3d0e-4728-8c51-250eb91c0ac5/registry-server/0.log" Nov 28 16:24:47 crc kubenswrapper[4857]: I1128 16:24:47.384051 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-qcr9m_cf39f47e-7284-4fae-949d-1b4de9e97751/kube-rbac-proxy/0.log" Nov 28 16:24:47 crc kubenswrapper[4857]: I1128 16:24:47.509520 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-vbr94_d7be9e32-53ca-40c8-8f12-8ee0ed5e924c/kube-rbac-proxy/0.log" Nov 28 16:24:47 crc kubenswrapper[4857]: I1128 16:24:47.547437 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-qcr9m_cf39f47e-7284-4fae-949d-1b4de9e97751/manager/0.log" Nov 28 16:24:47 crc kubenswrapper[4857]: I1128 16:24:47.638384 4857 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-vbr94_d7be9e32-53ca-40c8-8f12-8ee0ed5e924c/manager/0.log" Nov 28 16:24:47 crc kubenswrapper[4857]: I1128 16:24:47.755170 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-9mbjq_56d61304-2a03-427c-884e-cb000f6d24ea/operator/0.log" Nov 28 16:24:47 crc kubenswrapper[4857]: I1128 16:24:47.872765 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-8f8nb_ea510000-70d9-4371-b791-6872e8d6905c/kube-rbac-proxy/0.log" Nov 28 16:24:47 crc kubenswrapper[4857]: I1128 16:24:47.999805 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-8f8nb_ea510000-70d9-4371-b791-6872e8d6905c/manager/0.log" Nov 28 16:24:48 crc kubenswrapper[4857]: I1128 16:24:48.053723 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-xndsj_9579cba1-740c-4675-beb4-858ee406b22b/kube-rbac-proxy/0.log" Nov 28 16:24:48 crc kubenswrapper[4857]: I1128 16:24:48.230779 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-gcvrp_dcfce002-7195-42ef-932f-c5a8eebeb87f/kube-rbac-proxy/0.log" Nov 28 16:24:48 crc kubenswrapper[4857]: I1128 16:24:48.282896 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-gcvrp_dcfce002-7195-42ef-932f-c5a8eebeb87f/manager/0.log" Nov 28 16:24:48 crc kubenswrapper[4857]: I1128 16:24:48.349848 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-xndsj_9579cba1-740c-4675-beb4-858ee406b22b/manager/0.log" Nov 28 16:24:48 crc kubenswrapper[4857]: I1128 16:24:48.510415 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-vlcd8_8102fecd-5745-44b9-aa1c-37bd0662a28d/kube-rbac-proxy/0.log" Nov 28 16:24:48 crc kubenswrapper[4857]: I1128 16:24:48.559564 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-vlcd8_8102fecd-5745-44b9-aa1c-37bd0662a28d/manager/0.log" Nov 28 16:24:49 crc kubenswrapper[4857]: I1128 16:24:49.293051 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7894b8b8d-fxkwb_2b9b746d-698b-4ac1-b8e7-2b431b230985/manager/0.log" Nov 28 16:25:08 crc kubenswrapper[4857]: I1128 16:25:08.229875 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-4xnj2_d39d20be-e63a-4fef-bde2-6f4e76051828/control-plane-machine-set-operator/0.log" Nov 28 16:25:08 crc kubenswrapper[4857]: I1128 16:25:08.408752 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-nm6c5_0723fd39-fd72-4aae-a8ac-4a69a9cea44e/kube-rbac-proxy/0.log" Nov 28 16:25:08 crc kubenswrapper[4857]: I1128 16:25:08.412849 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-nm6c5_0723fd39-fd72-4aae-a8ac-4a69a9cea44e/machine-api-operator/0.log" Nov 28 16:25:20 crc kubenswrapper[4857]: 
I1128 16:25:20.822457 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-86cb77c54b-rdmt6_dfbe1d42-d65f-4d9c-851a-50319b7742bd/cert-manager-controller/0.log" Nov 28 16:25:20 crc kubenswrapper[4857]: I1128 16:25:20.933219 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-855d9ccff4-jm54s_b8471048-2df4-4c56-bfce-f8e8c377b85f/cert-manager-cainjector/0.log" Nov 28 16:25:20 crc kubenswrapper[4857]: I1128 16:25:20.984454 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-f4fb5df64-6kbg8_de8a159c-7c49-47a6-9dd6-34d23aa529e0/cert-manager-webhook/0.log" Nov 28 16:25:33 crc kubenswrapper[4857]: I1128 16:25:33.704156 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7fbb5f6569-6m9c9_0b82e06c-e622-4118-955f-3d191b41077e/nmstate-console-plugin/0.log" Nov 28 16:25:33 crc kubenswrapper[4857]: I1128 16:25:33.846697 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-927cv_89385bef-e7f9-4889-b992-7049d6d84f97/nmstate-handler/0.log" Nov 28 16:25:33 crc kubenswrapper[4857]: I1128 16:25:33.907111 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-vrprc_37a08f60-3d10-4701-95b8-7018de948df4/kube-rbac-proxy/0.log" Nov 28 16:25:33 crc kubenswrapper[4857]: I1128 16:25:33.989044 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-vrprc_37a08f60-3d10-4701-95b8-7018de948df4/nmstate-metrics/0.log" Nov 28 16:25:34 crc kubenswrapper[4857]: I1128 16:25:34.085613 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-5b5b58f5c8-d2ffz_9ccf01e3-1b00-414a-aa76-5a391a57e76e/nmstate-operator/0.log" Nov 28 16:25:34 crc kubenswrapper[4857]: I1128 16:25:34.183638 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-5f6d4c5ccb-pwxgk_77ac9288-6383-4345-bffd-2aadbec644af/nmstate-webhook/0.log" Nov 28 16:25:41 crc kubenswrapper[4857]: I1128 16:25:41.309056 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:25:41 crc kubenswrapper[4857]: I1128 16:25:41.309643 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:25:49 crc kubenswrapper[4857]: I1128 16:25:49.879699 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-h9ldf_301b4f9c-0612-40ff-8009-cbf39f36e85e/kube-rbac-proxy/0.log" Nov 28 16:25:50 crc kubenswrapper[4857]: I1128 16:25:50.237255 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-nxzbf_85bce704-7efb-4e9c-a6b0-c2ebbe46877d/cp-frr-files/0.log" Nov 28 16:25:50 crc kubenswrapper[4857]: I1128 16:25:50.272830 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-h9ldf_301b4f9c-0612-40ff-8009-cbf39f36e85e/controller/0.log" Nov 28 16:25:50 crc 
kubenswrapper[4857]: I1128 16:25:50.756051 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-nxzbf_85bce704-7efb-4e9c-a6b0-c2ebbe46877d/cp-reloader/0.log" Nov 28 16:25:50 crc kubenswrapper[4857]: I1128 16:25:50.773213 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-nxzbf_85bce704-7efb-4e9c-a6b0-c2ebbe46877d/cp-frr-files/0.log" Nov 28 16:25:50 crc kubenswrapper[4857]: I1128 16:25:50.821341 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-nxzbf_85bce704-7efb-4e9c-a6b0-c2ebbe46877d/cp-metrics/0.log" Nov 28 16:25:50 crc kubenswrapper[4857]: I1128 16:25:50.841523 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-nxzbf_85bce704-7efb-4e9c-a6b0-c2ebbe46877d/cp-reloader/0.log" Nov 28 16:25:51 crc kubenswrapper[4857]: I1128 16:25:51.013639 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-nxzbf_85bce704-7efb-4e9c-a6b0-c2ebbe46877d/cp-metrics/0.log" Nov 28 16:25:51 crc kubenswrapper[4857]: I1128 16:25:51.027108 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-nxzbf_85bce704-7efb-4e9c-a6b0-c2ebbe46877d/cp-reloader/0.log" Nov 28 16:25:51 crc kubenswrapper[4857]: I1128 16:25:51.065209 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-nxzbf_85bce704-7efb-4e9c-a6b0-c2ebbe46877d/cp-metrics/0.log" Nov 28 16:25:51 crc kubenswrapper[4857]: I1128 16:25:51.065499 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-nxzbf_85bce704-7efb-4e9c-a6b0-c2ebbe46877d/cp-frr-files/0.log" Nov 28 16:25:51 crc kubenswrapper[4857]: I1128 16:25:51.296730 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-nxzbf_85bce704-7efb-4e9c-a6b0-c2ebbe46877d/cp-frr-files/0.log" Nov 28 16:25:51 crc kubenswrapper[4857]: I1128 16:25:51.313508 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-nxzbf_85bce704-7efb-4e9c-a6b0-c2ebbe46877d/cp-reloader/0.log" Nov 28 16:25:51 crc kubenswrapper[4857]: I1128 16:25:51.324479 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-nxzbf_85bce704-7efb-4e9c-a6b0-c2ebbe46877d/cp-metrics/0.log" Nov 28 16:25:51 crc kubenswrapper[4857]: I1128 16:25:51.345398 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-nxzbf_85bce704-7efb-4e9c-a6b0-c2ebbe46877d/controller/0.log" Nov 28 16:25:51 crc kubenswrapper[4857]: I1128 16:25:51.492508 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-nxzbf_85bce704-7efb-4e9c-a6b0-c2ebbe46877d/kube-rbac-proxy/0.log" Nov 28 16:25:51 crc kubenswrapper[4857]: I1128 16:25:51.515872 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-nxzbf_85bce704-7efb-4e9c-a6b0-c2ebbe46877d/frr-metrics/0.log" Nov 28 16:25:51 crc kubenswrapper[4857]: I1128 16:25:51.546088 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-nxzbf_85bce704-7efb-4e9c-a6b0-c2ebbe46877d/kube-rbac-proxy-frr/0.log" Nov 28 16:25:51 crc kubenswrapper[4857]: I1128 16:25:51.745268 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-nxzbf_85bce704-7efb-4e9c-a6b0-c2ebbe46877d/reloader/0.log" Nov 28 16:25:51 crc kubenswrapper[4857]: I1128 16:25:51.914874 4857 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7fcb986d4-jdrbw_c325af37-018c-4ea2-b63d-4ab931a9a17c/frr-k8s-webhook-server/0.log" Nov 28 16:25:52 crc kubenswrapper[4857]: I1128 16:25:52.150803 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-57db9444fc-x2xfl_2819a401-96f9-4d83-a331-70e053cb7226/manager/0.log" Nov 28 16:25:52 crc kubenswrapper[4857]: I1128 16:25:52.317300 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-f48cdcb6d-5fgqz_c72814a1-86c3-4023-94fe-c492625d1c6c/webhook-server/0.log" Nov 28 16:25:52 crc kubenswrapper[4857]: I1128 16:25:52.432599 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-566t2_e71259d7-d2b0-45eb-ad34-2c9c7687b885/kube-rbac-proxy/0.log" Nov 28 16:25:53 crc kubenswrapper[4857]: I1128 16:25:53.736401 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-566t2_e71259d7-d2b0-45eb-ad34-2c9c7687b885/speaker/0.log" Nov 28 16:25:55 crc kubenswrapper[4857]: I1128 16:25:55.324612 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-nxzbf_85bce704-7efb-4e9c-a6b0-c2ebbe46877d/frr/0.log" Nov 28 16:26:00 crc kubenswrapper[4857]: I1128 16:26:00.696903 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-ll7hf"] Nov 28 16:26:00 crc kubenswrapper[4857]: E1128 16:26:00.697998 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f3ce569-6d83-4e40-b471-21f78f28a1cc" containerName="registry-server" Nov 28 16:26:00 crc kubenswrapper[4857]: I1128 16:26:00.698031 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f3ce569-6d83-4e40-b471-21f78f28a1cc" containerName="registry-server" Nov 28 16:26:00 crc kubenswrapper[4857]: E1128 16:26:00.698049 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f3ce569-6d83-4e40-b471-21f78f28a1cc" containerName="extract-utilities" Nov 28 16:26:00 crc kubenswrapper[4857]: I1128 16:26:00.698055 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f3ce569-6d83-4e40-b471-21f78f28a1cc" containerName="extract-utilities" Nov 28 16:26:00 crc kubenswrapper[4857]: E1128 16:26:00.698115 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f3ce569-6d83-4e40-b471-21f78f28a1cc" containerName="extract-content" Nov 28 16:26:00 crc kubenswrapper[4857]: I1128 16:26:00.698123 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f3ce569-6d83-4e40-b471-21f78f28a1cc" containerName="extract-content" Nov 28 16:26:00 crc kubenswrapper[4857]: I1128 16:26:00.698385 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f3ce569-6d83-4e40-b471-21f78f28a1cc" containerName="registry-server" Nov 28 16:26:00 crc kubenswrapper[4857]: I1128 16:26:00.699920 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ll7hf" Nov 28 16:26:00 crc kubenswrapper[4857]: I1128 16:26:00.714831 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ll7hf"] Nov 28 16:26:00 crc kubenswrapper[4857]: I1128 16:26:00.898360 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9afdaf5e-dc0f-4082-9ec5-5e63b371696c-utilities\") pod \"certified-operators-ll7hf\" (UID: \"9afdaf5e-dc0f-4082-9ec5-5e63b371696c\") " pod="openshift-marketplace/certified-operators-ll7hf" Nov 28 16:26:00 crc kubenswrapper[4857]: I1128 16:26:00.898434 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9afdaf5e-dc0f-4082-9ec5-5e63b371696c-catalog-content\") pod \"certified-operators-ll7hf\" (UID: \"9afdaf5e-dc0f-4082-9ec5-5e63b371696c\") " pod="openshift-marketplace/certified-operators-ll7hf" Nov 28 16:26:00 crc kubenswrapper[4857]: I1128 16:26:00.898463 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rhzq5\" (UniqueName: \"kubernetes.io/projected/9afdaf5e-dc0f-4082-9ec5-5e63b371696c-kube-api-access-rhzq5\") pod \"certified-operators-ll7hf\" (UID: \"9afdaf5e-dc0f-4082-9ec5-5e63b371696c\") " pod="openshift-marketplace/certified-operators-ll7hf" Nov 28 16:26:01 crc kubenswrapper[4857]: I1128 16:26:01.000292 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9afdaf5e-dc0f-4082-9ec5-5e63b371696c-utilities\") pod \"certified-operators-ll7hf\" (UID: \"9afdaf5e-dc0f-4082-9ec5-5e63b371696c\") " pod="openshift-marketplace/certified-operators-ll7hf" Nov 28 16:26:01 crc kubenswrapper[4857]: I1128 16:26:01.000376 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9afdaf5e-dc0f-4082-9ec5-5e63b371696c-catalog-content\") pod \"certified-operators-ll7hf\" (UID: \"9afdaf5e-dc0f-4082-9ec5-5e63b371696c\") " pod="openshift-marketplace/certified-operators-ll7hf" Nov 28 16:26:01 crc kubenswrapper[4857]: I1128 16:26:01.000405 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rhzq5\" (UniqueName: \"kubernetes.io/projected/9afdaf5e-dc0f-4082-9ec5-5e63b371696c-kube-api-access-rhzq5\") pod \"certified-operators-ll7hf\" (UID: \"9afdaf5e-dc0f-4082-9ec5-5e63b371696c\") " pod="openshift-marketplace/certified-operators-ll7hf" Nov 28 16:26:01 crc kubenswrapper[4857]: I1128 16:26:01.001009 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9afdaf5e-dc0f-4082-9ec5-5e63b371696c-catalog-content\") pod \"certified-operators-ll7hf\" (UID: \"9afdaf5e-dc0f-4082-9ec5-5e63b371696c\") " pod="openshift-marketplace/certified-operators-ll7hf" Nov 28 16:26:01 crc kubenswrapper[4857]: I1128 16:26:01.000929 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9afdaf5e-dc0f-4082-9ec5-5e63b371696c-utilities\") pod \"certified-operators-ll7hf\" (UID: \"9afdaf5e-dc0f-4082-9ec5-5e63b371696c\") " pod="openshift-marketplace/certified-operators-ll7hf" Nov 28 16:26:01 crc kubenswrapper[4857]: I1128 16:26:01.474213 4857 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-rhzq5\" (UniqueName: \"kubernetes.io/projected/9afdaf5e-dc0f-4082-9ec5-5e63b371696c-kube-api-access-rhzq5\") pod \"certified-operators-ll7hf\" (UID: \"9afdaf5e-dc0f-4082-9ec5-5e63b371696c\") " pod="openshift-marketplace/certified-operators-ll7hf" Nov 28 16:26:01 crc kubenswrapper[4857]: I1128 16:26:01.667513 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ll7hf" Nov 28 16:26:02 crc kubenswrapper[4857]: I1128 16:26:02.148303 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ll7hf"] Nov 28 16:26:02 crc kubenswrapper[4857]: I1128 16:26:02.875138 4857 generic.go:334] "Generic (PLEG): container finished" podID="9afdaf5e-dc0f-4082-9ec5-5e63b371696c" containerID="aef726ef7b15e00c7fe80183c827f341521e6976bc27ff600cf2276adcef7075" exitCode=0 Nov 28 16:26:02 crc kubenswrapper[4857]: I1128 16:26:02.875350 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ll7hf" event={"ID":"9afdaf5e-dc0f-4082-9ec5-5e63b371696c","Type":"ContainerDied","Data":"aef726ef7b15e00c7fe80183c827f341521e6976bc27ff600cf2276adcef7075"} Nov 28 16:26:02 crc kubenswrapper[4857]: I1128 16:26:02.875647 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ll7hf" event={"ID":"9afdaf5e-dc0f-4082-9ec5-5e63b371696c","Type":"ContainerStarted","Data":"df12cb065cf85332413f214ff1aeb5543a5d5586594b75ee533e29e6cf9fc867"} Nov 28 16:26:02 crc kubenswrapper[4857]: I1128 16:26:02.877422 4857 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 16:26:03 crc kubenswrapper[4857]: I1128 16:26:03.888433 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ll7hf" event={"ID":"9afdaf5e-dc0f-4082-9ec5-5e63b371696c","Type":"ContainerStarted","Data":"149c08a1820a1662fc783224088a02701ab549279058999fbcbdd7e653449368"} Nov 28 16:26:04 crc kubenswrapper[4857]: I1128 16:26:04.902876 4857 generic.go:334] "Generic (PLEG): container finished" podID="9afdaf5e-dc0f-4082-9ec5-5e63b371696c" containerID="149c08a1820a1662fc783224088a02701ab549279058999fbcbdd7e653449368" exitCode=0 Nov 28 16:26:04 crc kubenswrapper[4857]: I1128 16:26:04.905613 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ll7hf" event={"ID":"9afdaf5e-dc0f-4082-9ec5-5e63b371696c","Type":"ContainerDied","Data":"149c08a1820a1662fc783224088a02701ab549279058999fbcbdd7e653449368"} Nov 28 16:26:06 crc kubenswrapper[4857]: I1128 16:26:06.924094 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ll7hf" event={"ID":"9afdaf5e-dc0f-4082-9ec5-5e63b371696c","Type":"ContainerStarted","Data":"4e348098a3ae03bdc47b79b02bd506e4b4cccfd0ccfaf36fd3f6f1e09ccab934"} Nov 28 16:26:06 crc kubenswrapper[4857]: I1128 16:26:06.970002 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-ll7hf" podStartSLOduration=3.771014213 podStartE2EDuration="6.969979095s" podCreationTimestamp="2025-11-28 16:26:00 +0000 UTC" firstStartedPulling="2025-11-28 16:26:02.877150439 +0000 UTC m=+10613.001091876" lastFinishedPulling="2025-11-28 16:26:06.076115321 +0000 UTC m=+10616.200056758" observedRunningTime="2025-11-28 16:26:06.95653313 +0000 UTC m=+10617.080474567" watchObservedRunningTime="2025-11-28 
16:26:06.969979095 +0000 UTC m=+10617.093920532" Nov 28 16:26:08 crc kubenswrapper[4857]: I1128 16:26:08.118091 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9_fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a/util/0.log" Nov 28 16:26:08 crc kubenswrapper[4857]: I1128 16:26:08.358324 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9_fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a/util/0.log" Nov 28 16:26:08 crc kubenswrapper[4857]: I1128 16:26:08.390507 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9_fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a/pull/0.log" Nov 28 16:26:08 crc kubenswrapper[4857]: I1128 16:26:08.532451 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9_fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a/pull/0.log" Nov 28 16:26:08 crc kubenswrapper[4857]: I1128 16:26:08.751518 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9_fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a/util/0.log" Nov 28 16:26:08 crc kubenswrapper[4857]: I1128 16:26:08.811577 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9_fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a/extract/0.log" Nov 28 16:26:08 crc kubenswrapper[4857]: I1128 16:26:08.856316 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931asfgl9_fd2aa4c1-fb15-4e3d-b015-e8d033e3fd1a/pull/0.log" Nov 28 16:26:09 crc kubenswrapper[4857]: I1128 16:26:09.219598 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftqncr_bca267c5-32a8-4b06-8eb4-b19357392900/util/0.log" Nov 28 16:26:09 crc kubenswrapper[4857]: I1128 16:26:09.300596 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftqncr_bca267c5-32a8-4b06-8eb4-b19357392900/pull/0.log" Nov 28 16:26:09 crc kubenswrapper[4857]: I1128 16:26:09.305813 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftqncr_bca267c5-32a8-4b06-8eb4-b19357392900/util/0.log" Nov 28 16:26:09 crc kubenswrapper[4857]: I1128 16:26:09.328169 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftqncr_bca267c5-32a8-4b06-8eb4-b19357392900/pull/0.log" Nov 28 16:26:09 crc kubenswrapper[4857]: I1128 16:26:09.533513 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftqncr_bca267c5-32a8-4b06-8eb4-b19357392900/util/0.log" Nov 28 16:26:09 crc kubenswrapper[4857]: I1128 16:26:09.571971 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftqncr_bca267c5-32a8-4b06-8eb4-b19357392900/pull/0.log" Nov 28 16:26:09 crc kubenswrapper[4857]: I1128 16:26:09.587428 4857 log.go:25] "Finished parsing 
log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212ftqncr_bca267c5-32a8-4b06-8eb4-b19357392900/extract/0.log" Nov 28 16:26:09 crc kubenswrapper[4857]: I1128 16:26:09.780417 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210mjsgt_acf7905a-757e-4f16-a8bc-6ecbce935582/util/0.log" Nov 28 16:26:09 crc kubenswrapper[4857]: I1128 16:26:09.990809 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210mjsgt_acf7905a-757e-4f16-a8bc-6ecbce935582/util/0.log" Nov 28 16:26:10 crc kubenswrapper[4857]: I1128 16:26:10.032533 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210mjsgt_acf7905a-757e-4f16-a8bc-6ecbce935582/pull/0.log" Nov 28 16:26:10 crc kubenswrapper[4857]: I1128 16:26:10.073882 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210mjsgt_acf7905a-757e-4f16-a8bc-6ecbce935582/pull/0.log" Nov 28 16:26:10 crc kubenswrapper[4857]: I1128 16:26:10.242201 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210mjsgt_acf7905a-757e-4f16-a8bc-6ecbce935582/util/0.log" Nov 28 16:26:10 crc kubenswrapper[4857]: I1128 16:26:10.266838 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210mjsgt_acf7905a-757e-4f16-a8bc-6ecbce935582/extract/0.log" Nov 28 16:26:10 crc kubenswrapper[4857]: I1128 16:26:10.268393 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210mjsgt_acf7905a-757e-4f16-a8bc-6ecbce935582/pull/0.log" Nov 28 16:26:10 crc kubenswrapper[4857]: I1128 16:26:10.481163 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f837hw2q_adabc1a1-93dd-4d7a-9971-19c219dafc3c/util/0.log" Nov 28 16:26:11 crc kubenswrapper[4857]: I1128 16:26:11.308393 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:26:11 crc kubenswrapper[4857]: I1128 16:26:11.308712 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:26:11 crc kubenswrapper[4857]: I1128 16:26:11.520772 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f837hw2q_adabc1a1-93dd-4d7a-9971-19c219dafc3c/pull/0.log" Nov 28 16:26:11 crc kubenswrapper[4857]: I1128 16:26:11.557728 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f837hw2q_adabc1a1-93dd-4d7a-9971-19c219dafc3c/util/0.log" Nov 28 16:26:11 
crc kubenswrapper[4857]: I1128 16:26:11.591815 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f837hw2q_adabc1a1-93dd-4d7a-9971-19c219dafc3c/pull/0.log" Nov 28 16:26:11 crc kubenswrapper[4857]: I1128 16:26:11.668132 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-ll7hf" Nov 28 16:26:11 crc kubenswrapper[4857]: I1128 16:26:11.668216 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-ll7hf" Nov 28 16:26:11 crc kubenswrapper[4857]: I1128 16:26:11.731523 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-ll7hf" Nov 28 16:26:11 crc kubenswrapper[4857]: I1128 16:26:11.782410 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f837hw2q_adabc1a1-93dd-4d7a-9971-19c219dafc3c/util/0.log" Nov 28 16:26:11 crc kubenswrapper[4857]: I1128 16:26:11.807886 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f837hw2q_adabc1a1-93dd-4d7a-9971-19c219dafc3c/extract/0.log" Nov 28 16:26:11 crc kubenswrapper[4857]: I1128 16:26:11.868005 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f837hw2q_adabc1a1-93dd-4d7a-9971-19c219dafc3c/pull/0.log" Nov 28 16:26:12 crc kubenswrapper[4857]: I1128 16:26:12.054004 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-ll7hf" Nov 28 16:26:12 crc kubenswrapper[4857]: I1128 16:26:12.093713 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-99hjq_a2926817-d9c2-4a87-baa5-58b9483eef00/extract-utilities/0.log" Nov 28 16:26:12 crc kubenswrapper[4857]: I1128 16:26:12.111531 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ll7hf"] Nov 28 16:26:12 crc kubenswrapper[4857]: I1128 16:26:12.436802 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-99hjq_a2926817-d9c2-4a87-baa5-58b9483eef00/extract-utilities/0.log" Nov 28 16:26:12 crc kubenswrapper[4857]: I1128 16:26:12.457121 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-99hjq_a2926817-d9c2-4a87-baa5-58b9483eef00/extract-content/0.log" Nov 28 16:26:12 crc kubenswrapper[4857]: I1128 16:26:12.457341 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-99hjq_a2926817-d9c2-4a87-baa5-58b9483eef00/extract-content/0.log" Nov 28 16:26:12 crc kubenswrapper[4857]: I1128 16:26:12.674499 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-99hjq_a2926817-d9c2-4a87-baa5-58b9483eef00/extract-content/0.log" Nov 28 16:26:12 crc kubenswrapper[4857]: I1128 16:26:12.694824 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-99hjq_a2926817-d9c2-4a87-baa5-58b9483eef00/extract-utilities/0.log" Nov 28 16:26:12 crc kubenswrapper[4857]: I1128 16:26:12.786469 4857 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_certified-operators-ll7hf_9afdaf5e-dc0f-4082-9ec5-5e63b371696c/extract-utilities/0.log" Nov 28 16:26:13 crc kubenswrapper[4857]: I1128 16:26:13.564537 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-ll7hf_9afdaf5e-dc0f-4082-9ec5-5e63b371696c/extract-utilities/0.log" Nov 28 16:26:13 crc kubenswrapper[4857]: I1128 16:26:13.579150 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-ll7hf_9afdaf5e-dc0f-4082-9ec5-5e63b371696c/extract-content/0.log" Nov 28 16:26:13 crc kubenswrapper[4857]: I1128 16:26:13.655723 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-ll7hf_9afdaf5e-dc0f-4082-9ec5-5e63b371696c/extract-content/0.log" Nov 28 16:26:13 crc kubenswrapper[4857]: I1128 16:26:13.661591 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-99hjq_a2926817-d9c2-4a87-baa5-58b9483eef00/registry-server/0.log" Nov 28 16:26:13 crc kubenswrapper[4857]: I1128 16:26:13.870458 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-ll7hf_9afdaf5e-dc0f-4082-9ec5-5e63b371696c/registry-server/0.log" Nov 28 16:26:13 crc kubenswrapper[4857]: I1128 16:26:13.915150 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-ll7hf_9afdaf5e-dc0f-4082-9ec5-5e63b371696c/extract-utilities/0.log" Nov 28 16:26:13 crc kubenswrapper[4857]: I1128 16:26:13.933556 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-ll7hf_9afdaf5e-dc0f-4082-9ec5-5e63b371696c/extract-content/0.log" Nov 28 16:26:13 crc kubenswrapper[4857]: I1128 16:26:13.960923 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-mdjp7_9fa5c6f1-d612-4ae1-993f-b2ebc20a5d91/extract-utilities/0.log" Nov 28 16:26:14 crc kubenswrapper[4857]: I1128 16:26:14.016220 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-ll7hf" podUID="9afdaf5e-dc0f-4082-9ec5-5e63b371696c" containerName="registry-server" containerID="cri-o://4e348098a3ae03bdc47b79b02bd506e4b4cccfd0ccfaf36fd3f6f1e09ccab934" gracePeriod=2 Nov 28 16:26:14 crc kubenswrapper[4857]: I1128 16:26:14.104827 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-mdjp7_9fa5c6f1-d612-4ae1-993f-b2ebc20a5d91/extract-content/0.log" Nov 28 16:26:14 crc kubenswrapper[4857]: I1128 16:26:14.116083 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-mdjp7_9fa5c6f1-d612-4ae1-993f-b2ebc20a5d91/extract-content/0.log" Nov 28 16:26:14 crc kubenswrapper[4857]: I1128 16:26:14.171910 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-mdjp7_9fa5c6f1-d612-4ae1-993f-b2ebc20a5d91/extract-utilities/0.log" Nov 28 16:26:14 crc kubenswrapper[4857]: I1128 16:26:14.379528 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-mdjp7_9fa5c6f1-d612-4ae1-993f-b2ebc20a5d91/extract-utilities/0.log" Nov 28 16:26:14 crc kubenswrapper[4857]: I1128 16:26:14.384132 4857 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_community-operators-mdjp7_9fa5c6f1-d612-4ae1-993f-b2ebc20a5d91/extract-content/0.log" Nov 28 16:26:14 crc kubenswrapper[4857]: I1128 16:26:14.596020 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-j9xpx_63b3515b-2b80-4d2e-b3a7-9e8ff96223f6/marketplace-operator/0.log" Nov 28 16:26:14 crc kubenswrapper[4857]: I1128 16:26:14.653183 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-86gwb_2142b9c8-bd24-407d-a45f-b2a9d2019b71/extract-utilities/0.log" Nov 28 16:26:14 crc kubenswrapper[4857]: I1128 16:26:14.880529 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-86gwb_2142b9c8-bd24-407d-a45f-b2a9d2019b71/extract-utilities/0.log" Nov 28 16:26:14 crc kubenswrapper[4857]: I1128 16:26:14.910080 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-86gwb_2142b9c8-bd24-407d-a45f-b2a9d2019b71/extract-content/0.log" Nov 28 16:26:14 crc kubenswrapper[4857]: I1128 16:26:14.922188 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-86gwb_2142b9c8-bd24-407d-a45f-b2a9d2019b71/extract-content/0.log" Nov 28 16:26:15 crc kubenswrapper[4857]: I1128 16:26:15.027505 4857 generic.go:334] "Generic (PLEG): container finished" podID="9afdaf5e-dc0f-4082-9ec5-5e63b371696c" containerID="4e348098a3ae03bdc47b79b02bd506e4b4cccfd0ccfaf36fd3f6f1e09ccab934" exitCode=0 Nov 28 16:26:15 crc kubenswrapper[4857]: I1128 16:26:15.027546 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ll7hf" event={"ID":"9afdaf5e-dc0f-4082-9ec5-5e63b371696c","Type":"ContainerDied","Data":"4e348098a3ae03bdc47b79b02bd506e4b4cccfd0ccfaf36fd3f6f1e09ccab934"} Nov 28 16:26:15 crc kubenswrapper[4857]: I1128 16:26:15.158189 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-86gwb_2142b9c8-bd24-407d-a45f-b2a9d2019b71/extract-content/0.log" Nov 28 16:26:15 crc kubenswrapper[4857]: I1128 16:26:15.240103 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-86gwb_2142b9c8-bd24-407d-a45f-b2a9d2019b71/extract-utilities/0.log" Nov 28 16:26:15 crc kubenswrapper[4857]: I1128 16:26:15.507007 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-t9qsp_7b43afe0-d9e1-4864-9a0e-b7e900755cfd/extract-utilities/0.log" Nov 28 16:26:15 crc kubenswrapper[4857]: I1128 16:26:15.653179 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ll7hf" Nov 28 16:26:15 crc kubenswrapper[4857]: I1128 16:26:15.722203 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-t9qsp_7b43afe0-d9e1-4864-9a0e-b7e900755cfd/extract-utilities/0.log" Nov 28 16:26:15 crc kubenswrapper[4857]: I1128 16:26:15.758106 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9afdaf5e-dc0f-4082-9ec5-5e63b371696c-catalog-content\") pod \"9afdaf5e-dc0f-4082-9ec5-5e63b371696c\" (UID: \"9afdaf5e-dc0f-4082-9ec5-5e63b371696c\") " Nov 28 16:26:15 crc kubenswrapper[4857]: I1128 16:26:15.758203 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rhzq5\" (UniqueName: \"kubernetes.io/projected/9afdaf5e-dc0f-4082-9ec5-5e63b371696c-kube-api-access-rhzq5\") pod \"9afdaf5e-dc0f-4082-9ec5-5e63b371696c\" (UID: \"9afdaf5e-dc0f-4082-9ec5-5e63b371696c\") " Nov 28 16:26:15 crc kubenswrapper[4857]: I1128 16:26:15.758369 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9afdaf5e-dc0f-4082-9ec5-5e63b371696c-utilities\") pod \"9afdaf5e-dc0f-4082-9ec5-5e63b371696c\" (UID: \"9afdaf5e-dc0f-4082-9ec5-5e63b371696c\") " Nov 28 16:26:15 crc kubenswrapper[4857]: I1128 16:26:15.764043 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9afdaf5e-dc0f-4082-9ec5-5e63b371696c-utilities" (OuterVolumeSpecName: "utilities") pod "9afdaf5e-dc0f-4082-9ec5-5e63b371696c" (UID: "9afdaf5e-dc0f-4082-9ec5-5e63b371696c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:26:15 crc kubenswrapper[4857]: I1128 16:26:15.765697 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9afdaf5e-dc0f-4082-9ec5-5e63b371696c-kube-api-access-rhzq5" (OuterVolumeSpecName: "kube-api-access-rhzq5") pod "9afdaf5e-dc0f-4082-9ec5-5e63b371696c" (UID: "9afdaf5e-dc0f-4082-9ec5-5e63b371696c"). InnerVolumeSpecName "kube-api-access-rhzq5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:26:15 crc kubenswrapper[4857]: I1128 16:26:15.819234 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9afdaf5e-dc0f-4082-9ec5-5e63b371696c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9afdaf5e-dc0f-4082-9ec5-5e63b371696c" (UID: "9afdaf5e-dc0f-4082-9ec5-5e63b371696c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:26:15 crc kubenswrapper[4857]: I1128 16:26:15.832549 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-86gwb_2142b9c8-bd24-407d-a45f-b2a9d2019b71/registry-server/0.log" Nov 28 16:26:15 crc kubenswrapper[4857]: I1128 16:26:15.838418 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-t9qsp_7b43afe0-d9e1-4864-9a0e-b7e900755cfd/extract-content/0.log" Nov 28 16:26:15 crc kubenswrapper[4857]: I1128 16:26:15.838508 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-t9qsp_7b43afe0-d9e1-4864-9a0e-b7e900755cfd/extract-content/0.log" Nov 28 16:26:15 crc kubenswrapper[4857]: I1128 16:26:15.862614 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9afdaf5e-dc0f-4082-9ec5-5e63b371696c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 28 16:26:15 crc kubenswrapper[4857]: I1128 16:26:15.862659 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rhzq5\" (UniqueName: \"kubernetes.io/projected/9afdaf5e-dc0f-4082-9ec5-5e63b371696c-kube-api-access-rhzq5\") on node \"crc\" DevicePath \"\"" Nov 28 16:26:15 crc kubenswrapper[4857]: I1128 16:26:15.862673 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9afdaf5e-dc0f-4082-9ec5-5e63b371696c-utilities\") on node \"crc\" DevicePath \"\"" Nov 28 16:26:16 crc kubenswrapper[4857]: I1128 16:26:16.040680 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ll7hf" event={"ID":"9afdaf5e-dc0f-4082-9ec5-5e63b371696c","Type":"ContainerDied","Data":"df12cb065cf85332413f214ff1aeb5543a5d5586594b75ee533e29e6cf9fc867"} Nov 28 16:26:16 crc kubenswrapper[4857]: I1128 16:26:16.040983 4857 scope.go:117] "RemoveContainer" containerID="4e348098a3ae03bdc47b79b02bd506e4b4cccfd0ccfaf36fd3f6f1e09ccab934" Nov 28 16:26:16 crc kubenswrapper[4857]: I1128 16:26:16.041113 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ll7hf" Nov 28 16:26:16 crc kubenswrapper[4857]: I1128 16:26:16.070398 4857 scope.go:117] "RemoveContainer" containerID="149c08a1820a1662fc783224088a02701ab549279058999fbcbdd7e653449368" Nov 28 16:26:16 crc kubenswrapper[4857]: I1128 16:26:16.077734 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-t9qsp_7b43afe0-d9e1-4864-9a0e-b7e900755cfd/extract-utilities/0.log" Nov 28 16:26:16 crc kubenswrapper[4857]: I1128 16:26:16.098177 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ll7hf"] Nov 28 16:26:16 crc kubenswrapper[4857]: I1128 16:26:16.104417 4857 scope.go:117] "RemoveContainer" containerID="aef726ef7b15e00c7fe80183c827f341521e6976bc27ff600cf2276adcef7075" Nov 28 16:26:16 crc kubenswrapper[4857]: I1128 16:26:16.109371 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-t9qsp_7b43afe0-d9e1-4864-9a0e-b7e900755cfd/extract-content/0.log" Nov 28 16:26:16 crc kubenswrapper[4857]: I1128 16:26:16.158349 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-ll7hf"] Nov 28 16:26:16 crc kubenswrapper[4857]: I1128 16:26:16.252687 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9afdaf5e-dc0f-4082-9ec5-5e63b371696c" path="/var/lib/kubelet/pods/9afdaf5e-dc0f-4082-9ec5-5e63b371696c/volumes" Nov 28 16:26:16 crc kubenswrapper[4857]: I1128 16:26:16.450103 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-mdjp7_9fa5c6f1-d612-4ae1-993f-b2ebc20a5d91/registry-server/0.log" Nov 28 16:26:17 crc kubenswrapper[4857]: I1128 16:26:17.396414 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-t9qsp_7b43afe0-d9e1-4864-9a0e-b7e900755cfd/registry-server/0.log" Nov 28 16:26:30 crc kubenswrapper[4857]: I1128 16:26:30.585298 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-668cf9dfbb-lcqs2_84754d36-7aff-4640-920a-a85f8af97445/prometheus-operator/0.log" Nov 28 16:26:30 crc kubenswrapper[4857]: I1128 16:26:30.772076 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-567d869bcf-dnb4j_64b605ec-ff9f-4050-b8ae-37d88dec247e/prometheus-operator-admission-webhook/0.log" Nov 28 16:26:30 crc kubenswrapper[4857]: I1128 16:26:30.779839 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-567d869bcf-gq4nn_1ece95ae-d2f5-4c91-904a-945060d180a1/prometheus-operator-admission-webhook/0.log" Nov 28 16:26:31 crc kubenswrapper[4857]: I1128 16:26:31.002227 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-d8bb48f5d-6cskd_fc85d7fb-0ce3-43c2-ae33-706045cfd36d/operator/0.log" Nov 28 16:26:31 crc kubenswrapper[4857]: I1128 16:26:31.087426 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5446b9c989-ldvcd_aeec4c3d-13d1-4feb-8b06-6867804d14f1/perses-operator/0.log" Nov 28 16:26:41 crc kubenswrapper[4857]: I1128 16:26:41.309199 4857 patch_prober.go:28] interesting pod/machine-config-daemon-dshsf container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 28 16:26:41 crc kubenswrapper[4857]: I1128 16:26:41.310684 4857 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 28 16:26:41 crc kubenswrapper[4857]: I1128 16:26:41.310833 4857 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" Nov 28 16:26:41 crc kubenswrapper[4857]: I1128 16:26:41.311786 4857 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8a6621105ed3b693ac6fbebe439c6724963ff233af722cd9b255685c7859c917"} pod="openshift-machine-config-operator/machine-config-daemon-dshsf" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 28 16:26:41 crc kubenswrapper[4857]: I1128 16:26:41.311983 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerName="machine-config-daemon" containerID="cri-o://8a6621105ed3b693ac6fbebe439c6724963ff233af722cd9b255685c7859c917" gracePeriod=600 Nov 28 16:26:41 crc kubenswrapper[4857]: E1128 16:26:41.436824 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:26:42 crc kubenswrapper[4857]: I1128 16:26:42.415076 4857 generic.go:334] "Generic (PLEG): container finished" podID="5d5445a4-417c-448a-a8a0-4a4f81828aff" containerID="8a6621105ed3b693ac6fbebe439c6724963ff233af722cd9b255685c7859c917" exitCode=0 Nov 28 16:26:42 crc kubenswrapper[4857]: I1128 16:26:42.415303 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerDied","Data":"8a6621105ed3b693ac6fbebe439c6724963ff233af722cd9b255685c7859c917"} Nov 28 16:26:42 crc kubenswrapper[4857]: I1128 16:26:42.415737 4857 scope.go:117] "RemoveContainer" containerID="78b43531a5e30b821031568eb8482e23fe9d7dd1f41427c4efa4122a4c8e8ebc" Nov 28 16:26:42 crc kubenswrapper[4857]: I1128 16:26:42.416698 4857 scope.go:117] "RemoveContainer" containerID="8a6621105ed3b693ac6fbebe439c6724963ff233af722cd9b255685c7859c917" Nov 28 16:26:42 crc kubenswrapper[4857]: E1128 16:26:42.417013 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:26:48 crc kubenswrapper[4857]: E1128 16:26:48.372052 4857 upgradeaware.go:427] 
Error proxying data from client to backend: readfrom tcp 38.102.83.222:50454->38.102.83.222:43621: write tcp 38.102.83.222:50454->38.102.83.222:43621: write: broken pipe Nov 28 16:26:53 crc kubenswrapper[4857]: E1128 16:26:53.088983 4857 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.222:50574->38.102.83.222:43621: write tcp 38.102.83.222:50574->38.102.83.222:43621: write: broken pipe Nov 28 16:26:55 crc kubenswrapper[4857]: I1128 16:26:55.228679 4857 scope.go:117] "RemoveContainer" containerID="8a6621105ed3b693ac6fbebe439c6724963ff233af722cd9b255685c7859c917" Nov 28 16:26:55 crc kubenswrapper[4857]: E1128 16:26:55.229467 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:27:07 crc kubenswrapper[4857]: I1128 16:27:07.228616 4857 scope.go:117] "RemoveContainer" containerID="8a6621105ed3b693ac6fbebe439c6724963ff233af722cd9b255685c7859c917" Nov 28 16:27:07 crc kubenswrapper[4857]: E1128 16:27:07.229465 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:27:22 crc kubenswrapper[4857]: I1128 16:27:22.229768 4857 scope.go:117] "RemoveContainer" containerID="8a6621105ed3b693ac6fbebe439c6724963ff233af722cd9b255685c7859c917" Nov 28 16:27:22 crc kubenswrapper[4857]: E1128 16:27:22.230564 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:27:34 crc kubenswrapper[4857]: I1128 16:27:34.229583 4857 scope.go:117] "RemoveContainer" containerID="8a6621105ed3b693ac6fbebe439c6724963ff233af722cd9b255685c7859c917" Nov 28 16:27:34 crc kubenswrapper[4857]: E1128 16:27:34.230713 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:27:46 crc kubenswrapper[4857]: I1128 16:27:46.229626 4857 scope.go:117] "RemoveContainer" containerID="8a6621105ed3b693ac6fbebe439c6724963ff233af722cd9b255685c7859c917" Nov 28 16:27:46 crc kubenswrapper[4857]: E1128 16:27:46.230485 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
Nov 28 16:28:00 crc kubenswrapper[4857]: I1128 16:28:00.239877 4857 scope.go:117] "RemoveContainer" containerID="8a6621105ed3b693ac6fbebe439c6724963ff233af722cd9b255685c7859c917"
Nov 28 16:28:00 crc kubenswrapper[4857]: E1128 16:28:00.240768 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 16:28:12 crc kubenswrapper[4857]: I1128 16:28:12.229855 4857 scope.go:117] "RemoveContainer" containerID="8a6621105ed3b693ac6fbebe439c6724963ff233af722cd9b255685c7859c917"
Nov 28 16:28:12 crc kubenswrapper[4857]: E1128 16:28:12.230811 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 16:28:26 crc kubenswrapper[4857]: I1128 16:28:26.229556 4857 scope.go:117] "RemoveContainer" containerID="8a6621105ed3b693ac6fbebe439c6724963ff233af722cd9b255685c7859c917"
Nov 28 16:28:26 crc kubenswrapper[4857]: E1128 16:28:26.230529 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 16:28:40 crc kubenswrapper[4857]: I1128 16:28:40.241843 4857 scope.go:117] "RemoveContainer" containerID="8a6621105ed3b693ac6fbebe439c6724963ff233af722cd9b255685c7859c917"
Nov 28 16:28:40 crc kubenswrapper[4857]: E1128 16:28:40.242647 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 16:28:46 crc kubenswrapper[4857]: I1128 16:28:46.731823 4857 generic.go:334] "Generic (PLEG): container finished" podID="ab9ccee8-48a0-4596-b4dd-575578b5d6fc" containerID="624a0595ecab4e99cecc273ba5d55dbec6292774988bdd5f378268c5a2d4bdd9" exitCode=0
Nov 28 16:28:46 crc kubenswrapper[4857]: I1128 16:28:46.731904 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-swvhw/must-gather-jl8r5" event={"ID":"ab9ccee8-48a0-4596-b4dd-575578b5d6fc","Type":"ContainerDied","Data":"624a0595ecab4e99cecc273ba5d55dbec6292774988bdd5f378268c5a2d4bdd9"}
Nov 28 16:28:46 crc kubenswrapper[4857]: I1128 16:28:46.733015 4857 scope.go:117] "RemoveContainer" containerID="624a0595ecab4e99cecc273ba5d55dbec6292774988bdd5f378268c5a2d4bdd9"
Nov 28 16:28:46 crc kubenswrapper[4857]: I1128 16:28:46.980199 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-swvhw_must-gather-jl8r5_ab9ccee8-48a0-4596-b4dd-575578b5d6fc/gather/0.log"
Nov 28 16:28:52 crc kubenswrapper[4857]: I1128 16:28:52.229388 4857 scope.go:117] "RemoveContainer" containerID="8a6621105ed3b693ac6fbebe439c6724963ff233af722cd9b255685c7859c917"
Nov 28 16:28:52 crc kubenswrapper[4857]: E1128 16:28:52.230077 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 16:28:58 crc kubenswrapper[4857]: I1128 16:28:58.506297 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-swvhw/must-gather-jl8r5"]
Nov 28 16:28:58 crc kubenswrapper[4857]: I1128 16:28:58.507223 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-swvhw/must-gather-jl8r5" podUID="ab9ccee8-48a0-4596-b4dd-575578b5d6fc" containerName="copy" containerID="cri-o://8928d24d902fc449a4b4aeabaa809a8afdb503dff0a130afe8dfdf71b1ed8fcf" gracePeriod=2
Nov 28 16:28:58 crc kubenswrapper[4857]: I1128 16:28:58.526583 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-swvhw/must-gather-jl8r5"]
Nov 28 16:28:58 crc kubenswrapper[4857]: I1128 16:28:58.862827 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-swvhw_must-gather-jl8r5_ab9ccee8-48a0-4596-b4dd-575578b5d6fc/copy/0.log"
Nov 28 16:28:58 crc kubenswrapper[4857]: I1128 16:28:58.863381 4857 generic.go:334] "Generic (PLEG): container finished" podID="ab9ccee8-48a0-4596-b4dd-575578b5d6fc" containerID="8928d24d902fc449a4b4aeabaa809a8afdb503dff0a130afe8dfdf71b1ed8fcf" exitCode=143
Nov 28 16:28:59 crc kubenswrapper[4857]: I1128 16:28:59.878498 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-swvhw_must-gather-jl8r5_ab9ccee8-48a0-4596-b4dd-575578b5d6fc/copy/0.log"
Nov 28 16:28:59 crc kubenswrapper[4857]: I1128 16:28:59.879566 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ec279bc4050ebeafcd5efac6cd811af7fc7dab0c3ddf60a42b40bec8d0aea785"
Nov 28 16:28:59 crc kubenswrapper[4857]: I1128 16:28:59.957059 4857 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-swvhw_must-gather-jl8r5_ab9ccee8-48a0-4596-b4dd-575578b5d6fc/copy/0.log"
Nov 28 16:28:59 crc kubenswrapper[4857]: I1128 16:28:59.957573 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-swvhw/must-gather-jl8r5"
Need to start a new one" pod="openshift-must-gather-swvhw/must-gather-jl8r5" Nov 28 16:29:00 crc kubenswrapper[4857]: I1128 16:29:00.079694 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ab9ccee8-48a0-4596-b4dd-575578b5d6fc-must-gather-output\") pod \"ab9ccee8-48a0-4596-b4dd-575578b5d6fc\" (UID: \"ab9ccee8-48a0-4596-b4dd-575578b5d6fc\") " Nov 28 16:29:00 crc kubenswrapper[4857]: I1128 16:29:00.079865 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mv4nm\" (UniqueName: \"kubernetes.io/projected/ab9ccee8-48a0-4596-b4dd-575578b5d6fc-kube-api-access-mv4nm\") pod \"ab9ccee8-48a0-4596-b4dd-575578b5d6fc\" (UID: \"ab9ccee8-48a0-4596-b4dd-575578b5d6fc\") " Nov 28 16:29:00 crc kubenswrapper[4857]: I1128 16:29:00.086174 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab9ccee8-48a0-4596-b4dd-575578b5d6fc-kube-api-access-mv4nm" (OuterVolumeSpecName: "kube-api-access-mv4nm") pod "ab9ccee8-48a0-4596-b4dd-575578b5d6fc" (UID: "ab9ccee8-48a0-4596-b4dd-575578b5d6fc"). InnerVolumeSpecName "kube-api-access-mv4nm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:29:00 crc kubenswrapper[4857]: I1128 16:29:00.185486 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mv4nm\" (UniqueName: \"kubernetes.io/projected/ab9ccee8-48a0-4596-b4dd-575578b5d6fc-kube-api-access-mv4nm\") on node \"crc\" DevicePath \"\"" Nov 28 16:29:00 crc kubenswrapper[4857]: I1128 16:29:00.280173 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ab9ccee8-48a0-4596-b4dd-575578b5d6fc-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "ab9ccee8-48a0-4596-b4dd-575578b5d6fc" (UID: "ab9ccee8-48a0-4596-b4dd-575578b5d6fc"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 28 16:29:00 crc kubenswrapper[4857]: I1128 16:29:00.293590 4857 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ab9ccee8-48a0-4596-b4dd-575578b5d6fc-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 28 16:29:00 crc kubenswrapper[4857]: I1128 16:29:00.892526 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-swvhw/must-gather-jl8r5" Nov 28 16:29:02 crc kubenswrapper[4857]: I1128 16:29:02.239587 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ab9ccee8-48a0-4596-b4dd-575578b5d6fc" path="/var/lib/kubelet/pods/ab9ccee8-48a0-4596-b4dd-575578b5d6fc/volumes" Nov 28 16:29:06 crc kubenswrapper[4857]: I1128 16:29:06.229557 4857 scope.go:117] "RemoveContainer" containerID="8a6621105ed3b693ac6fbebe439c6724963ff233af722cd9b255685c7859c917" Nov 28 16:29:06 crc kubenswrapper[4857]: E1128 16:29:06.230417 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:29:18 crc kubenswrapper[4857]: I1128 16:29:18.228513 4857 scope.go:117] "RemoveContainer" containerID="8a6621105ed3b693ac6fbebe439c6724963ff233af722cd9b255685c7859c917" Nov 28 16:29:18 crc kubenswrapper[4857]: E1128 16:29:18.230119 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:29:29 crc kubenswrapper[4857]: I1128 16:29:29.229290 4857 scope.go:117] "RemoveContainer" containerID="8a6621105ed3b693ac6fbebe439c6724963ff233af722cd9b255685c7859c917" Nov 28 16:29:29 crc kubenswrapper[4857]: E1128 16:29:29.230211 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:29:31 crc kubenswrapper[4857]: I1128 16:29:31.824902 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wkqpq"] Nov 28 16:29:31 crc kubenswrapper[4857]: E1128 16:29:31.825990 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9afdaf5e-dc0f-4082-9ec5-5e63b371696c" containerName="extract-content" Nov 28 16:29:31 crc kubenswrapper[4857]: I1128 16:29:31.826006 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="9afdaf5e-dc0f-4082-9ec5-5e63b371696c" containerName="extract-content" Nov 28 16:29:31 crc kubenswrapper[4857]: E1128 16:29:31.826025 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab9ccee8-48a0-4596-b4dd-575578b5d6fc" containerName="gather" Nov 28 16:29:31 crc kubenswrapper[4857]: I1128 16:29:31.826034 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab9ccee8-48a0-4596-b4dd-575578b5d6fc" containerName="gather" Nov 28 16:29:31 crc kubenswrapper[4857]: E1128 16:29:31.826063 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab9ccee8-48a0-4596-b4dd-575578b5d6fc" containerName="copy" Nov 28 16:29:31 crc kubenswrapper[4857]: I1128 16:29:31.826072 4857 
state_mem.go:107] "Deleted CPUSet assignment" podUID="ab9ccee8-48a0-4596-b4dd-575578b5d6fc" containerName="copy" Nov 28 16:29:31 crc kubenswrapper[4857]: E1128 16:29:31.826084 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9afdaf5e-dc0f-4082-9ec5-5e63b371696c" containerName="registry-server" Nov 28 16:29:31 crc kubenswrapper[4857]: I1128 16:29:31.826092 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="9afdaf5e-dc0f-4082-9ec5-5e63b371696c" containerName="registry-server" Nov 28 16:29:31 crc kubenswrapper[4857]: E1128 16:29:31.826126 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9afdaf5e-dc0f-4082-9ec5-5e63b371696c" containerName="extract-utilities" Nov 28 16:29:31 crc kubenswrapper[4857]: I1128 16:29:31.826134 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="9afdaf5e-dc0f-4082-9ec5-5e63b371696c" containerName="extract-utilities" Nov 28 16:29:31 crc kubenswrapper[4857]: I1128 16:29:31.826374 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab9ccee8-48a0-4596-b4dd-575578b5d6fc" containerName="gather" Nov 28 16:29:31 crc kubenswrapper[4857]: I1128 16:29:31.826410 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="9afdaf5e-dc0f-4082-9ec5-5e63b371696c" containerName="registry-server" Nov 28 16:29:31 crc kubenswrapper[4857]: I1128 16:29:31.826425 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab9ccee8-48a0-4596-b4dd-575578b5d6fc" containerName="copy" Nov 28 16:29:31 crc kubenswrapper[4857]: I1128 16:29:31.828267 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wkqpq" Nov 28 16:29:31 crc kubenswrapper[4857]: I1128 16:29:31.838666 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wkqpq"] Nov 28 16:29:31 crc kubenswrapper[4857]: I1128 16:29:31.977152 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df-utilities\") pod \"redhat-operators-wkqpq\" (UID: \"b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df\") " pod="openshift-marketplace/redhat-operators-wkqpq" Nov 28 16:29:31 crc kubenswrapper[4857]: I1128 16:29:31.977204 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t9ksf\" (UniqueName: \"kubernetes.io/projected/b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df-kube-api-access-t9ksf\") pod \"redhat-operators-wkqpq\" (UID: \"b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df\") " pod="openshift-marketplace/redhat-operators-wkqpq" Nov 28 16:29:31 crc kubenswrapper[4857]: I1128 16:29:31.977374 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df-catalog-content\") pod \"redhat-operators-wkqpq\" (UID: \"b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df\") " pod="openshift-marketplace/redhat-operators-wkqpq" Nov 28 16:29:32 crc kubenswrapper[4857]: I1128 16:29:32.079644 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df-catalog-content\") pod \"redhat-operators-wkqpq\" (UID: \"b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df\") " pod="openshift-marketplace/redhat-operators-wkqpq" Nov 28 16:29:32 crc kubenswrapper[4857]: I1128 16:29:32.080212 4857 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df-utilities\") pod \"redhat-operators-wkqpq\" (UID: \"b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df\") " pod="openshift-marketplace/redhat-operators-wkqpq" Nov 28 16:29:32 crc kubenswrapper[4857]: I1128 16:29:32.080329 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t9ksf\" (UniqueName: \"kubernetes.io/projected/b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df-kube-api-access-t9ksf\") pod \"redhat-operators-wkqpq\" (UID: \"b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df\") " pod="openshift-marketplace/redhat-operators-wkqpq" Nov 28 16:29:32 crc kubenswrapper[4857]: I1128 16:29:32.080634 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df-utilities\") pod \"redhat-operators-wkqpq\" (UID: \"b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df\") " pod="openshift-marketplace/redhat-operators-wkqpq" Nov 28 16:29:32 crc kubenswrapper[4857]: I1128 16:29:32.080788 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df-catalog-content\") pod \"redhat-operators-wkqpq\" (UID: \"b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df\") " pod="openshift-marketplace/redhat-operators-wkqpq" Nov 28 16:29:32 crc kubenswrapper[4857]: I1128 16:29:32.100203 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t9ksf\" (UniqueName: \"kubernetes.io/projected/b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df-kube-api-access-t9ksf\") pod \"redhat-operators-wkqpq\" (UID: \"b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df\") " pod="openshift-marketplace/redhat-operators-wkqpq" Nov 28 16:29:32 crc kubenswrapper[4857]: I1128 16:29:32.166551 4857 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wkqpq" Nov 28 16:29:32 crc kubenswrapper[4857]: I1128 16:29:32.694876 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wkqpq"] Nov 28 16:29:33 crc kubenswrapper[4857]: I1128 16:29:33.234306 4857 generic.go:334] "Generic (PLEG): container finished" podID="b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df" containerID="0d91321bf7fae2dfada8bdc41a50916a8c18e3948b36c3cac850d305f2dad581" exitCode=0 Nov 28 16:29:33 crc kubenswrapper[4857]: I1128 16:29:33.234382 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wkqpq" event={"ID":"b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df","Type":"ContainerDied","Data":"0d91321bf7fae2dfada8bdc41a50916a8c18e3948b36c3cac850d305f2dad581"} Nov 28 16:29:33 crc kubenswrapper[4857]: I1128 16:29:33.234619 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wkqpq" event={"ID":"b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df","Type":"ContainerStarted","Data":"16f2228e91bf65a09d18f1f4fd06c6c7d6f69c645e8c13256037f1cbba92f28c"} Nov 28 16:29:34 crc kubenswrapper[4857]: I1128 16:29:34.218312 4857 scope.go:117] "RemoveContainer" containerID="624a0595ecab4e99cecc273ba5d55dbec6292774988bdd5f378268c5a2d4bdd9" Nov 28 16:29:34 crc kubenswrapper[4857]: I1128 16:29:34.299904 4857 scope.go:117] "RemoveContainer" containerID="8928d24d902fc449a4b4aeabaa809a8afdb503dff0a130afe8dfdf71b1ed8fcf" Nov 28 16:29:35 crc kubenswrapper[4857]: I1128 16:29:35.255115 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wkqpq" event={"ID":"b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df","Type":"ContainerStarted","Data":"ba8becd2004cec1023fc1511a89b232d4ec3ac8c5df6791d56ae519b5a6ae076"} Nov 28 16:29:36 crc kubenswrapper[4857]: I1128 16:29:36.268003 4857 generic.go:334] "Generic (PLEG): container finished" podID="b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df" containerID="ba8becd2004cec1023fc1511a89b232d4ec3ac8c5df6791d56ae519b5a6ae076" exitCode=0 Nov 28 16:29:36 crc kubenswrapper[4857]: I1128 16:29:36.268337 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wkqpq" event={"ID":"b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df","Type":"ContainerDied","Data":"ba8becd2004cec1023fc1511a89b232d4ec3ac8c5df6791d56ae519b5a6ae076"} Nov 28 16:29:37 crc kubenswrapper[4857]: I1128 16:29:37.280394 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wkqpq" event={"ID":"b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df","Type":"ContainerStarted","Data":"50241e97da788ca42e6b4f982f73acdc838ad882358216398868d7968f443488"} Nov 28 16:29:37 crc kubenswrapper[4857]: I1128 16:29:37.299316 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wkqpq" podStartSLOduration=2.72963116 podStartE2EDuration="6.299296376s" podCreationTimestamp="2025-11-28 16:29:31 +0000 UTC" firstStartedPulling="2025-11-28 16:29:33.238728582 +0000 UTC m=+10823.362670019" lastFinishedPulling="2025-11-28 16:29:36.808393798 +0000 UTC m=+10826.932335235" observedRunningTime="2025-11-28 16:29:37.299282826 +0000 UTC m=+10827.423224263" watchObservedRunningTime="2025-11-28 16:29:37.299296376 +0000 UTC m=+10827.423237813" Nov 28 16:29:42 crc kubenswrapper[4857]: I1128 16:29:42.168088 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wkqpq" Nov 28 
Nov 28 16:29:42 crc kubenswrapper[4857]: I1128 16:29:42.220041 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wkqpq"
Nov 28 16:29:42 crc kubenswrapper[4857]: I1128 16:29:42.377919 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wkqpq"
Nov 28 16:29:42 crc kubenswrapper[4857]: I1128 16:29:42.455844 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wkqpq"]
Nov 28 16:29:44 crc kubenswrapper[4857]: I1128 16:29:44.228641 4857 scope.go:117] "RemoveContainer" containerID="8a6621105ed3b693ac6fbebe439c6724963ff233af722cd9b255685c7859c917"
Nov 28 16:29:44 crc kubenswrapper[4857]: E1128 16:29:44.229306 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 16:29:44 crc kubenswrapper[4857]: I1128 16:29:44.344250 4857 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wkqpq" podUID="b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df" containerName="registry-server" containerID="cri-o://50241e97da788ca42e6b4f982f73acdc838ad882358216398868d7968f443488" gracePeriod=2
Nov 28 16:29:44 crc kubenswrapper[4857]: I1128 16:29:44.821869 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wkqpq"
Nov 28 16:29:44 crc kubenswrapper[4857]: I1128 16:29:44.905029 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t9ksf\" (UniqueName: \"kubernetes.io/projected/b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df-kube-api-access-t9ksf\") pod \"b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df\" (UID: \"b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df\") "
Nov 28 16:29:44 crc kubenswrapper[4857]: I1128 16:29:44.905084 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df-utilities\") pod \"b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df\" (UID: \"b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df\") "
Nov 28 16:29:44 crc kubenswrapper[4857]: I1128 16:29:44.905190 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df-catalog-content\") pod \"b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df\" (UID: \"b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df\") "
Nov 28 16:29:44 crc kubenswrapper[4857]: I1128 16:29:44.906218 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df-utilities" (OuterVolumeSpecName: "utilities") pod "b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df" (UID: "b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:29:44 crc kubenswrapper[4857]: I1128 16:29:44.920164 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df-kube-api-access-t9ksf" (OuterVolumeSpecName: "kube-api-access-t9ksf") pod "b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df" (UID: "b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df"). InnerVolumeSpecName "kube-api-access-t9ksf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 28 16:29:45 crc kubenswrapper[4857]: I1128 16:29:45.007616 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t9ksf\" (UniqueName: \"kubernetes.io/projected/b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df-kube-api-access-t9ksf\") on node \"crc\" DevicePath \"\""
Nov 28 16:29:45 crc kubenswrapper[4857]: I1128 16:29:45.007652 4857 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df-utilities\") on node \"crc\" DevicePath \"\""
Nov 28 16:29:45 crc kubenswrapper[4857]: I1128 16:29:45.013694 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df" (UID: "b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 28 16:29:45 crc kubenswrapper[4857]: I1128 16:29:45.110059 4857 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 28 16:29:45 crc kubenswrapper[4857]: I1128 16:29:45.365340 4857 generic.go:334] "Generic (PLEG): container finished" podID="b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df" containerID="50241e97da788ca42e6b4f982f73acdc838ad882358216398868d7968f443488" exitCode=0
Nov 28 16:29:45 crc kubenswrapper[4857]: I1128 16:29:45.365420 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wkqpq" event={"ID":"b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df","Type":"ContainerDied","Data":"50241e97da788ca42e6b4f982f73acdc838ad882358216398868d7968f443488"}
Nov 28 16:29:45 crc kubenswrapper[4857]: I1128 16:29:45.365455 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wkqpq" event={"ID":"b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df","Type":"ContainerDied","Data":"16f2228e91bf65a09d18f1f4fd06c6c7d6f69c645e8c13256037f1cbba92f28c"}
Nov 28 16:29:45 crc kubenswrapper[4857]: I1128 16:29:45.365476 4857 scope.go:117] "RemoveContainer" containerID="50241e97da788ca42e6b4f982f73acdc838ad882358216398868d7968f443488"
Nov 28 16:29:45 crc kubenswrapper[4857]: I1128 16:29:45.365695 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wkqpq"
Nov 28 16:29:45 crc kubenswrapper[4857]: I1128 16:29:45.389884 4857 scope.go:117] "RemoveContainer" containerID="ba8becd2004cec1023fc1511a89b232d4ec3ac8c5df6791d56ae519b5a6ae076"
Nov 28 16:29:45 crc kubenswrapper[4857]: I1128 16:29:45.407828 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wkqpq"]
Nov 28 16:29:45 crc kubenswrapper[4857]: I1128 16:29:45.419541 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wkqpq"]
Nov 28 16:29:45 crc kubenswrapper[4857]: I1128 16:29:45.432423 4857 scope.go:117] "RemoveContainer" containerID="0d91321bf7fae2dfada8bdc41a50916a8c18e3948b36c3cac850d305f2dad581"
Nov 28 16:29:45 crc kubenswrapper[4857]: I1128 16:29:45.470707 4857 scope.go:117] "RemoveContainer" containerID="50241e97da788ca42e6b4f982f73acdc838ad882358216398868d7968f443488"
Nov 28 16:29:45 crc kubenswrapper[4857]: E1128 16:29:45.471171 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"50241e97da788ca42e6b4f982f73acdc838ad882358216398868d7968f443488\": container with ID starting with 50241e97da788ca42e6b4f982f73acdc838ad882358216398868d7968f443488 not found: ID does not exist" containerID="50241e97da788ca42e6b4f982f73acdc838ad882358216398868d7968f443488"
Nov 28 16:29:45 crc kubenswrapper[4857]: I1128 16:29:45.471201 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"50241e97da788ca42e6b4f982f73acdc838ad882358216398868d7968f443488"} err="failed to get container status \"50241e97da788ca42e6b4f982f73acdc838ad882358216398868d7968f443488\": rpc error: code = NotFound desc = could not find container \"50241e97da788ca42e6b4f982f73acdc838ad882358216398868d7968f443488\": container with ID starting with 50241e97da788ca42e6b4f982f73acdc838ad882358216398868d7968f443488 not found: ID does not exist"
Nov 28 16:29:45 crc kubenswrapper[4857]: I1128 16:29:45.471226 4857 scope.go:117] "RemoveContainer" containerID="ba8becd2004cec1023fc1511a89b232d4ec3ac8c5df6791d56ae519b5a6ae076"
Nov 28 16:29:45 crc kubenswrapper[4857]: E1128 16:29:45.471406 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba8becd2004cec1023fc1511a89b232d4ec3ac8c5df6791d56ae519b5a6ae076\": container with ID starting with ba8becd2004cec1023fc1511a89b232d4ec3ac8c5df6791d56ae519b5a6ae076 not found: ID does not exist" containerID="ba8becd2004cec1023fc1511a89b232d4ec3ac8c5df6791d56ae519b5a6ae076"
Nov 28 16:29:45 crc kubenswrapper[4857]: I1128 16:29:45.471427 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba8becd2004cec1023fc1511a89b232d4ec3ac8c5df6791d56ae519b5a6ae076"} err="failed to get container status \"ba8becd2004cec1023fc1511a89b232d4ec3ac8c5df6791d56ae519b5a6ae076\": rpc error: code = NotFound desc = could not find container \"ba8becd2004cec1023fc1511a89b232d4ec3ac8c5df6791d56ae519b5a6ae076\": container with ID starting with ba8becd2004cec1023fc1511a89b232d4ec3ac8c5df6791d56ae519b5a6ae076 not found: ID does not exist"
Nov 28 16:29:45 crc kubenswrapper[4857]: I1128 16:29:45.471441 4857 scope.go:117] "RemoveContainer" containerID="0d91321bf7fae2dfada8bdc41a50916a8c18e3948b36c3cac850d305f2dad581"
Nov 28 16:29:45 crc kubenswrapper[4857]: E1128 16:29:45.471601 4857 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d91321bf7fae2dfada8bdc41a50916a8c18e3948b36c3cac850d305f2dad581\": container with ID starting with 0d91321bf7fae2dfada8bdc41a50916a8c18e3948b36c3cac850d305f2dad581 not found: ID does not exist" containerID="0d91321bf7fae2dfada8bdc41a50916a8c18e3948b36c3cac850d305f2dad581"
Nov 28 16:29:45 crc kubenswrapper[4857]: I1128 16:29:45.471636 4857 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d91321bf7fae2dfada8bdc41a50916a8c18e3948b36c3cac850d305f2dad581"} err="failed to get container status \"0d91321bf7fae2dfada8bdc41a50916a8c18e3948b36c3cac850d305f2dad581\": rpc error: code = NotFound desc = could not find container \"0d91321bf7fae2dfada8bdc41a50916a8c18e3948b36c3cac850d305f2dad581\": container with ID starting with 0d91321bf7fae2dfada8bdc41a50916a8c18e3948b36c3cac850d305f2dad581 not found: ID does not exist"
Nov 28 16:29:46 crc kubenswrapper[4857]: I1128 16:29:46.242897 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df" path="/var/lib/kubelet/pods/b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df/volumes"
Nov 28 16:29:59 crc kubenswrapper[4857]: I1128 16:29:59.229493 4857 scope.go:117] "RemoveContainer" containerID="8a6621105ed3b693ac6fbebe439c6724963ff233af722cd9b255685c7859c917"
Nov 28 16:29:59 crc kubenswrapper[4857]: E1128 16:29:59.230255 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff"
Nov 28 16:30:00 crc kubenswrapper[4857]: I1128 16:30:00.178045 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405790-d5vnz"]
Nov 28 16:30:00 crc kubenswrapper[4857]: E1128 16:30:00.178607 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df" containerName="registry-server"
Nov 28 16:30:00 crc kubenswrapper[4857]: I1128 16:30:00.178631 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df" containerName="registry-server"
Nov 28 16:30:00 crc kubenswrapper[4857]: E1128 16:30:00.178667 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df" containerName="extract-content"
Nov 28 16:30:00 crc kubenswrapper[4857]: I1128 16:30:00.178673 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df" containerName="extract-content"
Nov 28 16:30:00 crc kubenswrapper[4857]: E1128 16:30:00.178688 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df" containerName="extract-utilities"
Nov 28 16:30:00 crc kubenswrapper[4857]: I1128 16:30:00.178695 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df" containerName="extract-utilities"
Nov 28 16:30:00 crc kubenswrapper[4857]: I1128 16:30:00.178924 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="b48ca4a7-0be7-4dd3-8dbb-b4b6b5a3f7df" containerName="registry-server"
Nov 28 16:30:00 crc kubenswrapper[4857]: I1128 16:30:00.179787 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-d5vnz"
found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-d5vnz" Nov 28 16:30:00 crc kubenswrapper[4857]: I1128 16:30:00.183002 4857 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 28 16:30:00 crc kubenswrapper[4857]: I1128 16:30:00.183520 4857 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 28 16:30:00 crc kubenswrapper[4857]: I1128 16:30:00.189787 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405790-d5vnz"] Nov 28 16:30:00 crc kubenswrapper[4857]: I1128 16:30:00.303071 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/67ecd5ab-da6b-4695-8e28-7e84ca2b69c7-secret-volume\") pod \"collect-profiles-29405790-d5vnz\" (UID: \"67ecd5ab-da6b-4695-8e28-7e84ca2b69c7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-d5vnz" Nov 28 16:30:00 crc kubenswrapper[4857]: I1128 16:30:00.303251 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/67ecd5ab-da6b-4695-8e28-7e84ca2b69c7-config-volume\") pod \"collect-profiles-29405790-d5vnz\" (UID: \"67ecd5ab-da6b-4695-8e28-7e84ca2b69c7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-d5vnz" Nov 28 16:30:00 crc kubenswrapper[4857]: I1128 16:30:00.303289 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cwn45\" (UniqueName: \"kubernetes.io/projected/67ecd5ab-da6b-4695-8e28-7e84ca2b69c7-kube-api-access-cwn45\") pod \"collect-profiles-29405790-d5vnz\" (UID: \"67ecd5ab-da6b-4695-8e28-7e84ca2b69c7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-d5vnz" Nov 28 16:30:00 crc kubenswrapper[4857]: I1128 16:30:00.405269 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/67ecd5ab-da6b-4695-8e28-7e84ca2b69c7-secret-volume\") pod \"collect-profiles-29405790-d5vnz\" (UID: \"67ecd5ab-da6b-4695-8e28-7e84ca2b69c7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-d5vnz" Nov 28 16:30:00 crc kubenswrapper[4857]: I1128 16:30:00.405670 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/67ecd5ab-da6b-4695-8e28-7e84ca2b69c7-config-volume\") pod \"collect-profiles-29405790-d5vnz\" (UID: \"67ecd5ab-da6b-4695-8e28-7e84ca2b69c7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-d5vnz" Nov 28 16:30:00 crc kubenswrapper[4857]: I1128 16:30:00.405694 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cwn45\" (UniqueName: \"kubernetes.io/projected/67ecd5ab-da6b-4695-8e28-7e84ca2b69c7-kube-api-access-cwn45\") pod \"collect-profiles-29405790-d5vnz\" (UID: \"67ecd5ab-da6b-4695-8e28-7e84ca2b69c7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-d5vnz" Nov 28 16:30:00 crc kubenswrapper[4857]: I1128 16:30:00.406619 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/67ecd5ab-da6b-4695-8e28-7e84ca2b69c7-config-volume\") 
pod \"collect-profiles-29405790-d5vnz\" (UID: \"67ecd5ab-da6b-4695-8e28-7e84ca2b69c7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-d5vnz" Nov 28 16:30:00 crc kubenswrapper[4857]: I1128 16:30:00.419718 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/67ecd5ab-da6b-4695-8e28-7e84ca2b69c7-secret-volume\") pod \"collect-profiles-29405790-d5vnz\" (UID: \"67ecd5ab-da6b-4695-8e28-7e84ca2b69c7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-d5vnz" Nov 28 16:30:00 crc kubenswrapper[4857]: I1128 16:30:00.423141 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cwn45\" (UniqueName: \"kubernetes.io/projected/67ecd5ab-da6b-4695-8e28-7e84ca2b69c7-kube-api-access-cwn45\") pod \"collect-profiles-29405790-d5vnz\" (UID: \"67ecd5ab-da6b-4695-8e28-7e84ca2b69c7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-d5vnz" Nov 28 16:30:00 crc kubenswrapper[4857]: I1128 16:30:00.505084 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-d5vnz" Nov 28 16:30:00 crc kubenswrapper[4857]: I1128 16:30:00.947152 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405790-d5vnz"] Nov 28 16:30:00 crc kubenswrapper[4857]: W1128 16:30:00.952964 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod67ecd5ab_da6b_4695_8e28_7e84ca2b69c7.slice/crio-1c0a2a445030710258042aab9a33f0a023a42f5b71178283d386911208f02995 WatchSource:0}: Error finding container 1c0a2a445030710258042aab9a33f0a023a42f5b71178283d386911208f02995: Status 404 returned error can't find the container with id 1c0a2a445030710258042aab9a33f0a023a42f5b71178283d386911208f02995 Nov 28 16:30:01 crc kubenswrapper[4857]: I1128 16:30:01.536634 4857 generic.go:334] "Generic (PLEG): container finished" podID="67ecd5ab-da6b-4695-8e28-7e84ca2b69c7" containerID="cb84d3b1db1c87026148d7ec0b332feef0c20ebb42e6d3a84b22014fe003b82b" exitCode=0 Nov 28 16:30:01 crc kubenswrapper[4857]: I1128 16:30:01.536757 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-d5vnz" event={"ID":"67ecd5ab-da6b-4695-8e28-7e84ca2b69c7","Type":"ContainerDied","Data":"cb84d3b1db1c87026148d7ec0b332feef0c20ebb42e6d3a84b22014fe003b82b"} Nov 28 16:30:01 crc kubenswrapper[4857]: I1128 16:30:01.537004 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-d5vnz" event={"ID":"67ecd5ab-da6b-4695-8e28-7e84ca2b69c7","Type":"ContainerStarted","Data":"1c0a2a445030710258042aab9a33f0a023a42f5b71178283d386911208f02995"} Nov 28 16:30:02 crc kubenswrapper[4857]: I1128 16:30:02.941044 4857 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-d5vnz" Nov 28 16:30:03 crc kubenswrapper[4857]: I1128 16:30:03.064118 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/67ecd5ab-da6b-4695-8e28-7e84ca2b69c7-config-volume\") pod \"67ecd5ab-da6b-4695-8e28-7e84ca2b69c7\" (UID: \"67ecd5ab-da6b-4695-8e28-7e84ca2b69c7\") " Nov 28 16:30:03 crc kubenswrapper[4857]: I1128 16:30:03.064373 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/67ecd5ab-da6b-4695-8e28-7e84ca2b69c7-secret-volume\") pod \"67ecd5ab-da6b-4695-8e28-7e84ca2b69c7\" (UID: \"67ecd5ab-da6b-4695-8e28-7e84ca2b69c7\") " Nov 28 16:30:03 crc kubenswrapper[4857]: I1128 16:30:03.064453 4857 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cwn45\" (UniqueName: \"kubernetes.io/projected/67ecd5ab-da6b-4695-8e28-7e84ca2b69c7-kube-api-access-cwn45\") pod \"67ecd5ab-da6b-4695-8e28-7e84ca2b69c7\" (UID: \"67ecd5ab-da6b-4695-8e28-7e84ca2b69c7\") " Nov 28 16:30:03 crc kubenswrapper[4857]: I1128 16:30:03.065586 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/67ecd5ab-da6b-4695-8e28-7e84ca2b69c7-config-volume" (OuterVolumeSpecName: "config-volume") pod "67ecd5ab-da6b-4695-8e28-7e84ca2b69c7" (UID: "67ecd5ab-da6b-4695-8e28-7e84ca2b69c7"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 28 16:30:03 crc kubenswrapper[4857]: I1128 16:30:03.072180 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/67ecd5ab-da6b-4695-8e28-7e84ca2b69c7-kube-api-access-cwn45" (OuterVolumeSpecName: "kube-api-access-cwn45") pod "67ecd5ab-da6b-4695-8e28-7e84ca2b69c7" (UID: "67ecd5ab-da6b-4695-8e28-7e84ca2b69c7"). InnerVolumeSpecName "kube-api-access-cwn45". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 28 16:30:03 crc kubenswrapper[4857]: I1128 16:30:03.072352 4857 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/67ecd5ab-da6b-4695-8e28-7e84ca2b69c7-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "67ecd5ab-da6b-4695-8e28-7e84ca2b69c7" (UID: "67ecd5ab-da6b-4695-8e28-7e84ca2b69c7"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 28 16:30:03 crc kubenswrapper[4857]: I1128 16:30:03.167568 4857 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/67ecd5ab-da6b-4695-8e28-7e84ca2b69c7-config-volume\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:03 crc kubenswrapper[4857]: I1128 16:30:03.168028 4857 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/67ecd5ab-da6b-4695-8e28-7e84ca2b69c7-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:03 crc kubenswrapper[4857]: I1128 16:30:03.168071 4857 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cwn45\" (UniqueName: \"kubernetes.io/projected/67ecd5ab-da6b-4695-8e28-7e84ca2b69c7-kube-api-access-cwn45\") on node \"crc\" DevicePath \"\"" Nov 28 16:30:03 crc kubenswrapper[4857]: I1128 16:30:03.557157 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-d5vnz" event={"ID":"67ecd5ab-da6b-4695-8e28-7e84ca2b69c7","Type":"ContainerDied","Data":"1c0a2a445030710258042aab9a33f0a023a42f5b71178283d386911208f02995"} Nov 28 16:30:03 crc kubenswrapper[4857]: I1128 16:30:03.557199 4857 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1c0a2a445030710258042aab9a33f0a023a42f5b71178283d386911208f02995" Nov 28 16:30:03 crc kubenswrapper[4857]: I1128 16:30:03.557245 4857 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29405790-d5vnz" Nov 28 16:30:04 crc kubenswrapper[4857]: I1128 16:30:04.012013 4857 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405745-rkz2z"] Nov 28 16:30:04 crc kubenswrapper[4857]: I1128 16:30:04.021379 4857 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29405745-rkz2z"] Nov 28 16:30:04 crc kubenswrapper[4857]: I1128 16:30:04.241175 4857 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ec6315b8-2c41-4dce-8fa6-badb75453f9c" path="/var/lib/kubelet/pods/ec6315b8-2c41-4dce-8fa6-badb75453f9c/volumes" Nov 28 16:30:14 crc kubenswrapper[4857]: I1128 16:30:14.229298 4857 scope.go:117] "RemoveContainer" containerID="8a6621105ed3b693ac6fbebe439c6724963ff233af722cd9b255685c7859c917" Nov 28 16:30:14 crc kubenswrapper[4857]: E1128 16:30:14.230094 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:30:25 crc kubenswrapper[4857]: I1128 16:30:25.229458 4857 scope.go:117] "RemoveContainer" containerID="8a6621105ed3b693ac6fbebe439c6724963ff233af722cd9b255685c7859c917" Nov 28 16:30:25 crc kubenswrapper[4857]: E1128 16:30:25.230248 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:30:34 crc kubenswrapper[4857]: I1128 16:30:34.360049 4857 scope.go:117] "RemoveContainer" containerID="5544fbb6eff98ceb8c3cf47bba7de6439fdcc7bf0f09db2f46ad1559622b3883" Nov 28 16:30:37 crc kubenswrapper[4857]: I1128 16:30:37.229372 4857 scope.go:117] "RemoveContainer" containerID="8a6621105ed3b693ac6fbebe439c6724963ff233af722cd9b255685c7859c917" Nov 28 16:30:37 crc kubenswrapper[4857]: E1128 16:30:37.231111 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:30:49 crc kubenswrapper[4857]: I1128 16:30:49.228772 4857 scope.go:117] "RemoveContainer" containerID="8a6621105ed3b693ac6fbebe439c6724963ff233af722cd9b255685c7859c917" Nov 28 16:30:49 crc kubenswrapper[4857]: E1128 16:30:49.229931 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:31:04 crc kubenswrapper[4857]: I1128 16:31:04.228858 4857 scope.go:117] "RemoveContainer" containerID="8a6621105ed3b693ac6fbebe439c6724963ff233af722cd9b255685c7859c917" Nov 28 16:31:04 crc kubenswrapper[4857]: E1128 16:31:04.229709 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:31:16 crc kubenswrapper[4857]: I1128 16:31:16.229020 4857 scope.go:117] "RemoveContainer" containerID="8a6621105ed3b693ac6fbebe439c6724963ff233af722cd9b255685c7859c917" Nov 28 16:31:16 crc kubenswrapper[4857]: E1128 16:31:16.230548 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:31:31 crc kubenswrapper[4857]: I1128 16:31:31.229317 4857 scope.go:117] "RemoveContainer" containerID="8a6621105ed3b693ac6fbebe439c6724963ff233af722cd9b255685c7859c917" Nov 28 16:31:31 crc kubenswrapper[4857]: E1128 16:31:31.231252 4857 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-dshsf_openshift-machine-config-operator(5d5445a4-417c-448a-a8a0-4a4f81828aff)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-dshsf" podUID="5d5445a4-417c-448a-a8a0-4a4f81828aff" Nov 28 16:31:35 crc kubenswrapper[4857]: I1128 16:31:35.050222 4857 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-srh5k"] Nov 28 16:31:35 crc kubenswrapper[4857]: E1128 16:31:35.051887 4857 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67ecd5ab-da6b-4695-8e28-7e84ca2b69c7" containerName="collect-profiles" Nov 28 16:31:35 crc kubenswrapper[4857]: I1128 16:31:35.051906 4857 state_mem.go:107] "Deleted CPUSet assignment" podUID="67ecd5ab-da6b-4695-8e28-7e84ca2b69c7" containerName="collect-profiles" Nov 28 16:31:35 crc kubenswrapper[4857]: I1128 16:31:35.052478 4857 memory_manager.go:354] "RemoveStaleState removing state" podUID="67ecd5ab-da6b-4695-8e28-7e84ca2b69c7" containerName="collect-profiles" Nov 28 16:31:35 crc kubenswrapper[4857]: I1128 16:31:35.056567 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-srh5k" Nov 28 16:31:35 crc kubenswrapper[4857]: I1128 16:31:35.069125 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-srh5k"] Nov 28 16:31:35 crc kubenswrapper[4857]: I1128 16:31:35.217706 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f69d90c4-a448-4074-9a00-fbdf2afcf8a9-utilities\") pod \"community-operators-srh5k\" (UID: \"f69d90c4-a448-4074-9a00-fbdf2afcf8a9\") " pod="openshift-marketplace/community-operators-srh5k" Nov 28 16:31:35 crc kubenswrapper[4857]: I1128 16:31:35.218078 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sm5bm\" (UniqueName: \"kubernetes.io/projected/f69d90c4-a448-4074-9a00-fbdf2afcf8a9-kube-api-access-sm5bm\") pod \"community-operators-srh5k\" (UID: \"f69d90c4-a448-4074-9a00-fbdf2afcf8a9\") " pod="openshift-marketplace/community-operators-srh5k" Nov 28 16:31:35 crc kubenswrapper[4857]: I1128 16:31:35.218271 4857 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f69d90c4-a448-4074-9a00-fbdf2afcf8a9-catalog-content\") pod \"community-operators-srh5k\" (UID: \"f69d90c4-a448-4074-9a00-fbdf2afcf8a9\") " pod="openshift-marketplace/community-operators-srh5k" Nov 28 16:31:35 crc kubenswrapper[4857]: I1128 16:31:35.320754 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sm5bm\" (UniqueName: \"kubernetes.io/projected/f69d90c4-a448-4074-9a00-fbdf2afcf8a9-kube-api-access-sm5bm\") pod \"community-operators-srh5k\" (UID: \"f69d90c4-a448-4074-9a00-fbdf2afcf8a9\") " pod="openshift-marketplace/community-operators-srh5k" Nov 28 16:31:35 crc kubenswrapper[4857]: I1128 16:31:35.320878 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f69d90c4-a448-4074-9a00-fbdf2afcf8a9-catalog-content\") pod \"community-operators-srh5k\" (UID: \"f69d90c4-a448-4074-9a00-fbdf2afcf8a9\") " pod="openshift-marketplace/community-operators-srh5k" Nov 28 16:31:35 crc kubenswrapper[4857]: I1128 16:31:35.321029 4857 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f69d90c4-a448-4074-9a00-fbdf2afcf8a9-utilities\") 
pod \"community-operators-srh5k\" (UID: \"f69d90c4-a448-4074-9a00-fbdf2afcf8a9\") " pod="openshift-marketplace/community-operators-srh5k" Nov 28 16:31:35 crc kubenswrapper[4857]: I1128 16:31:35.321467 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f69d90c4-a448-4074-9a00-fbdf2afcf8a9-catalog-content\") pod \"community-operators-srh5k\" (UID: \"f69d90c4-a448-4074-9a00-fbdf2afcf8a9\") " pod="openshift-marketplace/community-operators-srh5k" Nov 28 16:31:35 crc kubenswrapper[4857]: I1128 16:31:35.321798 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f69d90c4-a448-4074-9a00-fbdf2afcf8a9-utilities\") pod \"community-operators-srh5k\" (UID: \"f69d90c4-a448-4074-9a00-fbdf2afcf8a9\") " pod="openshift-marketplace/community-operators-srh5k" Nov 28 16:31:35 crc kubenswrapper[4857]: I1128 16:31:35.347579 4857 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sm5bm\" (UniqueName: \"kubernetes.io/projected/f69d90c4-a448-4074-9a00-fbdf2afcf8a9-kube-api-access-sm5bm\") pod \"community-operators-srh5k\" (UID: \"f69d90c4-a448-4074-9a00-fbdf2afcf8a9\") " pod="openshift-marketplace/community-operators-srh5k" Nov 28 16:31:35 crc kubenswrapper[4857]: I1128 16:31:35.389311 4857 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-srh5k" Nov 28 16:31:36 crc kubenswrapper[4857]: W1128 16:31:36.053746 4857 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf69d90c4_a448_4074_9a00_fbdf2afcf8a9.slice/crio-04eb4def08e10b378efc0912bba23a1b7654e51b70cfa0c9112268b180d2ec0a WatchSource:0}: Error finding container 04eb4def08e10b378efc0912bba23a1b7654e51b70cfa0c9112268b180d2ec0a: Status 404 returned error can't find the container with id 04eb4def08e10b378efc0912bba23a1b7654e51b70cfa0c9112268b180d2ec0a Nov 28 16:31:36 crc kubenswrapper[4857]: I1128 16:31:36.058368 4857 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-srh5k"] Nov 28 16:31:36 crc kubenswrapper[4857]: I1128 16:31:36.492736 4857 generic.go:334] "Generic (PLEG): container finished" podID="f69d90c4-a448-4074-9a00-fbdf2afcf8a9" containerID="efcfaaada2de217199b1c46676c2b2066e037c1a5cfbb09075453f3405f727f0" exitCode=0 Nov 28 16:31:36 crc kubenswrapper[4857]: I1128 16:31:36.492780 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-srh5k" event={"ID":"f69d90c4-a448-4074-9a00-fbdf2afcf8a9","Type":"ContainerDied","Data":"efcfaaada2de217199b1c46676c2b2066e037c1a5cfbb09075453f3405f727f0"} Nov 28 16:31:36 crc kubenswrapper[4857]: I1128 16:31:36.492810 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-srh5k" event={"ID":"f69d90c4-a448-4074-9a00-fbdf2afcf8a9","Type":"ContainerStarted","Data":"04eb4def08e10b378efc0912bba23a1b7654e51b70cfa0c9112268b180d2ec0a"} Nov 28 16:31:36 crc kubenswrapper[4857]: I1128 16:31:36.494758 4857 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 28 16:31:38 crc kubenswrapper[4857]: I1128 16:31:38.535678 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-srh5k" 
event={"ID":"f69d90c4-a448-4074-9a00-fbdf2afcf8a9","Type":"ContainerStarted","Data":"6cd5d6e192c5a25740a5e5b05acd1ca620b021e362d86db74a08874e51593c99"} Nov 28 16:31:40 crc kubenswrapper[4857]: I1128 16:31:40.553179 4857 generic.go:334] "Generic (PLEG): container finished" podID="f69d90c4-a448-4074-9a00-fbdf2afcf8a9" containerID="6cd5d6e192c5a25740a5e5b05acd1ca620b021e362d86db74a08874e51593c99" exitCode=0 Nov 28 16:31:40 crc kubenswrapper[4857]: I1128 16:31:40.553270 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-srh5k" event={"ID":"f69d90c4-a448-4074-9a00-fbdf2afcf8a9","Type":"ContainerDied","Data":"6cd5d6e192c5a25740a5e5b05acd1ca620b021e362d86db74a08874e51593c99"} Nov 28 16:31:42 crc kubenswrapper[4857]: I1128 16:31:42.573179 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-srh5k" event={"ID":"f69d90c4-a448-4074-9a00-fbdf2afcf8a9","Type":"ContainerStarted","Data":"4110db1930758a5cfc157aa164300e8f882698f2ad0a8c82ece836bb16456504"} Nov 28 16:31:42 crc kubenswrapper[4857]: I1128 16:31:42.599082 4857 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-srh5k" podStartSLOduration=3.194831092 podStartE2EDuration="8.599059055s" podCreationTimestamp="2025-11-28 16:31:34 +0000 UTC" firstStartedPulling="2025-11-28 16:31:36.494520836 +0000 UTC m=+10946.618462273" lastFinishedPulling="2025-11-28 16:31:41.898748779 +0000 UTC m=+10952.022690236" observedRunningTime="2025-11-28 16:31:42.588912927 +0000 UTC m=+10952.712854354" watchObservedRunningTime="2025-11-28 16:31:42.599059055 +0000 UTC m=+10952.723000492" Nov 28 16:31:45 crc kubenswrapper[4857]: I1128 16:31:45.228924 4857 scope.go:117] "RemoveContainer" containerID="8a6621105ed3b693ac6fbebe439c6724963ff233af722cd9b255685c7859c917" Nov 28 16:31:45 crc kubenswrapper[4857]: I1128 16:31:45.390215 4857 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-srh5k" Nov 28 16:31:45 crc kubenswrapper[4857]: I1128 16:31:45.390274 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-srh5k" Nov 28 16:31:45 crc kubenswrapper[4857]: I1128 16:31:45.466477 4857 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-srh5k" Nov 28 16:31:45 crc kubenswrapper[4857]: I1128 16:31:45.603914 4857 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-dshsf" event={"ID":"5d5445a4-417c-448a-a8a0-4a4f81828aff","Type":"ContainerStarted","Data":"82065f06fad432a105e014543c3e5a8a50c71413aaece17870bbb8b446db4658"} var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515112347171024447 0ustar coreroot‹íÁ  ÷Om7 €7šÞ'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015112347172017365 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015112321317016501 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015112321317015451 5ustar corecore